id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6,019
|
profiling_timer.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/profiling_timer.hpp
|
#pragma once
#include <util/types.hpp>
#include "time.hpp"
namespace rsx
{
// Lightweight polling timer for profiling. Disabled by default; when
// 'enabled' is false both methods are near no-ops (hence the branch hints).
struct profiling_timer
{
bool enabled = false;
u64 last = 0; // Reference timestamp; BUGFIX: was left uninitialized by the defaulted ctor,
              // so duration() before the first start() read an indeterminate value

profiling_timer() = default;

// Record the current system time as the new reference point (only when enabled)
void start()
{
if (enabled) [[unlikely]]
{
last = get_system_time();
}
}

// Returns the time elapsed since the previous start()/duration() call (in
// get_system_time() units) and advances the reference point.
// Returns 0 when profiling is disabled.
s64 duration()
{
if (!enabled) [[likely]]
{
return 0ll;
}

auto old = last;
last = get_system_time();
return static_cast<s64>(last - old);
}
};
}
| 441
|
C++
|
.h
| 29
| 12.068966
| 39
| 0.618227
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,020
|
texture_cache_predictor.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache_predictor.h
|
#pragma once
#include "../rsx_cache.h"
#include "../rsx_utils.h"
#include "TextureUtils.h"
#include <unordered_map>
namespace rsx
{
/**
* Predictor Entry History Queue
*
* Fixed-capacity history ring: push() prepends, so operator[](0) is always
* the most recently pushed value. Once buffer_size values are stored, the
* oldest entry is silently overwritten.
*/
template <u32 buffer_size>
class texture_cache_predictor_entry_history_queue
{
std::array<u32, buffer_size> m_storage;
u32 m_head;
u32 m_count;

public:
texture_cache_predictor_entry_history_queue()
{
clear();
}

// Forget all recorded values (storage itself is left untouched)
void clear()
{
m_head = buffer_size;
m_count = 0;
}

usz size() const
{
return m_count;
}

bool empty() const
{
return m_count == 0;
}

// Prepend a value, evicting the oldest entry when the ring is full
void push(u32 val)
{
if (m_count < buffer_size)
{
m_count++;
}

// Step the head backwards with wrap-around
m_head = (m_head == 0) ? (buffer_size - 1) : (m_head - 1);

AUDIT(m_head < buffer_size);
m_storage[m_head] = val;
}

// pos == 0 addresses the newest entry, pos == size() - 1 the oldest
u32 operator[](u32 pos) const
{
AUDIT(pos < m_count);
AUDIT(m_head < buffer_size);
return m_storage[(m_head + pos) % buffer_size];
}
};
/**
* Predictor key
*
* Identifies a predicted texture section by the CPU address range it spans,
* its texture format and the upload context it belongs to.
*/
template <typename traits>
struct texture_cache_predictor_key
{
using texture_format = typename traits::texture_format;
using section_storage_type = typename traits::section_storage_type;

address_range cpu_range;
texture_format format;
texture_upload_context context;

// Constructors
texture_cache_predictor_key() = default;

texture_cache_predictor_key(const address_range& _cpu_range, texture_format _format, texture_upload_context _context)
: cpu_range(_cpu_range)
, format(_format)
, context(_context)
{}

// Derive the key describing an existing cache section
texture_cache_predictor_key(const section_storage_type& section)
: cpu_range(section.get_section_range())
, format(section.get_format())
, context(section.get_context())
{}

// Methods
// Keys compare equal only when all three components match
bool operator==(const texture_cache_predictor_key& other) const
{
if (!(cpu_range == other.cpu_range))
return false;
if (!(format == other.format))
return false;
return context == other.context;
}
};
/**
* Predictor entry
*/
template<typename traits>
class texture_cache_predictor_entry
{
public:
using key_type = texture_cache_predictor_key<traits>;
using section_storage_type = typename traits::section_storage_type;
const key_type key;
private:
u32 m_guessed_writes;
u32 m_writes_since_last_flush;
static const u32 max_write_history_size = 16;
texture_cache_predictor_entry_history_queue<max_write_history_size> write_history;
static const u32 max_confidence = 8; // Cannot be more "confident" than this value
static const u32 confident_threshold = 6; // We are confident if confidence >= confidence_threshold
static const u32 starting_confidence = 3;
static const u32 confidence_guessed_flush = 2; // Confidence granted when we correctly guess there will be a flush
static const u32 confidence_guessed_no_flush = 1; // Confidence granted when we correctly guess there won't be a flush
static const u32 confidence_incorrect_guess = -2; // Confidence granted when our guess is incorrect
static const u32 confidence_mispredict = -4; // Confidence granted when a speculative flush is incorrect
u32 confidence;
public:
texture_cache_predictor_entry(key_type _key)
: key(std::move(_key))
{
reset();
}
~texture_cache_predictor_entry() = default;
u32 get_confidence() const
{
return confidence;
}
bool is_confident() const
{
return confidence >= confident_threshold;
}
bool key_matches(const key_type& other_key) const
{
return key == other_key;
}
bool key_matches(const section_storage_type& section) const
{
return key_matches(key_type(section));
}
void update_confidence(s32 delta)
{
if (delta > 0)
{
confidence += delta;
if (confidence > max_confidence)
{
confidence = max_confidence;
}
}
else if (delta < 0)
{
u32 neg_delta = static_cast<u32>(-delta);
if (confidence > neg_delta)
{
confidence -= neg_delta;
}
else
{
confidence = 0;
}
}
}
private:
// Returns how many writes we think there will be this time (i.e. between the last flush and the next flush)
// Returning UINT32_MAX means no guess is possible
u32 guess_number_of_writes() const
{
const auto history_size = write_history.size();
if (history_size == 0)
{
// We need some history to be able to take a guess
return -1;
}
else if (history_size == 1)
{
// If we have one history entry, we assume it will repeat
return write_history[0];
}
else
{
// For more than one entry, we try and find a pattern, and assume it holds
const u32 stop_when_found_matches = 4;
u32 matches_found = 0;
u32 guess = -1;
for (u32 i = 0; i < history_size; i++)
{
// If we are past the number of writes, it's not the same as this time
if (write_history[i] < m_writes_since_last_flush)
continue;
u32 cur_matches_found = 1;
// Try to find more matches
for (u32 j = 0; i + j + 1 < history_size; j++)
{
if (write_history[i + j + 1] != write_history[j])
break;
// We found another matching value
cur_matches_found++;
if (cur_matches_found >= stop_when_found_matches)
break;
}
// If we found more matches than all other comparisons, we take a guess
if (cur_matches_found > matches_found)
{
guess = write_history[i];
matches_found = cur_matches_found;
}
if (matches_found >= stop_when_found_matches)
break;
}
return guess;
}
}
void calculate_next_guess(bool reset)
{
if (reset || m_guessed_writes == umax || m_writes_since_last_flush > m_guessed_writes)
{
m_guessed_writes = guess_number_of_writes();
}
}
public:
void reset()
{
confidence = starting_confidence;
m_writes_since_last_flush = 0;
m_guessed_writes = -1;
write_history.clear();
}
void on_flush()
{
update_confidence(is_flush_likely() ? confidence_guessed_flush : confidence_incorrect_guess);
// Update history
write_history.push(m_writes_since_last_flush);
m_writes_since_last_flush = 0;
calculate_next_guess(true);
}
void on_write(bool mispredict)
{
if (mispredict || is_flush_likely())
{
update_confidence(mispredict ? confidence_mispredict : confidence_incorrect_guess);
}
else
{
update_confidence(confidence_guessed_no_flush);
}
m_writes_since_last_flush++;
calculate_next_guess(false);
}
bool is_flush_likely() const
{
return m_writes_since_last_flush >= m_guessed_writes;
}
// Returns true if we believe that the next operation on this memory range will be a flush
bool predict() const
{
// Disable prediction if we have a low confidence in our predictions
if (!is_confident())
return false;
return is_flush_likely();
}
};
/**
* Predictor
*
* Owns the map of per-key prediction entries and per-frame misprediction
* statistics for a texture cache instance.
*/
template <typename traits>
class texture_cache_predictor
{
public:
// Traits
using section_storage_type = typename traits::section_storage_type;
using texture_cache_type = typename traits::texture_cache_base_type;

using key_type = texture_cache_predictor_key<traits>;
using mapped_type = texture_cache_predictor_entry<traits>;
using map_type = std::unordered_map<key_type, mapped_type>;
using value_type = typename map_type::value_type;
using size_type = typename map_type::size_type;
using iterator = typename map_type::iterator;
using const_iterator = typename map_type::const_iterator;

private:
// Member variables
map_type m_entries;
texture_cache_type* m_tex_cache; // Owning cache; only stored here, never dereferenced in this class

public:
// Per-frame statistics
atomic_t<u32> m_mispredictions_this_frame = {0};

// Constructors
texture_cache_predictor(texture_cache_type* tex_cache)
: m_tex_cache(tex_cache) {}

~texture_cache_predictor() = default;

// Container wrappers
// FIX: begin() was declared constexpr, but std::unordered_map iteration is
// never usable in a constant expression (ill-formed, no diagnostic required);
// use plain inline for consistency with end()
inline iterator begin() noexcept { return m_entries.begin(); }
inline const_iterator begin() const noexcept { return m_entries.begin(); }
inline iterator end() noexcept { return m_entries.end(); }
inline const_iterator end() const noexcept { return m_entries.end(); }

bool empty() const noexcept { return m_entries.empty(); }
size_type size() const noexcept { return m_entries.size(); }
void clear() { m_entries.clear(); }

// Returns the entry for 'key', creating it on first use
mapped_type& operator[](const key_type& key)
{
auto ret = m_entries.try_emplace(key, key);
AUDIT(ret.first != m_entries.end());
return ret.first->second;
}

mapped_type& operator[](const section_storage_type& section)
{
return (*this)[key_type(section)];
}

// Callbacks
void on_frame_end()
{
m_mispredictions_this_frame = 0;
}

void on_misprediction()
{
m_mispredictions_this_frame++;
}

// Returns true if the next operation is likely to be a read
bool predict(const key_type& key) const
{
// Use "find" to avoid allocating entries if they do not exist
const_iterator entry_it = m_entries.find(key);
if (entry_it == m_entries.end())
{
return false;
}
else
{
return entry_it->second.predict();
}
}

bool predict(const section_storage_type& section) const
{
return predict(key_type(section));
}
};
} // namespace rsx
// Hash support so predictor keys can be used in std::unordered_map.
// The address-range hash is combined with the raw format and (shifted)
// context values via XOR.
template <typename Traits>
struct std::hash<rsx::texture_cache_predictor_key<Traits>>
{
usz operator()(const rsx::texture_cache_predictor_key<Traits>& k) const
{
const usz range_bits = std::hash<utils::address_range>{}(k.cpu_range);
const usz format_bits = static_cast<usz>(k.format);
const usz context_bits = static_cast<usz>(k.context) << 16;
return range_bits ^ format_bits ^ context_bits;
}
};
| 9,600
|
C++
|
.h
| 341
| 24.378299
| 121
| 0.673338
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,021
|
ring_buffer_helper.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/ring_buffer_helper.h
|
#pragma once
#include "Utilities/StrFmt.h"
#include "util/asm.hpp"
/**
* Ring buffer memory helper :
* There are 2 "pointers" (offset inside a memory buffer to be provided by class derivative)
* PUT pointer "points" to the start of allocatable space.
* GET pointer "points" to the start of memory in use by the GPU.
* Space between GET and PUT is used by the GPU ; this structure check that this memory is not overwritten.
* User has to update the GET pointer when synchronisation happens.
*/
class data_heap
{
protected:
/**
* Does alloc cross get position ?
* True when an aligned allocation of 'size' bytes fits either between PUT
* and the end of the heap, or wrapped around at offset 0, without touching
* the space still in use by the GPU (between GET and PUT).
*/
template<int Alignment>
bool can_alloc(usz size) const
{
usz alloc_size = utils::align(size, Alignment);
usz aligned_put_pos = utils::align(m_put_pos, Alignment);
if (aligned_put_pos + alloc_size < m_size)
{
// range before get
if (aligned_put_pos + alloc_size < m_get_pos)
return true;
// range after get
if (aligned_put_pos > m_get_pos)
return true;
return false;
}
else
{
// ..]....[..get..
if (aligned_put_pos < m_get_pos)
return false;
// ..get..]...[...
// Actually all resources extending beyond heap space starts at 0
if (alloc_size > m_get_pos)
return false;
return true;
}
}

// Grow the buffer to hold at least size bytes
virtual bool grow(usz /*size*/)
{
// Stub
return false;
}

// FIX: members are now value-initialized so a default-constructed heap is in
// a deterministic (empty) state until init() is called
usz m_size = 0;
usz m_put_pos = 0; // Start of free space
usz m_min_guard_size = 0; // If an allocation touches the guard region, reset the heap to avoid going over budget
usz m_current_allocated_size = 0;
usz m_largest_allocated_pool = 0;
const char* m_name = nullptr; // Diagnostic label; not owned, must outlive the heap (FIX: was char* via const_cast)

public:
data_heap() = default;
// FIX: virtual - this type is used polymorphically (grow/is_critical are
// virtual), so deleting a derived heap through data_heap* must be well-defined
virtual ~data_heap() = default;
data_heap(const data_heap&) = delete;
data_heap(data_heap&&) = delete;
// Assignment deleted for consistency with the deleted copy/move constructors
data_heap& operator=(const data_heap&) = delete;
data_heap& operator=(data_heap&&) = delete;

usz m_get_pos = 0; // End of free space

// (Re)initialize bookkeeping over a buffer of heap_size bytes.
// buffer_name is only ever read (for error messages).
void init(usz heap_size, const char* buffer_name = "unnamed", usz min_guard_size = 0x10000)
{
m_name = buffer_name;
m_size = heap_size;
m_put_pos = 0;
m_get_pos = heap_size - 1;

// Allocation stats
m_min_guard_size = min_guard_size;
m_current_allocated_size = 0;
m_largest_allocated_pool = 0;
}

// Allocate 'size' bytes rounded up to Alignment; returns the offset of the
// allocation, wrapping to offset 0 when the tail of the buffer is too small.
// Throws when the request cannot be satisfied even after grow().
template<int Alignment>
usz alloc(usz size)
{
const usz alloc_size = utils::align(size, Alignment);
const usz aligned_put_pos = utils::align(m_put_pos, Alignment);

if (!can_alloc<Alignment>(size) && !grow(alloc_size))
{
fmt::throw_exception("[%s] Working buffer not big enough, buffer_length=%d allocated=%d requested=%d guard=%d largest_pool=%d",
m_name, m_size, m_current_allocated_size, size, m_min_guard_size, m_largest_allocated_pool);
}

const usz block_length = (aligned_put_pos - m_put_pos) + alloc_size;
m_current_allocated_size += block_length;
m_largest_allocated_pool = std::max(m_largest_allocated_pool, block_length);

if (aligned_put_pos + alloc_size < m_size)
{
m_put_pos = aligned_put_pos + alloc_size;
return aligned_put_pos;
}
else
{
// Tail too small; restart allocation from the beginning of the buffer
m_put_pos = alloc_size;
return 0;
}
}

/**
* return current putpos - 1
*/
usz get_current_put_pos_minus_one() const
{
return (m_put_pos > 0) ? m_put_pos - 1 : m_size - 1;
}

// True when in-flight allocations plus the guard region no longer fit the heap
virtual bool is_critical() const
{
const usz guard_length = std::max(m_min_guard_size, m_largest_allocated_pool);
return (m_current_allocated_size + guard_length) >= m_size;
}

void reset_allocation_stats()
{
m_current_allocated_size = 0;
m_largest_allocated_pool = 0;
m_get_pos = get_current_put_pos_minus_one();
}

// Updates the current_allocated_size metrics
void notify()
{
if (m_get_pos == umax)
m_current_allocated_size = 0;
else if (m_get_pos < m_put_pos)
m_current_allocated_size = (m_put_pos - m_get_pos - 1);
else if (m_get_pos > m_put_pos)
m_current_allocated_size = (m_put_pos + (m_size - m_get_pos - 1));
else
fmt::throw_exception("m_put_pos == m_get_pos!");
}

usz size() const
{
return m_size;
}
};
| 3,834
|
C++
|
.h
| 132
| 26.227273
| 130
| 0.682682
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,022
|
surface_store.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/surface_store.h
|
#pragma once
#include "surface_utils.h"
#include "simple_array.hpp"
#include "ranged_map.hpp"
#include "surface_cache_dma.hpp"
#include "../gcm_enums.h"
#include "../rsx_utils.h"
#include <list>
#include "util/asm.hpp"
#include <unordered_map>
namespace rsx
{
namespace utility
{
// Free helper declarations; definitions live outside this header
std::vector<u8> get_rtt_indexes(surface_target color_target);
usz get_aligned_pitch(surface_color_format format, u32 width);
usz get_packed_pitch(surface_color_format format, u32 width);
}
template <typename Traits>
struct surface_store
{
// Horizontal sample multiplier for an antialiasing mode (1x AA -> 1, else 2)
static constexpr u32 get_aa_factor_u(surface_antialiasing aa_mode)
{
switch (aa_mode)
{
case surface_antialiasing::center_1_sample:
return 1;
default:
return 2;
}
}
// Vertical sample multiplier for an antialiasing mode.
// 1x and diagonal 2x modes occupy a single row of samples; the rest double it.
static constexpr u32 get_aa_factor_v(surface_antialiasing aa_mode)
{
const bool single_row =
aa_mode == surface_antialiasing::center_1_sample ||
aa_mode == surface_antialiasing::diagonal_centered_2_samples;
return single_row ? 1 : 2;
}
public:
using surface_storage_type = typename Traits::surface_storage_type;
using surface_type = typename Traits::surface_type;
using command_list_type = typename Traits::command_list_type;
using surface_overlap_info = surface_overlap_info_t<surface_type>;
using surface_ranged_map = ranged_map<surface_storage_type, 0x400000>;
using surface_cache_dma_map = surface_cache_dma<Traits, 0x400000>;
protected:
// Color and depth-stencil surfaces live in separate ranged maps
surface_ranged_map m_render_targets_storage = {};
surface_ranged_map m_depth_stencil_storage = {};
// Running min/max bounds of memory ever covered by each surface class
rsx::address_range m_render_targets_memory_range;
rsx::address_range m_depth_stencil_memory_range;
surface_cache_dma_map m_dma_block;
bool m_invalidate_on_write = false;
rsx::surface_raster_type m_active_raster_type = rsx::surface_raster_type::linear;
public:
// Currently bound targets: up to 4 color MRTs plus one depth-stencil,
// stored as (address, surface) pairs
rsx::simple_array<u8> m_bound_render_target_ids = {};
std::array<std::pair<u32, surface_type>, 4> m_bound_render_targets = {};
std::pair<u32, surface_type> m_bound_depth_stencil = {};
// List of sections derived from a section that has been split and invalidated
std::vector<std::pair<u32, surface_type>> orphaned_surfaces;
// List of sections that have been wholly inherited and invalidated
std::vector<surface_type> superseded_surfaces;
// Pool of retired surface objects available for reuse
std::list<surface_storage_type> invalidated_resources;
const u64 max_invalidated_resources_count = 256ull;
u64 cache_tag = 1ull; // Use 1 as the start since 0 is default tag on new surfaces
u64 write_tag = 1ull;
// Amount of virtual PS3 memory tied to allocated textures
u64 m_active_memory_used = 0;
surface_store() = default;
~surface_store() = default;
surface_store(const surface_store&) = delete;
private:
// Preserves the parts of prev_surface's memory that fall outside a new
// surface being created at 'address' (width/height/bpp/aa describe the new
// surface). Leftover data to the right (split in X) and/or below (split in Y)
// the new surface is cloned into its own surface object so it is not lost;
// clones are recorded in orphaned_surfaces.
template <bool is_depth_surface>
void split_surface_region(command_list_type cmd, u32 address, surface_type prev_surface, u16 width, u16 height, u8 bpp, rsx::surface_antialiasing aa)
{
// Clones 'region' of prev_surface into a surface keyed at new_address inside
// 'data' (either the color or the depth-stencil storage map)
auto insert_new_surface = [&](
u32 new_address,
deferred_clipped_region<surface_type>& region,
surface_ranged_map& data)
{
surface_storage_type sink;
surface_type invalidated = 0;
if (const auto found = data.find(new_address);
found != data.end())
{
if (Traits::is_compatible_surface(Traits::get(found->second), region.source, region.width, region.height, 1))
{
if (found->second->last_use_tag >= prev_surface->last_use_tag)
{
// If memory in this block is newer, do not overwrite with stale data
return;
}
// There is no need to erase due to the reinsertion below
sink = std::move(found->second);
}
else
{
// Incompatible object occupying the slot; retire it to the invalidated pool
invalidate(found->second);
data.erase(new_address);
// invalidate() pushed the evicted object onto the back of invalidated_resources
auto &old = invalidated_resources.back();
if (Traits::surface_is_pitch_compatible(old, prev_surface->get_rsx_pitch()))
{
if (old->last_use_tag >= prev_surface->last_use_tag) [[unlikely]]
{
// The evicted object held newer data; remember it so it can be inherited below
invalidated = Traits::get(old);
}
}
}
}
if (sink)
{
// Memory requirements can be altered when cloning
free_rsx_memory(Traits::get(sink));
}
Traits::clone_surface(cmd, sink, region.source, new_address, region);
allocate_rsx_memory(Traits::get(sink));
if (invalidated) [[unlikely]]
{
// Halfplement the merge by crude inheritance. Should recursively split the memory blocks instead.
if (sink->old_contents.empty()) [[likely]]
{
sink->set_old_contents(invalidated);
}
else
{
// Clip the inherited region to the intersection of both surfaces
const auto existing = sink->get_normalized_memory_area();
const auto incoming = invalidated->get_normalized_memory_area();
deferred_clipped_region<surface_type> region{};
region.source = invalidated;
region.target = Traits::get(sink);
region.width = std::min(existing.x2, incoming.x2);
region.height = std::min(existing.y2, incoming.y2);
sink->set_old_contents_region(region, true);
}
}
ensure(region.target == Traits::get(sink));
orphaned_surfaces.push_back({ address, region.target });
data.emplace(region.target->get_memory_range(), std::move(sink));
};
// Define incoming region
// NOTE: widths here are measured in bytes (texel width * bpp * AA factor),
// heights in rows
size2u old, _new;
const auto prev_area = prev_surface->get_normalized_memory_area();
const auto prev_bpp = prev_surface->get_bpp();
old.width = prev_area.x2;
old.height = prev_area.y2;
_new.width = width * bpp * get_aa_factor_u(aa);
_new.height = height * get_aa_factor_v(aa);
if (old.width <= _new.width && old.height <= _new.height)
{
// No extra memory to be preserved
return;
}
// One-time data validity test
ensure(prev_surface);
if (prev_surface->read_barrier(cmd); !prev_surface->test())
{
return;
}
if (old.width > _new.width)
{
// Split in X
const u32 baseaddr = address + _new.width;
const u32 bytes_to_texels_x = (prev_bpp * prev_surface->samples_x);
deferred_clipped_region<surface_type> copy;
copy.src_x = _new.width / bytes_to_texels_x;
copy.src_y = 0;
copy.dst_x = 0;
copy.dst_y = 0;
copy.width = std::max<u16>((old.width - _new.width) / bytes_to_texels_x, 1);
copy.height = prev_surface->template get_surface_height<>();
copy.transfer_scale_x = 1.f;
copy.transfer_scale_y = 1.f;
copy.target = nullptr;
copy.source = prev_surface;
if constexpr (is_depth_surface)
{
insert_new_surface(baseaddr, copy, m_depth_stencil_storage);
}
else
{
insert_new_surface(baseaddr, copy, m_render_targets_storage);
}
}
if (old.height > _new.height)
{
// Split in Y
const u32 baseaddr = address + (_new.height * prev_surface->get_rsx_pitch());
const u32 bytes_to_texels_x = (prev_bpp * prev_surface->samples_x);
deferred_clipped_region<surface_type> copy;
copy.src_x = 0;
copy.src_y = _new.height / prev_surface->samples_y;
copy.dst_x = 0;
copy.dst_y = 0;
copy.width = std::max<u16>(std::min(_new.width, old.width) / bytes_to_texels_x, 1);
copy.height = std::max<u16>((old.height - _new.height) / prev_surface->samples_y, 1);
copy.transfer_scale_x = 1.f;
copy.transfer_scale_y = 1.f;
copy.target = nullptr;
copy.source = prev_surface;
if constexpr (is_depth_surface)
{
insert_new_surface(baseaddr, copy, m_depth_stencil_storage);
}
else
{
insert_new_surface(baseaddr, copy, m_render_targets_storage);
}
}
}
// Synchronizes a newly (re)bound surface at 'address' with every older
// overlapping surface in either storage map. Surfaces that end up fully
// inherited by the new one are retired and reported via superseded_surfaces.
// prev_surface, when non-null, is the object previously bound at 'address'
// and is always appended to the candidate list.
template <bool is_depth_surface>
void intersect_surface_region(command_list_type cmd, u32 address, surface_type new_surface, surface_type prev_surface)
{
// Collects (address, surface) pairs from 'data' overlapping mem_range whose
// contents are older than new_surface's
auto scan_list = [&new_surface, address](const rsx::address_range& mem_range,
surface_ranged_map& data) -> std::vector<std::pair<u32, surface_type>>
{
std::vector<std::pair<u32, surface_type>> result;
for (auto it = data.begin_range(mem_range); it != data.end(); ++it)
{
auto surface = Traits::get(it->second);
if (new_surface->last_use_tag >= surface->last_use_tag ||
new_surface == surface ||
address == it->first)
{
// Do not bother synchronizing with uninitialized data
continue;
}
// Memory partition check
// Local (VRAM) and main memory surfaces never intersect each other
if (mem_range.start >= constants::local_mem_base)
{
if (it->first < constants::local_mem_base) continue;
}
else
{
if (it->first >= constants::local_mem_base) continue;
}
// Pitch check
if (!rsx::pitch_compatible(surface, new_surface))
{
continue;
}
// Range check
const rsx::address_range this_range = surface->get_memory_range();
if (!this_range.overlaps(mem_range))
{
continue;
}
result.push_back({ it->first, surface });
ensure(it->first == surface->base_addr);
}
return result;
};
const rsx::address_range mem_range = new_surface->get_memory_range();
auto list1 = scan_list(mem_range, m_render_targets_storage);
auto list2 = scan_list(mem_range, m_depth_stencil_storage);
if (prev_surface)
{
// Append the previous removed surface to the intersection list
if constexpr (is_depth_surface)
{
list2.push_back({ address, prev_surface });
}
else
{
list1.push_back({ address, prev_surface });
}
}
else
{
if (list1.empty() && list2.empty())
{
return;
}
}
// Merge both candidate lists, moving whichever vector can be reused whole
std::vector<std::pair<u32, surface_type>> surface_info;
if (list1.empty())
{
surface_info = std::move(list2);
}
else if (list2.empty())
{
surface_info = std::move(list1);
}
else
{
const auto reserve = list1.size() + list2.size();
surface_info = std::move(list1);
surface_info.reserve(reserve);
for (const auto& e : list2) surface_info.push_back(e);
}
for (const auto &e: surface_info)
{
auto this_address = e.first;
auto surface = e.second;
if (surface->old_contents.size() == 1) [[unlikely]]
{
// Dirty zombies are possible with unused pixel storage subslices and are valid
// Avoid double transfer if possible
// This is an optional optimization that can be safely disabled
surface = static_cast<decltype(surface)>(surface->old_contents[0].source);
// Ignore self-reference
if (new_surface == surface)
{
continue;
}
// If this surface has already been added via another descendant, just ignore it
bool ignore = false;
for (const auto& slice : new_surface->old_contents)
{
if (slice.source == surface)
{
ignore = true;
break;
}
}
if (ignore) continue;
this_address = surface->base_addr;
ensure(this_address);
}
// Retire only surfaces that were fully inherited, are plain storage, and
// were not substituted by the zombie shortcut above
if (new_surface->inherit_surface_contents(surface) == surface_inheritance_result::full &&
surface->memory_usage_flags == surface_usage_flags::storage &&
surface != prev_surface &&
surface == e.second)
{
// This has been 'swallowed' by the new surface and can be safely freed
auto& storage = surface->is_depth_surface() ? m_depth_stencil_storage : m_render_targets_storage;
auto& object = ::at32(storage, e.first);
ensure(object);
if (!surface->old_contents.empty()) [[unlikely]]
{
// Resolve pending transfers before retiring the object
surface->read_barrier(cmd);
}
invalidate(object);
storage.erase(e.first);
superseded_surfaces.push_back(surface);
}
}
}
// Binds a surface with the given format/size/antialiasing at 'address'.
// Reuses the currently stored surface when its properties match, otherwise
// recycles an object from invalidated_resources or creates a new one, while
// preserving (split) and inheriting (intersect) any overlapping older data.
// An aliased surface of the opposite class (color vs depth) at the same
// address is evicted. Returns the surface now bound at 'address'.
template <bool depth, typename format_type, typename ...Args>
surface_type bind_surface_address(
command_list_type command_list,
u32 address,
format_type format,
surface_antialiasing antialias,
usz width, usz height, usz pitch,
u8 bpp,
Args&&... extra_params)
{
surface_storage_type old_surface_storage;
surface_storage_type new_surface_storage;
surface_type old_surface = nullptr;
surface_type new_surface = nullptr;
bool do_intersection_test = true;
bool store = true;
// Workaround. Preserve new surface tag value because pitch convert is unimplemented
u64 new_content_tag = 0;
// Select primary/secondary maps and bounds for the requested surface class
address_range* storage_bounds;
surface_ranged_map* primary_storage;
surface_ranged_map* secondary_storage;
if constexpr (depth)
{
primary_storage = &m_depth_stencil_storage;
secondary_storage = &m_render_targets_storage;
storage_bounds = &m_depth_stencil_memory_range;
}
else
{
primary_storage = &m_render_targets_storage;
secondary_storage = &m_depth_stencil_storage;
storage_bounds = &m_render_targets_memory_range;
}
// Check if render target already exists
auto It = primary_storage->find(address);
if (It != primary_storage->end())
{
surface_storage_type& surface = It->second;
const bool pitch_compatible = Traits::surface_is_pitch_compatible(surface, pitch);
if (!pitch_compatible)
{
// This object should be pitch-converted and re-intersected with
if (old_surface_storage = Traits::convert_pitch(command_list, surface, pitch); old_surface_storage)
{
old_surface = Traits::get(old_surface_storage);
}
else
{
// Preserve content age. This is hacky, but matches previous behavior
// TODO: Remove when pitch convert is implemented
new_content_tag = Traits::get(surface)->last_use_tag;
}
}
if (Traits::surface_matches_properties(surface, format, width, height, antialias))
{
// Exact match - rebind the existing object in place
if (!pitch_compatible)
{
Traits::invalidate_surface_contents(command_list, Traits::get(surface), format, address, pitch);
}
Traits::notify_surface_persist(surface);
Traits::prepare_surface_for_drawing(command_list, Traits::get(surface));
new_surface = Traits::get(surface);
store = false;
}
else
{
if (pitch_compatible)
{
// Preserve memory outside the area to be inherited if needed
split_surface_region<depth>(command_list, address, Traits::get(surface), static_cast<u16>(width), static_cast<u16>(height), bpp, antialias);
old_surface = Traits::get(surface);
}
// This will be unconditionally moved to invalidated list shortly
free_rsx_memory(Traits::get(surface));
Traits::notify_surface_invalidated(surface);
if (old_surface_storage)
{
// Pitch-converted data. Send to invalidated pool immediately.
invalidated_resources.push_back(std::move(old_surface_storage));
}
old_surface_storage = std::move(surface);
primary_storage->erase(It);
}
}
if (!new_surface)
{
// Range test
const auto aa_factor_v = get_aa_factor_v(antialias);
rsx::address_range range = rsx::address_range::start_length(address, static_cast<u32>(pitch * height * aa_factor_v));
*storage_bounds = range.get_min_max(*storage_bounds);
// Search invalidated resources for a suitable surface
for (auto It = invalidated_resources.begin(); It != invalidated_resources.end(); It++)
{
auto &surface = *It;
if (Traits::surface_matches_properties(surface, format, width, height, antialias, true))
{
new_surface_storage = std::move(surface);
Traits::notify_surface_reused(new_surface_storage);
if (old_surface_storage)
{
// Exchange this surface with the invalidated one
surface = std::move(old_surface_storage);
}
else
{
// Iterator is now empty - erase it
invalidated_resources.erase(It);
}
new_surface = Traits::get(new_surface_storage);
Traits::invalidate_surface_contents(command_list, new_surface, format, address, pitch);
Traits::prepare_surface_for_drawing(command_list, new_surface);
allocate_rsx_memory(new_surface);
break;
}
}
}
// Check for stale storage
if (old_surface_storage)
{
// This was already determined to be invalid and is excluded from testing above
invalidated_resources.push_back(std::move(old_surface_storage));
}
if (!new_surface)
{
// No recyclable object found - create from scratch
ensure(store);
new_surface_storage = Traits::create_new_surface(address, format, width, height, pitch, antialias, std::forward<Args>(extra_params)...);
new_surface = Traits::get(new_surface_storage);
Traits::prepare_surface_for_drawing(command_list, new_surface);
allocate_rsx_memory(new_surface);
}
// Remove and preserve if possible any overlapping/replaced surface from the other pool
auto aliased_surface = secondary_storage->find(address);
if (aliased_surface != secondary_storage->end())
{
if (Traits::surface_is_pitch_compatible(aliased_surface->second, pitch))
{
auto surface = Traits::get(aliased_surface->second);
split_surface_region<!depth>(command_list, address, surface, static_cast<u16>(width), static_cast<u16>(height), bpp, antialias);
if (!old_surface || old_surface->last_use_tag < surface->last_use_tag)
{
// TODO: This can leak data outside inherited bounds
old_surface = surface;
}
}
invalidate(aliased_surface->second);
secondary_storage->erase(aliased_surface);
}
// Check if old_surface is 'new' and hopefully avoid intersection
if (old_surface)
{
if (old_surface->last_use_tag < new_surface->last_use_tag)
{
// Can happen if aliasing occurs; a probable condition due to memory splitting
// This is highly unlikely but is possible in theory
old_surface = nullptr;
}
else if (old_surface->last_use_tag >= write_tag)
{
const auto new_area = new_surface->get_normalized_memory_area();
const auto old_area = old_surface->get_normalized_memory_area();
if (new_area.x2 <= old_area.x2 && new_area.y2 <= old_area.y2)
{
// Old surface covers the new one entirely; direct inheritance is sufficient
do_intersection_test = false;
new_surface->set_old_contents(old_surface);
}
}
}
if (do_intersection_test)
{
if (new_content_tag)
{
new_surface->last_use_tag = new_content_tag;
}
intersect_surface_region<depth>(command_list, address, new_surface, old_surface);
}
if (store)
{
// New surface was found among invalidated surfaces or created from scratch
primary_storage->emplace(new_surface->get_memory_range(), std::move(new_surface_storage));
}
ensure(!old_surface_storage);
ensure(new_surface->get_spp() == get_format_sample_count(antialias));
return new_surface;
}
// Account the guest memory span backing this surface
void allocate_rsx_memory(surface_type surface)
{
m_active_memory_used += surface->get_memory_range().length();
}
// Release this surface's contribution to the memory usage counter
void free_rsx_memory(surface_type surface)
{
ensure(surface->has_refs()); // "Surface memory double free"

const auto memory_size = surface->get_memory_range().length();
if (m_active_memory_used < memory_size) [[unlikely]]
{
// Counter would go negative - clamp and complain
rsx_log.error("Memory allocation underflow!");
m_active_memory_used = 0;
return;
}

m_active_memory_used -= memory_size;
}
// Release accounting for 'storage' and retire it into the invalidated pool
inline void invalidate(surface_storage_type& storage)
{
auto surface = Traits::get(storage);
free_rsx_memory(surface);
Traits::notify_surface_invalidated(storage);
invalidated_resources.push_back(std::move(storage));
}
// Fast path for pruning obsolete overlapping sections. Walks 'sections'
// newest-first, coalescing their memory ranges into a sorted list; once the
// coalesced coverage collapses to a single range containing 'range', every
// older section wholly inside 'range' is redundant and gets invalidated.
// Returns the number of sections removed.
int remove_duplicates_fast_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
{
// Range tests to check for gaps
std::list<utils::address_range> m_ranges;
bool invalidate_sections = false;
int removed_count = 0;

for (auto it = sections.crbegin(); it != sections.crend(); ++it)
{
auto this_range = it->surface->get_memory_range();
if (invalidate_sections)
{
// Coverage already proven; drop anything wholly inside the target range
if (this_range.inside(range))
{
invalidate_surface_address(it->base_address, it->is_depth);
removed_count++;
}
continue;
}

if (it->surface->get_rsx_pitch() != it->surface->get_native_pitch() &&
it->surface->template get_surface_height<>() != 1)
{
// Memory gap in descriptor
continue;
}

// Insert the range, respecting sort order
bool inserted = false;
for (auto iter = m_ranges.begin(); iter != m_ranges.end(); ++iter)
{
if (this_range.start < iter->start)
{
// This range slots in here. Test ranges after this one to find the end position
auto pos = iter;
for (auto _p = ++iter; _p != m_ranges.end();)
{
if (_p->start > (this_range.end + 1))
{
// Gap
break;
}

// Consume
this_range.end = std::max(this_range.end, _p->end);
_p = m_ranges.erase(_p);
}

m_ranges.insert(pos, this_range);
inserted = true; // BUGFIX: flag was never set, so the push_back below duplicated the range
break;
}
}

if (!inserted)
{
m_ranges.push_back(this_range);
}

// BUGFIX: this coverage test was in an 'else if' reachable only when
// 'inserted' was true, which never happened - making the whole routine
// a no-op. Run it after either insertion path.
if (m_ranges.size() == 1 && range.inside(m_ranges.front()))
{
invalidate_sections = true;
}
}

rsx_log.notice("rsx::surface_cache::check_for_duplicates_fast analysed %u overlapping sections and removed %u", ::size32(sections), removed_count);
return removed_count;
}
// Slow-path duplicate removal: a painter's algorithm over a per-byte coverage
// map of 'range'. Sections are visited newest-first; each one marks the bytes
// it writes. A section whose rows are entirely covered by newer sections (and
// which does not extend past 'range') is stale and gets invalidated.
// Precondition: range.length() < 64 MiB (coverage-map allocation guard).
void remove_duplicates_fallback_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
{
	// Originally used to debug crashes but this function breaks often enough that I'll leave the checks in for now.
	// Safe to remove after some time if no asserts are reported.
	constexpr u32 overrun_cookie_value = 0xCAFEBABEu;

	// Generic painter's algorithm to detect obsolete sections
	ensure(range.length() < 64 * 0x100000);
	std::vector<u8> marker(range.length() + sizeof(overrun_cookie_value), 0);

	// Tag end
	write_to_ptr(marker, range.length(), overrun_cookie_value);

	u32 removed_count = 0;

	// Marks [offset, offset + length) as covered.
	// Returns true if any byte in the row was not already covered by a newer section.
	auto compare_and_tag_row = [&](const u32 offset, u32 length) -> bool
	{
		u64 mask = 0;
		u8* dst_ptr = marker.data() + offset;

		// Consume the row in descending power-of-two chunks (8/4/2/1 bytes)
		while (length >= 8)
		{
			const u64 value = read_from_ptr<u64>(dst_ptr);
			const u64 block_mask = ~value; // If the value is not all 1s, set valid to true
			mask |= block_mask;

			write_to_ptr<u64>(dst_ptr, umax);
			dst_ptr += 8;
			length -= 8;
		}

		if (length >= 4)
		{
			const u32 value = read_from_ptr<u32>(dst_ptr);
			const u32 block_mask = ~value;
			mask |= block_mask;

			write_to_ptr<u32>(dst_ptr, umax);
			dst_ptr += 4;
			length -= 4;
		}

		if (length >= 2)
		{
			const u16 value = read_from_ptr<u16>(dst_ptr);
			const u16 block_mask = ~value;
			mask |= block_mask;

			write_to_ptr<u16>(dst_ptr, umax);
			dst_ptr += 2;
			length -= 2;
		}

		if (length)
		{
			const u8 value = *dst_ptr;
			const u8 block_mask = ~value;
			mask |= block_mask;

			*dst_ptr = umax;
		}

		return !!mask;
	};

	for (auto it = sections.crbegin(); it != sections.crend(); ++it)
	{
		auto this_range = it->surface->get_memory_range();
		ensure(this_range.overlaps(range));

		const auto native_pitch = it->surface->template get_surface_width<rsx::surface_metrics::bytes>();
		const auto rsx_pitch = it->surface->get_rsx_pitch();
		auto num_rows = it->surface->template get_surface_height<rsx::surface_metrics::samples>();
		bool valid = false;

		if (this_range.start < range.start)
		{
			// Starts outside bounds
			const auto internal_offset = (range.start - this_range.start);
			const auto row_num = internal_offset / rsx_pitch;
			const auto row_offset = internal_offset % rsx_pitch;

			// This section is unconditionally valid
			valid = true;

			if (row_offset < native_pitch)
			{
				compare_and_tag_row(0, std::min(native_pitch - row_offset, range.length()));
			}

			// Jump to next row...
			this_range.start = this_range.start + (row_num + 1) * rsx_pitch;
		}

		if (this_range.end > range.end)
		{
			// Unconditionally valid
			valid = true;
			this_range.end = range.end;
		}

		if (valid)
		{
			// Clamp the rows still to be tagged to the clipped sub-range
			if (this_range.start >= this_range.end)
			{
				continue;
			}

			num_rows = utils::aligned_div(this_range.length(), rsx_pitch);
		}

		// Tag each row; the section stays valid if any row touched fresh bytes
		for (u32 row = 0, offset = (this_range.start - range.start), section_len = (this_range.end - range.start + 1);
			row < num_rows;
			++row, offset += rsx_pitch)
		{
			valid |= compare_and_tag_row(offset, std::min<u32>(native_pitch, (section_len - offset)));
		}

		if (!valid)
		{
			removed_count++;
			rsx_log.warning("Stale surface at address 0x%x will be deleted", it->base_address);
			invalidate_surface_address(it->base_address, it->is_depth);
		}
	}

	// Notify
	rsx_log.notice("rsx::surface_cache::check_for_duplicates_fallback analysed %u overlapping sections and removed %u", ::size32(sections), removed_count);

	// Verify no OOB
	ensure(read_from_ptr<u32>(marker, range.length()) == overrun_cookie_value);
}
protected:
/**
* If render target already exists at address, issue state change operation on cmdList.
* Otherwise create one with width, height, clearColor info.
* returns the corresponding render target resource.
*/
// Binds (or creates) a color render target at 'address' on 'command_list'.
// Thin wrapper over bind_surface_address<false> supplying the color format's
// block size; backend-specific arguments are perfect-forwarded.
template <typename ...Args>
surface_type bind_address_as_render_targets(
	command_list_type command_list,
	u32 address,
	surface_color_format color_format,
	surface_antialiasing antialias,
	usz width, usz height, usz pitch,
	Args&&... extra_params)
{
	return bind_surface_address<false>(
		command_list, address, color_format, antialias,
		width, height, pitch, get_format_block_size_in_bytes(color_format),
		std::forward<Args>(extra_params)...);
}
// Binds (or creates) a depth-stencil surface at 'address' on 'command_list'.
// Thin wrapper over bind_surface_address<true> supplying the depth format's
// block size; backend-specific arguments are perfect-forwarded.
template <typename ...Args>
surface_type bind_address_as_depth_stencil(
	command_list_type command_list,
	u32 address,
	surface_depth_format2 depth_format,
	surface_antialiasing antialias,
	usz width, usz height, usz pitch,
	Args&&... extra_params)
{
	return bind_surface_address<true>(
		command_list, address, depth_format, antialias,
		width, height, pitch,
		get_format_block_size_in_bytes(depth_format),
		std::forward<Args>(extra_params)...);
}
// Collects all color and depth surfaces whose memory ranges intersect 'range'.
// Returns the two (unsorted) lists as a {color, depth} tuple.
std::tuple<std::vector<surface_type>, std::vector<surface_type>>
find_overlapping_set(const utils::address_range& range) const
{
	std::vector<surface_type> color_result, depth_result;

	// Cheap block-level rejection first; surfaces tend to be clustered
	if (m_render_targets_memory_range.valid() &&
		range.overlaps(m_render_targets_memory_range))
	{
		for (auto it = m_render_targets_storage.begin_range(range); it != m_render_targets_storage.end(); ++it)
		{
			auto surface = Traits::get(it->second);
			const auto surface_range = surface->get_memory_range();
			if (!range.overlaps(surface_range))
				continue;

			color_result.push_back(surface);
		}
	}

	if (m_depth_stencil_memory_range.valid() &&
		range.overlaps(m_depth_stencil_memory_range))
	{
		for (auto it = m_depth_stencil_storage.begin_range(range); it != m_depth_stencil_storage.end(); ++it)
		{
			auto surface = Traits::get(it->second);
			const auto surface_range = surface->get_memory_range();
			if (!range.overlaps(surface_range))
				continue;

			depth_result.push_back(surface);
		}
	}

	// FIX: the return type is a two-element tuple; the previous code returned a
	// third, unused 'result_range' value which does not match the declared type.
	return { color_result, depth_result };
}
// Flushes surface contents overlapping 'range' into the DMA scratch buffer so
// the CPU-visible copy is up to date. Surfaces are written oldest-first so
// newer data wins on overlap; surfaces not touched since the buffer's last
// sync ('bo_timestamp') are skipped.
void write_to_dma_buffers(
	command_list_type command_list,
	const utils::address_range& range)
{
	auto block_range = m_dma_block.to_block_range(range);
	auto [color_data, depth_stencil_data] = find_overlapping_set(block_range);
	auto [bo, offset, bo_timestamp] = m_dma_block
		.with_range(command_list, block_range)
		.get(block_range.start);

	u64 src_offset, dst_offset, write_length;
	auto block_length = block_range.length();

	// Merge both lists and order by last-use tag (oldest first)
	auto all_data = std::move(color_data);
	all_data.insert(all_data.end(), depth_stencil_data.begin(), depth_stencil_data.end());

	if (all_data.size() > 1)
	{
		std::sort(all_data.begin(), all_data.end(), [](const auto& a, const auto& b)
		{
			return a->last_use_tag < b->last_use_tag;
		});
	}

	for (const auto& surface : all_data)
	{
		if (surface->last_use_tag <= bo_timestamp)
		{
			// Not written since the last sync; buffer already has this data
			continue;
		}

		const auto this_range = surface->get_memory_range();
		const auto max_length = this_range.length();

		// Clip the copy window to the intersection of surface and block
		if (this_range.start < block_range.start)
		{
			src_offset = block_range.start - this_range.start;
			dst_offset = 0;
		}
		else
		{
			src_offset = 0;
			dst_offset = this_range.start - block_range.start;
		}

		write_length = std::min(max_length, block_length - dst_offset);
		Traits::write_render_target_to_memory(command_list, bo, surface, dst_offset, src_offset, write_length);
	}

	m_dma_block.touch(block_range);
}
public:
/**
* Update bound color and depth surface.
* Must be called everytime surface format, clip, or addresses changes.
*/
// Rebinds the full render-target state (color MRTs + depth-stencil) for the
// next draw. Previously bound surfaces are transitioned to a sampleable state
// before being replaced. Must be called whenever surface format, clip size,
// addresses, or AA mode changes.
template <typename ...Args>
void prepare_render_target(
	command_list_type command_list,
	surface_color_format color_format, surface_depth_format2 depth_format,
	u32 clip_horizontal_reg, u32 clip_vertical_reg,
	surface_target set_surface_target,
	surface_antialiasing antialias,
	surface_raster_type raster_type,
	const std::array<u32, 4> &surface_addresses, u32 address_z,
	const std::array<u32, 4> &surface_pitch, u32 zeta_pitch,
	Args&&... extra_params)
{
	u32 clip_width = clip_horizontal_reg;
	u32 clip_height = clip_vertical_reg;

	cache_tag = rsx::get_shared_tag();
	// With MSAA active, writes must resolve; force the on_write slow path
	m_invalidate_on_write = (antialias != rsx::surface_antialiasing::center_1_sample);
	m_active_raster_type = raster_type;

	// Make previous RTTs sampleable
	for (const auto& i : m_bound_render_target_ids)
	{
		auto &rtt = m_bound_render_targets[i];
		Traits::prepare_surface_for_sampling(command_list, std::get<1>(rtt));
		rtt = std::make_pair(0, nullptr);
	}
	m_bound_render_target_ids.clear();

	if (const auto rtt_indices = utility::get_rtt_indexes(set_surface_target);
		!rtt_indices.empty()) [[likely]]
	{
		// Create/Reuse requested rtts
		for (u8 surface_index : rtt_indices)
		{
			if (surface_addresses[surface_index] == 0)
				continue;

			m_bound_render_targets[surface_index] = std::make_pair(surface_addresses[surface_index],
				bind_address_as_render_targets(command_list, surface_addresses[surface_index], color_format, antialias,
					clip_width, clip_height, surface_pitch[surface_index], std::forward<Args>(extra_params)...));

			m_bound_render_target_ids.push_back(surface_index);
		}
	}

	// Same for depth buffer
	if (std::get<1>(m_bound_depth_stencil) != nullptr)
	{
		Traits::prepare_surface_for_sampling(command_list, std::get<1>(m_bound_depth_stencil));
	}

	if (address_z) [[likely]]
	{
		m_bound_depth_stencil = std::make_pair(address_z,
			bind_address_as_depth_stencil(command_list, address_z, depth_format, antialias,
				clip_width, clip_height, zeta_pitch, std::forward<Args>(extra_params)...));
	}
	else
	{
		m_bound_depth_stencil = std::make_pair(0, nullptr);
	}
}
// Number of color render targets currently bound (0..4).
u8 get_color_surface_count() const
{
	const auto bound_count = m_bound_render_target_ids.size();
	return static_cast<u8>(bound_count);
}
// Returns the surface registered exactly at 'address' (color pool first,
// then depth pool), or nullptr if none starts at that address.
surface_type get_surface_at(u32 address)
{
	if (auto found = m_render_targets_storage.find(address); found != m_render_targets_storage.end())
	{
		return Traits::get(found->second);
	}

	if (auto found = m_depth_stencil_storage.find(address); found != m_depth_stencil_storage.end())
	{
		return Traits::get(found->second);
	}

	return nullptr;
}
/**
* Invalidates surface that exists at an address
*/
// Invalidates the surface registered at 'addr' in the color or depth pool.
// Currently bound surfaces are never invalidated; an error is logged instead.
void invalidate_surface_address(u32 addr, bool depth)
{
	if (address_is_bound(addr))
	{
		rsx_log.error("Cannot invalidate a currently bound render target!");
		return;
	}

	auto& pool = depth ? m_depth_stencil_storage : m_render_targets_storage;
	if (auto found = pool.find(addr); found != pool.end())
	{
		invalidate(found->second);
		pool.erase(found);
	}
}
// True if 'address' is the base address of any currently bound color MRT
// or of the bound depth-stencil surface. 'address' must be non-zero.
inline bool address_is_bound(u32 address) const
{
	ensure(address);

	for (const auto& rtt : m_bound_render_targets)
	{
		if (rtt.first == address)
		{
			return true;
		}
	}

	return (m_bound_depth_stencil.first == address);
}
// Finds every surface overlapping the 2D region described by
// (texaddr, required_width/height/pitch/bpp) and computes the clipped
// src/dst sub-rectangles needed to assemble that region from surface data.
// Surfaces that fail their memory test are invalidated as a side effect.
// Results are sorted oldest-first (ties broken by smaller area first) so a
// compositor can paint them in order and end up with the newest data on top.
template <typename commandbuffer_type>
std::vector<surface_overlap_info> get_merged_texture_memory_region(commandbuffer_type& cmd, u32 texaddr, u32 required_width, u32 required_height, u32 required_pitch, u8 required_bpp, rsx::surface_access access)
{
	std::vector<surface_overlap_info> result;
	std::vector<std::pair<u32, bool>> dirty;

	const auto surface_internal_pitch = (required_width * required_bpp);

	// Sanity check
	if (surface_internal_pitch > required_pitch) [[unlikely]]
	{
		rsx_log.warning("Invalid 2D region descriptor. w=%d, h=%d, bpp=%d, pitch=%d",
						required_width, required_height, required_bpp, required_pitch);
		return {};
	}

	// The last row does not extend to full pitch; exclude the trailing gap
	const auto test_range = utils::address_range::start_length(texaddr, (required_pitch * required_height) - (required_pitch - surface_internal_pitch));

	auto process_list_function = [&](surface_ranged_map& data, bool is_depth)
	{
		for (auto it = data.begin_range(test_range); it != data.end(); ++it)
		{
			const auto range = it->second->get_memory_range();
			if (!range.overlaps(test_range))
				continue;

			auto surface = it->second.get();
			if (access.is_transfer() && access.is_read() && surface->write_through())
			{
				// The surface has no data other than what can be loaded from CPU
				continue;
			}

			if (!rsx::pitch_compatible(surface, required_pitch, required_height))
				continue;

			surface_overlap_info info;
			u32 width, height;
			info.surface = surface;
			info.base_address = range.start;
			info.is_depth = is_depth;

			// Express surface dimensions in the requester's texel units
			const u32 normalized_surface_width = surface->template get_surface_width<rsx::surface_metrics::bytes>() / required_bpp;
			const u32 normalized_surface_height = surface->template get_surface_height<rsx::surface_metrics::samples>();

			if (range.start >= texaddr) [[likely]]
			{
				// Surface starts inside the requested region
				const auto offset = range.start - texaddr;
				info.dst_area.y = (offset / required_pitch);
				info.dst_area.x = (offset % required_pitch) / required_bpp;

				if (info.dst_area.x >= required_width || info.dst_area.y >= required_height) [[unlikely]]
				{
					// Out of bounds
					continue;
				}

				info.src_area.x = 0;
				info.src_area.y = 0;
				width = std::min<u32>(normalized_surface_width, required_width - info.dst_area.x);
				height = std::min<u32>(normalized_surface_height, required_height - info.dst_area.y);
			}
			else
			{
				// Requested region starts inside the surface
				const auto pitch = surface->get_rsx_pitch();
				const auto offset = texaddr - range.start;
				info.src_area.y = (offset / pitch);
				info.src_area.x = (offset % pitch) / required_bpp;

				if (info.src_area.x >= normalized_surface_width || info.src_area.y >= normalized_surface_height) [[unlikely]]
				{
					// Region lies outside the actual texture area, but inside the 'tile'
					// In this case, a small region lies to the top-left corner, partially occupying the target
					continue;
				}

				info.dst_area.x = 0;
				info.dst_area.y = 0;
				width = std::min<u32>(required_width, normalized_surface_width - info.src_area.x);
				height = std::min<u32>(required_height, normalized_surface_height - info.src_area.y);
			}

			// Delay this as much as possible to avoid side-effects of spamming barrier
			if (surface->memory_barrier(cmd, access); !surface->test())
			{
				// Backing memory changed behind our back; schedule removal
				dirty.emplace_back(range.start, is_depth);
				continue;
			}

			info.is_clipped = (width < required_width || height < required_height);
			info.src_area.height = info.dst_area.height = height;
			info.dst_area.width = width;

			if (auto surface_bpp = surface->get_bpp(); surface_bpp != required_bpp) [[unlikely]]
			{
				// Width is calculated in the coordinate-space of the requester; normalize
				info.src_area.x = (info.src_area.x * required_bpp) / surface_bpp;
				info.src_area.width = utils::align(width * required_bpp, surface_bpp) / surface_bpp;
			}
			else
			{
				info.src_area.width = width;
			}

			result.push_back(info);
		}
	};

	// Range test helper to quickly discard blocks
	// Fortunately, render targets tend to be clustered anyway
	if (m_render_targets_memory_range.valid() &&
		test_range.overlaps(m_render_targets_memory_range))
	{
		process_list_function(m_render_targets_storage, false);
	}

	if (m_depth_stencil_memory_range.valid() &&
		test_range.overlaps(m_depth_stencil_memory_range))
	{
		process_list_function(m_depth_stencil_storage, true);
	}

	if (!dirty.empty())
	{
		for (const auto& p : dirty)
		{
			invalidate_surface_address(p.first, p.second);
		}
	}

	if (result.size() > 1)
	{
		std::sort(result.begin(), result.end(), [](const auto &a, const auto &b)
		{
			if (a.surface->last_use_tag == b.surface->last_use_tag)
			{
				const auto area_a = a.dst_area.width * a.dst_area.height;
				const auto area_b = b.dst_area.width * b.dst_area.height;
				return area_a < area_b;
			}
			return a.surface->last_use_tag < b.surface->last_use_tag;
		});
	}

	return result;
}
// Removes sections fully superseded by newer overlapping sections.
// Builds the union range of all sections, then tries the fast range-merge
// pass; if that removes nothing, falls back to the byte-accurate painter's
// algorithm.
// NOTE(review): relies on a default-constructed address_range being
// "inverted" (start at max, end at 0) so the min/max accumulation below
// works on the first iteration — confirm against utils::address_range.
void check_for_duplicates(std::vector<surface_overlap_info>& sections)
{
	utils::address_range test_range;
	for (const auto& section : sections)
	{
		const auto range = section.surface->get_memory_range();
		test_range.start = std::min(test_range.start, range.start);
		test_range.end = std::max(test_range.end, range.end);
	}

	if (!remove_duplicates_fast_impl(sections, test_range))
	{
		remove_duplicates_fallback_impl(sections, test_range);
	}
}
// Notifies bound surfaces that a draw has written to them.
// Cheap early-out: if nothing was rebound since the last write and no MSAA
// resolve is pending, the previous notification still stands.
void on_write(const std::array<bool, 4>& color_mrt_writes_enabled, const bool depth_stencil_writes_enabled)
{
	if (write_tag >= cache_tag && !m_invalidate_on_write)
	{
		return;
	}

	// TODO: Take WCB/WDB into account. Should speed this up a bit by skipping sync_tag calls
	write_tag = rsx::get_shared_tag();

	for (const auto& i : m_bound_render_target_ids)
	{
		if (color_mrt_writes_enabled[i])
		{
			auto surface = m_bound_render_targets[i].second;
			// Fast path when the surface was already written after the last rebind
			if (surface->last_use_tag > cache_tag) [[ likely ]]
			{
				surface->on_write_fast(write_tag);
			}
			else
			{
				surface->on_write(write_tag, rsx::surface_state_flags::require_resolve, m_active_raster_type);
			}
		}
	}

	if (auto zsurface = m_bound_depth_stencil.second;
		zsurface && depth_stencil_writes_enabled)
	{
		if (zsurface->last_use_tag > cache_tag) [[ likely ]]
		{
			zsurface->on_write_fast(write_tag);
		}
		else
		{
			zsurface->on_write(write_tag, rsx::surface_state_flags::require_resolve, m_active_raster_type);
		}
	}
}
void invalidate_all()
{
// Unbind and invalidate all resources
auto free_resource_list = [&](auto &data, const utils::address_range& range)
{
for (auto it = data.begin_range(range); it != data.end(); ++it)
{
invalidate(it->second);
}
data.clear();
};
free_resource_list(m_render_targets_storage, m_render_targets_memory_range);
free_resource_list(m_depth_stencil_storage, m_depth_stencil_memory_range);
ensure(m_active_memory_used == 0);
m_bound_depth_stencil = std::make_pair(0, nullptr);
m_bound_render_target_ids.clear();
for (auto &rtt : m_bound_render_targets)
{
rtt = std::make_pair(0, nullptr);
}
}
// Marks every surface overlapping 'range' as having lost its GPU contents:
// pending transfer references are dropped and the surface will be cleared
// before its next use.
void invalidate_range(const rsx::address_range& range)
{
	auto wipe_overlapping = [&range](auto& pool)
	{
		for (auto it = pool.begin_range(range); it != pool.end(); ++it)
		{
			auto& surface = it->second;
			if (range.overlaps(surface->get_memory_range()))
			{
				surface->clear_rw_barrier();
				surface->state_flags |= rsx::surface_state_flags::erase_bkgnd;
			}
		}
	};

	wipe_overlapping(m_render_targets_storage);
	wipe_overlapping(m_depth_stencil_storage);
}
// Returns true when active surface memory exceeds 'max_safe_memory'.
// Logs louder (warning vs trace) once usage passes 1.5x the limit.
bool check_memory_usage(u64 max_safe_memory) const
{
	if (m_active_memory_used <= max_safe_memory) [[likely]]
	{
		return false;
	}

	const u64 used_MB = m_active_memory_used / 0x100000;
	if (m_active_memory_used > (max_safe_memory * 3) / 2)
	{
		rsx_log.warning("Surface cache is using too much memory! (%dM)", used_MB);
	}
	else
	{
		rsx_log.trace("Surface cache is using too much memory! (%dM)", used_MB);
	}

	return true;
}
// Hook for backends to veto collapsing (resolving pending old_contents into)
// a surface under memory pressure. The default allows every collapse.
virtual bool can_collapse_surface(const surface_storage_type&, problem_severity)
{
	return true;
}
// Resolves pending transfers on invalidated surfaces that still reference
// other invalidated surfaces. Breaking these dependency chains releases VRAM
// that would otherwise stay pinned indefinitely.
void trim_invalidated_resources(command_list_type cmd, problem_severity severity)
{
	for (auto& surface : invalidated_resources)
	{
		const bool holds_references = surface->has_refs() && !surface->old_contents.empty();
		if (!holds_references)
		{
			continue;
		}

		if (can_collapse_surface(surface, severity))
		{
			surface->memory_barrier(cmd, rsx::surface_access::transfer_read);
		}
	}
}
// Walks all active surfaces: dirty surfaces get their pending transfers
// resolved (releasing referenced resources); clean surfaces whose backing
// memory no longer matches are dropped from the cache entirely.
void collapse_dirty_surfaces(command_list_type cmd, problem_severity severity)
{
	auto process_list_function = [&](surface_ranged_map& data, const utils::address_range& range)
	{
		for (auto It = data.begin_range(range); It != data.end();)
		{
			auto surface = Traits::get(It->second);
			if (surface->dirty())
			{
				// Force memory barrier to release some resources
				if (can_collapse_surface(It->second, severity))
				{
					// NOTE: Do not call memory_barrier under fatal conditions as it can create allocations!
					// It would be safer to leave the resources hanging around and spill them instead
					surface->memory_barrier(cmd, rsx::surface_access::memory_read);
				}
			}
			else if (!surface->test())
			{
				// Remove this
				invalidate(It->second);
				It = data.erase(It); // erase advances the iterator; skip the ++ below
				continue;
			}

			++It;
		}
	};

	process_list_function(m_render_targets_storage, m_render_targets_memory_range);
	process_list_function(m_depth_stencil_storage, m_depth_stencil_memory_range);
}
// Attempts to reclaim surface memory under pressure.
// Returns true if any active memory was actually freed.
virtual bool handle_memory_pressure(command_list_type cmd, problem_severity severity)
{
	ensure(severity >= rsx::problem_severity::moderate);
	const auto usage_before = m_active_memory_used;

	// Try and find old surfaces to remove
	collapse_dirty_surfaces(cmd, severity);

	// Invalidated resources can hold long dependency chains; trim them when
	// the pool is oversized or the situation is already severe.
	const bool pool_oversized = invalidated_resources.size() > max_invalidated_resources_count;
	if (pool_oversized || severity >= rsx::problem_severity::severe)
	{
		trim_invalidated_resources(cmd, severity);
	}

	return (m_active_memory_used < usage_before);
}
// Periodic housekeeping entry point. Reclaims memory when the cache exceeds
// 'max_surface_store_memory_mb', or trims the invalidated-resource pool when
// it grows past its limit. 'pre_task_callback' runs once before any cleanup
// work is issued on 'cmd' (e.g. to end an active render pass).
void run_cleanup_internal(
	command_list_type cmd,
	rsx::problem_severity memory_pressure,
	u32 max_surface_store_memory_mb,
	std::function<void(command_list_type)> pre_task_callback)
{
	if (check_memory_usage(max_surface_store_memory_mb * 0x100000))
	{
		pre_task_callback(cmd);

		// Escalate to at least 'moderate' since we are over budget
		const auto severity = std::max(memory_pressure, rsx::problem_severity::moderate);
		handle_memory_pressure(cmd, severity);
	}
	else if (invalidated_resources.size() > max_invalidated_resources_count)
	{
		pre_task_callback(cmd);

		rsx_log.warning("[PERFORMANCE WARNING] Invalidated resource pool has exceeded the desired limit. A trim will now be attempted. Current=%u, Limit=%u",
			invalidated_resources.size(), max_invalidated_resources_count);

		// Check invalidated resources as they can have long dependency chains
		trim_invalidated_resources(cmd, memory_pressure);

		if ((invalidated_resources.size() + 16u) > max_invalidated_resources_count)
		{
			// We didn't release enough resources, scan the active RTTs as well
			collapse_dirty_surfaces(cmd, memory_pressure);
		}
	}
}
};
}
| 43,865
|
C++
|
.h
| 1,258
| 29.797297
| 212
| 0.664025
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,023
|
surface_utils.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/surface_utils.h
|
#pragma once
#include "util/types.hpp"
#include "Utilities/geometry.h"
#include "Utilities/address_range.h"
#include "TextureUtils.h"
#include "../rsx_utils.h"
#include "Emu/Memory/vm.h"
#define ENABLE_SURFACE_CACHE_DEBUG 0
namespace rsx
{
// Bitmask of pending state transitions for a surface.
enum surface_state_flags : u32
{
	ready = 0,             // No pending operations
	erase_bkgnd = 1,       // Contents must be cleared/initialized before use
	require_resolve = 2,   // MSAA samples must be resolved to the single-sample image
	require_unresolve = 4  // Single-sample data must be written back into the MSAA image
};

// Arrangement of MSAA samples within surface memory.
enum class surface_sample_layout : u32
{
	null = 0,
	ps3 = 1
};

// How much of a surface's contents could be inherited from another surface.
enum class surface_inheritance_result : u32
{
	none = 0,
	partial,
	full
};
// Describes how an existing surface overlaps a requested texture region.
template <typename surface_type>
struct surface_overlap_info_t
{
	surface_type surface = nullptr;
	u32 base_address = 0;    // Start address of the surface's memory range
	bool is_depth = false;   // True if the surface came from the depth-stencil pool
	bool is_clipped = false; // True if the surface covers only part of the request
	coordu src_area;         // Matching region within the surface
	coordu dst_area;         // Matching region within the requested texture
};
// A pending (deferred) copy of a clipped region from 'source' into 'target',
// with independent X/Y scale factors applied during the transfer.
template <typename surface_type>
struct deferred_clipped_region
{
	u16 src_x, src_y, dst_x, dst_y, width, height;
	f32 transfer_scale_x, transfer_scale_y;
	surface_type target;
	surface_type source;

	// Converts the descriptor to use a different surface pointer type.
	template <typename T>
	deferred_clipped_region<T> cast() const
	{
		deferred_clipped_region<T> ret;
		ret.src_x = src_x;
		ret.src_y = src_y;
		ret.dst_x = dst_x;
		ret.dst_y = dst_y;
		ret.width = width;
		ret.height = height;
		ret.transfer_scale_x = transfer_scale_x;
		ret.transfer_scale_y = transfer_scale_y;
		ret.target = static_cast<T>(target);
		ret.source = static_cast<T>(source);

		return ret;
	}

	// A region is "active" iff it still references a source surface.
	operator bool() const
	{
		return (source != nullptr);
	}

	// Lazily computes the transfer geometry against 'target_surface'.
	// Only runs once: a non-zero width marks the region as initialized.
	template <typename T>
	void init_transfer(T target_surface)
	{
		if (!width)
		{
			// Perform intersection here
			const auto region = rsx::get_transferable_region(target_surface);
			auto src_w = std::get<0>(region);
			auto src_h = std::get<1>(region);
			auto dst_w = std::get<2>(region);
			auto dst_h = std::get<3>(region);

			// Apply resolution scale if needed
			if (g_cfg.video.resolution_scale_percent != 100)
			{
				auto src = static_cast<T>(source);
				std::tie(src_w, src_h) = rsx::apply_resolution_scale<true>(src_w, src_h,
					src->template get_surface_width<rsx::surface_metrics::pixels>(),
					src->template get_surface_height<rsx::surface_metrics::pixels>());

				std::tie(dst_w, dst_h) = rsx::apply_resolution_scale<true>(dst_w, dst_h,
					target_surface->template get_surface_width<rsx::surface_metrics::pixels>(),
					target_surface->template get_surface_height<rsx::surface_metrics::pixels>());
			}

			width = src_w;
			height = src_h;
			transfer_scale_x = f32(dst_w) / src_w;
			transfer_scale_y = f32(dst_h) / src_h;

			target = target_surface;
		}
	}

	// Source rectangle of the transfer. Requires init_transfer() first.
	areai src_rect() const
	{
		ensure(width);
		return { src_x, src_y, src_x + width, src_y + height };
	}

	// Destination rectangle (scaled, rounded to nearest). Requires init_transfer() first.
	areai dst_rect() const
	{
		ensure(width);
		return { dst_x, dst_y, dst_x + u16(width * transfer_scale_x + 0.5f), dst_y + u16(height * transfer_scale_y + 0.5f) };
	}
};
template <typename image_storage_type>
struct render_target_descriptor : public rsx::ref_counted
{
u64 last_use_tag = 0; // tag indicating when this block was last confirmed to have been written to
u32 base_addr = 0;    // Guest base address of the surface

#if (ENABLE_SURFACE_CACHE_DEBUG)
u64 memory_hash = 0;  // Full-content hash used by the debug-build dirty test
#else
// (address, sampled value) probes used to detect external writes cheaply
std::array<std::pair<u32, u64>, 3> memory_tag_samples;
#endif

// Pending transfers from older surfaces whose data this surface inherits
std::vector<deferred_clipped_region<image_storage_type>> old_contents;

// Surface properties
u32 rsx_pitch = 0;      // Row pitch in guest memory (may include padding)
u32 native_pitch = 0;   // Row pitch of actual pixel data in bytes
u16 surface_width = 0;  // Width in pixels
u16 surface_height = 0; // Height in pixels
u8 spp = 1;             // Samples per pixel
u8 samples_x = 1;       // Horizontal sample count
u8 samples_y = 1;       // Vertical sample count

rsx::address_range memory_range; // Guest memory span covered by this surface

std::unique_ptr<typename std::remove_pointer_t<image_storage_type>> resolve_surface;
surface_sample_layout sample_layout = surface_sample_layout::null;
surface_raster_type raster_type = surface_raster_type::linear;

flags32_t memory_usage_flags = surface_usage_flags::unknown;
flags32_t state_flags = surface_state_flags::ready;  // Pending ops on the main image
flags32_t msaa_flags = surface_state_flags::ready;   // Pending resolve/unresolve state
flags32_t stencil_init_flags = 0;

// Active member depends on is_depth_surface()
union
{
	rsx::surface_color_format gcm_color_format;
	rsx::surface_depth_format2 gcm_depth_format;
}
format_info;

// Bookkeeping used by the texture cache layered on top of this surface
struct
{
	u64 timestamp = 0;
	bool locked = false;
}
texture_cache_metadata;
render_target_descriptor() {}

virtual ~render_target_descriptor()
{
	// Destroying a surface that still holds old_contents references leaks
	// the referenced resources; flag it loudly.
	if (!old_contents.empty())
	{
		// Cascade resource derefs
		rsx_log.error("Resource was destroyed whilst holding a resource reference!");
	}
}

// Backend-specific image accessor for the given access type.
virtual image_storage_type get_surface(rsx::surface_access access_type) = 0;
// True when this descriptor wraps a depth(-stencil) surface.
virtual bool is_depth_surface() const = 0;

// Clears texture-cache bookkeeping only; surface data is untouched.
void reset()
{
	texture_cache_metadata = {};
}
// Surface width in the requested unit:
// pixels (raw), samples (pixels * samples_x), or bytes (native pitch).
template<rsx::surface_metrics Metrics = rsx::surface_metrics::pixels, typename T = u32>
T get_surface_width() const
{
	if constexpr (Metrics == rsx::surface_metrics::samples)
	{
		return static_cast<T>(surface_width * samples_x);
	}
	else if constexpr (Metrics == rsx::surface_metrics::pixels)
	{
		return static_cast<T>(surface_width);
	}
	else if constexpr (Metrics == rsx::surface_metrics::bytes)
	{
		return static_cast<T>(native_pitch);
	}
	else
	{
		fmt::throw_exception("Unreachable");
	}
}

// Surface height in the requested unit. Note: 'bytes' here means the number
// of rows in memory (rows are measured by pitch, not per-row byte size), so
// it equals the 'samples' metric.
template<rsx::surface_metrics Metrics = rsx::surface_metrics::pixels, typename T = u32>
T get_surface_height() const
{
	if constexpr (Metrics == rsx::surface_metrics::samples)
	{
		return static_cast<T>(surface_height * samples_y);
	}
	else if constexpr (Metrics == rsx::surface_metrics::pixels)
	{
		return static_cast<T>(surface_height);
	}
	else if constexpr (Metrics == rsx::surface_metrics::bytes)
	{
		return static_cast<T>(surface_height * samples_y);
	}
	else
	{
		fmt::throw_exception("Unreachable");
	}
}

// Row pitch in guest memory (may include padding beyond pixel data).
inline u32 get_rsx_pitch() const
{
	return rsx_pitch;
}

// Row pitch of actual pixel data only.
inline u32 get_native_pitch() const
{
	return native_pitch;
}

// Bytes per sample, derived from pitch and sample-width.
inline u8 get_bpp() const
{
	return u8(get_native_pitch() / get_surface_width<rsx::surface_metrics::samples>());
}

// Samples per pixel (1, 2 or 4).
inline u8 get_spp() const
{
	return spp;
}
// Derives spp/samples_x/samples_y from the RSX antialiasing mode.
// Throws on unknown modes.
void set_aa_mode(rsx::surface_antialiasing aa)
{
	switch (aa)
	{
		case rsx::surface_antialiasing::center_1_sample:
			samples_x = samples_y = spp = 1;
			break;
		case rsx::surface_antialiasing::diagonal_centered_2_samples:
			samples_x = spp = 2;
			samples_y = 1;
			break;
		case rsx::surface_antialiasing::square_centered_4_samples:
		case rsx::surface_antialiasing::square_rotated_4_samples:
			samples_x = samples_y = 2;
			spp = 4;
			break;
		default:
			fmt::throw_exception("Unknown AA mode 0x%x", static_cast<u8>(aa));
	}
}

// Sets spp/samples_x/samples_y directly from a raw sample count (1, 2 or 4).
// Throws on any other count.
void set_spp(u8 count)
{
	switch (count)
	{
	case 1:
		samples_x = samples_y = spp = 1;
		break;
	case 2:
		samples_x = spp = 2;
		samples_y = 1;
		break;
	case 4:
		samples_x = samples_y = 2;
		spp = 4;
		break;
	default:
		fmt::throw_exception("Unexpected sample count 0x%x", count);
	}
}
// Format setters write into the union; callers must only read back the
// member matching is_depth_surface().
void set_format(rsx::surface_color_format format)
{
	format_info.gcm_color_format = format;
}

void set_format(rsx::surface_depth_format2 format)
{
	format_info.gcm_depth_format = format;
}

inline rsx::surface_color_format get_surface_color_format() const
{
	return format_info.gcm_color_format;
}

inline rsx::surface_depth_format2 get_surface_depth_format() const
{
	return format_info.gcm_depth_format;
}

// GCM texture format equivalent of this surface's pixel format.
inline u32 get_gcm_format() const
{
	return
	(
		is_depth_surface() ?
			get_compatible_gcm_format(format_info.gcm_depth_format).first :
			get_compatible_gcm_format(format_info.gcm_color_format).first
	);
}

// A surface is dirty if it has pending state transitions or inherited
// contents that have not been transferred in yet.
inline bool dirty() const
{
	return (state_flags != rsx::surface_state_flags::ready) || !old_contents.empty();
}

// True when the surface's only "contents" would come from a background
// clear, i.e. it carries no GPU-side data worth preserving.
inline bool write_through() const
{
	return (state_flags & rsx::surface_state_flags::erase_bkgnd) && old_contents.empty();
}
#if (ENABLE_SURFACE_CACHE_DEBUG)
// Debug builds hash the entire backing memory for an exact dirty test.
// XOR-folds every 8-byte word of pixel data, skipping inter-row padding.
u64 hash_block() const
{
	const auto padding = (rsx_pitch - native_pitch) / 8;
	const auto row_length = (native_pitch) / 8;
	auto num_rows = (surface_height * samples_y);
	auto ptr = reinterpret_cast<u64*>(vm::g_sudo_addr + base_addr);

	auto col = row_length;
	u64 result = 0;

	while (num_rows--)
	{
		while (col--)
		{
			result ^= *ptr++;
		}

		ptr += padding;
		col = row_length;
	}

	return result;
}

// Records the surface's base address and recomputes its memory range.
// The range excludes the trailing pad of the last row.
void queue_tag(u32 address)
{
	ensure(native_pitch);
	ensure(rsx_pitch);

	base_addr = address;

	const u32 internal_height = get_surface_height<rsx::surface_metrics::samples>();
	const u32 excess = (rsx_pitch - native_pitch);
	memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
}

// Snapshots the current memory contents for later comparison by test().
void sync_tag()
{
	memory_hash = hash_block();
}

// Deliberately desynchronizes the tag so test() reports a mismatch.
void shuffle_tag()
{
	memory_hash = ~memory_hash;
}

// True while the backing memory still matches the last sync_tag() snapshot.
bool test() const
{
	return hash_block() == memory_hash;
}
#else
// Release builds probe a few sample points instead of hashing everything.
// Records the base address, picks the probe addresses and recomputes the
// memory range (excluding the trailing pad of the last row).
void queue_tag(u32 address)
{
	ensure(native_pitch);
	ensure(rsx_pitch);

	// Clear metadata
	reset();

	base_addr = address;

	const u32 size_x = (native_pitch > 8)? (native_pitch - 8) : 0u;
	const u32 size_y = u32(surface_height * samples_y) - 1u;
	const position2u samples[] =
	{
		// NOTE: Sorted by probability to catch dirty flag
		{0, 0},
		{size_x, size_y},
		{size_x / 2, size_y / 2},

		// Auxilliary, highly unlikely to ever catch anything
		// NOTE: Currently unused as length of samples is truncated to 3
		{size_x, 0},
		{0, size_y},
	};

	for (uint n = 0; n < memory_tag_samples.size(); ++n)
	{
		const auto sample_offset = (samples[n].y * rsx_pitch) + samples[n].x;
		memory_tag_samples[n].first = (sample_offset + base_addr);
	}

	const u32 internal_height = get_surface_height<rsx::surface_metrics::samples>();
	const u32 excess = (rsx_pitch - native_pitch);
	memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
}

// Snapshots the probed memory values for later comparison by test().
void sync_tag()
{
	for (auto &e : memory_tag_samples)
	{
		e.second = *reinterpret_cast<nse_t<u64, 1>*>(vm::g_sudo_addr + e.first);
	}
}

// Deliberately desynchronizes the first probe so test() reports a mismatch.
void shuffle_tag()
{
	memory_tag_samples[0].second = ~memory_tag_samples[0].second;
}

// True while every probed location still matches its snapshot.
bool test()
{
	for (auto &e : memory_tag_samples)
	{
		if (e.second != *reinterpret_cast<nse_t<u64, 1>*>(vm::g_sudo_addr + e.first))
			return false;
	}

	return true;
}
#endif
// Marks the GPU-side copy as stale: the memory tag is desynced so test()
// fails, and the surface will be background-erased before reuse.
void invalidate_GPU_memory()
{
	// Here be dragons. Use with caution.
	shuffle_tag();
	state_flags |= rsx::surface_state_flags::erase_bkgnd;
}

// Drops all pending old_contents transfers, releasing each source's refcount.
void clear_rw_barrier()
{
	for (auto &e : old_contents)
	{
		ensure(dynamic_cast<rsx::ref_counted*>(e.source))->release();
	}

	old_contents.clear();
}
// Orders pending old_contents transfers oldest-first and returns the index
// of the first transfer worth executing: any transfer that fully covers
// 'target' makes everything older than it redundant.
template <typename T>
u32 prepare_rw_barrier_for_transfer(T *target)
{
	if (old_contents.size() <= 1)
		return 0;

	// Sort here before doing transfers since surfaces may have been updated in the meantime
	std::sort(old_contents.begin(), old_contents.end(), [](auto& a, auto &b)
	{
		auto _a = static_cast<T*>(a.source);
		auto _b = static_cast<T*>(b.source);
		return (_a->last_use_tag < _b->last_use_tag);
	});

	// Try and optimize by omitting possible overlapped transfers
	for (usz i = old_contents.size() - 1; i > 0 /* Intentional */; i--)
	{
		old_contents[i].init_transfer(target);

		const auto dst_area = old_contents[i].dst_rect();
		if (unsigned(dst_area.x2) == target->width() && unsigned(dst_area.y2) == target->height() &&
			!dst_area.x1 && !dst_area.y1)
		{
			// This transfer will overwrite everything older
			return u32(i);
		}
	}

	return 0;
}
// Registers 'other' as the (single) source this surface inherits data from.
// Skipped when pitches differ, since a direct whole-surface transfer would
// not be meaningful. Takes a reference on the source.
template<typename T>
void set_old_contents(T* other)
{
	ensure(old_contents.empty());

	if (!other || other->get_rsx_pitch() != this->get_rsx_pitch())
	{
		return;
	}

	old_contents.emplace_back();
	old_contents.back().source = other;
	other->add_ref();
}
// Registers a clipped region transfer from another surface.
// If 'normalized' is set, the incoming coordinates are in byte/row units and
// are converted back to texel units here. Resolution scaling is then applied
// to the final geometry. Takes a reference on the source.
template<typename T>
void set_old_contents_region(const T& region, bool normalized)
{
	// NOTE: This method will not perform pitch verification!
	ensure(region.source);
	ensure(region.source != static_cast<decltype(region.source)>(this));

	old_contents.push_back(region.template cast<image_storage_type>());
	auto &slice = old_contents.back();
	region.source->add_ref();

	// Reverse normalization process if needed
	if (normalized)
	{
		const u16 bytes_to_texels_x = region.source->get_bpp() * region.source->samples_x;
		const u16 rows_to_texels_y = region.source->samples_y;
		slice.src_x /= bytes_to_texels_x;
		slice.src_y /= rows_to_texels_y;
		slice.width /= bytes_to_texels_x;
		slice.height /= rows_to_texels_y;

		const u16 bytes_to_texels_x2 = (get_bpp() * samples_x);
		const u16 rows_to_texels_y2 = samples_y;
		slice.dst_x /= bytes_to_texels_x2;
		slice.dst_y /= rows_to_texels_y2;

		// Source and destination texel sizes may differ; fold the ratio into the scale
		slice.transfer_scale_x = f32(bytes_to_texels_x) / bytes_to_texels_x2;
		slice.transfer_scale_y = f32(rows_to_texels_y) / rows_to_texels_y2;
	}

	// Apply resolution scale if needed
	if (g_cfg.video.resolution_scale_percent != 100)
	{
		auto [src_width, src_height] = rsx::apply_resolution_scale<true>(slice.width, slice.height, slice.source->width(), slice.source->height());
		auto [dst_width, dst_height] = rsx::apply_resolution_scale<true>(slice.width, slice.height, slice.target->width(), slice.target->height());

		slice.transfer_scale_x *= f32(dst_width) / src_width;
		slice.transfer_scale_y *= f32(dst_height) / src_height;
		slice.width = src_width;
		slice.height = src_height;

		std::tie(slice.src_x, slice.src_y) = rsx::apply_resolution_scale<false>(slice.src_x, slice.src_y, slice.source->width(), slice.source->height());
		std::tie(slice.dst_x, slice.dst_y) = rsx::apply_resolution_scale<false>(slice.dst_x, slice.dst_y, slice.target->width(), slice.target->height());
	}
}
// Inherit overlapping memory contents from 'surface' by registering the
// intersecting region as a deferred transfer. Returns none when the two
// surfaces do not overlap in memory, full when the parent is completely
// consumed, and partial otherwise.
template <typename T>
surface_inheritance_result inherit_surface_contents(T* surface)
{
	const auto child_w = get_surface_width<rsx::surface_metrics::bytes>();
	const auto child_h = get_surface_height<rsx::surface_metrics::bytes>();

	const auto parent_w = surface->template get_surface_width<rsx::surface_metrics::bytes>();
	const auto parent_h = surface->template get_surface_height<rsx::surface_metrics::bytes>();

	const auto [src_offset, dst_offset, size] = rsx::intersect_region(surface->base_addr, parent_w, parent_h, base_addr, child_w, child_h, get_rsx_pitch());

	if (!size.width || !size.height)
	{
		return surface_inheritance_result::none;
	}

	ensure(src_offset.x < parent_w && src_offset.y < parent_h);
	ensure(dst_offset.x < child_w && dst_offset.y < child_h);

	// TODO: Eventually need to stack all the overlapping regions, but for now just do the latest rect in the space
	deferred_clipped_region<T*> region{};
	region.src_x = src_offset.x;
	region.src_y = src_offset.y;
	region.dst_x = dst_offset.x;
	region.dst_y = dst_offset.y;
	region.width = size.width;
	region.height = size.height;
	region.source = surface;
	region.target = static_cast<T*>(this);

	// Coordinates above are in normalized (byte/row) space
	set_old_contents_region(region, true);

	return (region.width == parent_w && region.height == parent_h) ?
		surface_inheritance_result::full :
		surface_inheritance_result::partial;
}
// Mark the surface as freshly written by the GPU: update the use tag
// (if provided), re-sync the memory guard, clear pending barriers and
// schedule an MSAA resolve when the surface is multisampled.
void on_write(u64 write_tag = 0,
	rsx::surface_state_flags resolve_flags = surface_state_flags::require_resolve,
	surface_raster_type type = rsx::surface_raster_type::undefined)
{
	if (write_tag)
	{
		// Update use tag if requested
		last_use_tag = write_tag;
	}

	// Tag unconditionally without introducing new data
	sync_tag();

	// HACK!! This should be cleared through memory barriers only
	state_flags = rsx::surface_state_flags::ready;

	if (spp > 1 && sample_layout != surface_sample_layout::null)
	{
		msaa_flags = resolve_flags;
	}

	if (!old_contents.empty())
	{
		clear_rw_barrier();
	}

	if (type != rsx::surface_raster_type::undefined)
	{
		raster_type = type;
	}
}
// Variant of on_write() used after a write-copy (unresolve) operation.
// Unless optimizations are explicitly kept, depth surfaces additionally
// drop the flat-stencil assumption.
void on_write_copy(u64 write_tag = 0,
	bool keep_optimizations = false,
	surface_raster_type type = rsx::surface_raster_type::undefined)
{
	on_write(write_tag, rsx::surface_state_flags::require_unresolve, type);

	if (!keep_optimizations && is_depth_surface())
	{
		// A successful write-copy occured, cannot guarantee flat contents in stencil area
		stencil_init_flags |= (1 << 9);
	}
}
// Lightweight write notification: only bumps the use tag and flags a
// pending resolve for multisampled surfaces. No tag sync, no barrier work.
inline void on_write_fast(u64 write_tag)
{
	ensure(write_tag);
	last_use_tag = write_tag;

	if (spp > 1 && sample_layout != surface_sample_layout::null)
	{
		msaa_flags |= rsx::surface_state_flags::require_resolve;
	}
}
// Returns the rect area occupied by this surface expressed as an 8bpp image with no AA
inline areau get_normalized_memory_area() const
{
	const u16 internal_width = get_surface_width<rsx::surface_metrics::bytes>();
	const u16 internal_height = get_surface_height<rsx::surface_metrics::bytes>();

	return { 0, 0, internal_width, internal_height };
}
// Guest address range backing this surface (computed when the surface was set up).
inline rsx::address_range get_memory_range() const
{
	return memory_range;
}
// Convert a rect from sample space to pixel space by dividing out the
// per-axis sample counts. No-op for single-sampled surfaces.
template <typename T>
void transform_samples_to_pixels(area_base<T>& area)
{
	if (spp == 1) [[likely]]
	{
		return;
	}

	const auto sx = samples_x;
	const auto sy = samples_y;
	area.x1 = area.x1 / sx;
	area.x2 = area.x2 / sx;
	area.y1 = area.y1 / sy;
	area.y2 = area.y2 / sy;
}
// Convert a rect from pixel space to sample space by multiplying in the
// per-axis sample counts. No-op for single-sampled surfaces.
template <typename T>
void transform_pixels_to_samples(area_base<T>& area)
{
	if (spp == 1) [[likely]]
	{
		return;
	}

	const auto sx = samples_x;
	const auto sy = samples_y;
	area.x1 = area.x1 * sx;
	area.x2 = area.x2 * sx;
	area.y1 = area.y1 * sy;
	area.y2 = area.y2 * sy;
}
// Scalar-coordinate variant of transform_samples_to_pixels (sample -> pixel).
template <typename T>
void transform_samples_to_pixels(T& x1, T& x2, T& y1, T& y2)
{
	if (spp == 1) [[likely]]
	{
		return;
	}

	const auto sx = samples_x;
	const auto sy = samples_y;
	x1 = x1 / sx;
	x2 = x2 / sx;
	y1 = y1 / sy;
	y2 = y2 / sy;
}
// Scalar-coordinate variant of transform_pixels_to_samples (pixel -> sample).
template <typename T>
void transform_pixels_to_samples(T& x1, T& x2, T& y1, T& y2)
{
	if (spp == 1) [[likely]]
	{
		return;
	}

	const auto sx = samples_x;
	const auto sy = samples_y;
	x1 = x1 * sx;
	x2 = x2 * sx;
	y1 = y1 * sy;
	y2 = y2 * sy;
}
// Adjust blit coordinates for MSAA: read/transfer accesses work in pixel
// space, so sample coordinates must be divided down — except for the PS3
// native sample layout which needs no adjustment.
template<typename T>
void transform_blit_coordinates(rsx::surface_access access_type, area_base<T>& region)
{
	if (spp == 1 || sample_layout == rsx::surface_sample_layout::ps3)
		return;

	ensure(access_type.is_read() || access_type.is_transfer());
	transform_samples_to_pixels(region);
}
// Texture cache acquires this surface: pin it with a reference and record
// the lock timestamp.
void on_lock()
{
	add_ref();
	texture_cache_metadata.locked = true;
	texture_cache_metadata.timestamp = rsx::get_shared_tag();
}
// Texture cache releases this surface: clear the locked flag, refresh the
// timestamp and drop the reference taken in on_lock().
void on_unlock()
{
	texture_cache_metadata.locked = false;
	texture_cache_metadata.timestamp = rsx::get_shared_tag();
	release();
}
// Surface leaves the active set. Locked surfaces must go through the
// unlock path so the cache metadata stays consistent; otherwise just
// drop the reference.
void on_swap_out()
{
	if (!is_locked())
	{
		release();
		return;
	}

	on_unlock();
}
// Surface re-enters the active set. When a lock is requested and the
// surface is not yet locked, on_lock() supplies the reference; in every
// other case just add a plain reference.
void on_swap_in(bool lock)
{
	const bool take_lock = lock && !is_locked();
	if (take_lock)
	{
		on_lock();
		return;
	}

	add_ref();
}
// Copy bookkeeping state from the surface this one was cloned from:
// lock state (only when the ref is locked and we are not), pitch, last
// use tag and raster layout.
void on_clone_from(const render_target_descriptor* ref)
{
	if (ref->is_locked() && !is_locked())
	{
		// Propagate locked state only.
		texture_cache_metadata = ref->texture_cache_metadata;
	}

	rsx_pitch = ref->get_rsx_pitch();
	last_use_tag = ref->last_use_tag;
	raster_type = ref->raster_type;     // Can't actually cut up swizzled data
}
// True while the texture cache holds this surface (between on_lock/on_unlock).
bool is_locked() const
{
	return texture_cache_metadata.locked;
}
// True when the surface was written to after the texture cache locked it,
// i.e. there is GPU data newer than what the cache has seen. Only valid
// while locked.
bool has_flushable_data() const
{
	ensure(is_locked());
	ensure(texture_cache_metadata.timestamp);
	return (texture_cache_metadata.timestamp < last_use_tag);
}
};
}
| 19,360
|
C++
|
.h
| 656
| 25.568598
| 155
| 0.66437
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,024
|
rsx_replay.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Capture/rsx_replay.h
|
#pragma once
#include "Emu/CPU/CPUThread.h"
#include "Emu/RSX/rsx_methods.h"
#include <unordered_map>
#include <unordered_set>
namespace rsx
{
// Magic/version constants identifying the RSX frame capture ("RRC") format.
enum : u32
{
	c_fc_magic = "RRC"_u32,
	c_fc_version = 0x5,
};

// Serialized state of a captured RSX frame: command stream plus all memory,
// tile and display-buffer snapshots required to replay it deterministically.
struct frame_capture_data
{
	struct memory_block_data
	{
		std::vector<u8> data{};
	};

	// simple block to hold ps3 address and data
	struct memory_block
	{
		ENABLE_BITWISE_SERIALIZATION;

		u32 offset;      // Offset in rsx address space
		u32 location;    // rsx memory location of the block
		u64 data_state;  // key into memory_data_map for the payload
	};

	struct replay_command
	{
		std::pair<u32, u32> rsx_command{};       // fifo command
		std::unordered_set<u64> memory_state{};  // index into memory_map for the various memory blocks that need applying before this command can run
		u64 tile_state{0};                       // tile state for this command
		u64 display_buffer_state{0};
	};

	struct tile_info
	{
		ENABLE_BITWISE_SERIALIZATION;

		u32 tile;
		u32 limit;
		u32 pitch;
		u32 format;
	};

	struct zcull_info
	{
		ENABLE_BITWISE_SERIALIZATION;

		u32 region;
		u32 size;
		u32 start;
		u32 offset;
		u32 status0;
		u32 status1;
	};

	// bleh, may need to break these out, might be unnecessary to do both always
	struct tile_state
	{
		ENABLE_BITWISE_SERIALIZATION;

		tile_info tiles[15]{};
		zcull_info zculls[8]{};
	};

	struct buffer_state
	{
		ENABLE_BITWISE_SERIALIZATION;

		u32 width{0};
		u32 height{0};
		u32 pitch{0};
		u32 offset{0};
	};

	struct display_buffers_state
	{
		ENABLE_BITWISE_SERIALIZATION;

		std::array<buffer_state, 8> buffers{};
		u32 count{0};
	};

	u32 magic = c_fc_magic;
	u32 version = c_fc_version;
	u32 LE_format = std::endian::little == std::endian::native;

	// hashmap of holding various states for tile
	std::unordered_map<u64, tile_state> tile_map;
	// hashmap of various memory 'changes' that can be applied to ps3 memory
	std::unordered_map<u64, memory_block> memory_map;
	// hashmap of memory blocks that can be applied, this is split from above for size decrease
	std::unordered_map<u64, memory_block_data> memory_data_map;
	// display buffer state map
	std::unordered_map<u64, display_buffers_state> display_buffers_map;
	// actual command queue to hold everything above
	std::vector<replay_command> replay_commands;
	// Initial registers state at the beginning of the capture
	rsx::rsx_state reg_state;

	// Reinitialize for a new capture.
	// NOTE(review): memory_data_map and display_buffers_map are not cleared
	// here — confirm whether that is intentional (stale entries may persist
	// across captures).
	void reset()
	{
		magic = c_fc_magic;
		version = c_fc_version;
		tile_map.clear();
		memory_map.clear();
		replay_commands.clear();
		reg_state = method_registers;
	}
};
// CPU thread that replays a recorded frame_capture_data stream: it allocates
// a fresh RSX context, rebuilds a FIFO from the captured commands and applies
// the saved memory/tile/display state before each command.
class rsx_replay_thread : public cpu_thread
{
	// Mirror of the context parameters returned by the RSX driver when a
	// context is allocated (big-endian, as seen by guest code).
	struct rsx_context
	{
		be_t<u32> user_addr;
		be_t<u64> dev_addr;
		be_t<u32> mem_handle;
		be_t<u32> context_id;
		be_t<u64> mem_addr;
		be_t<u64> dma_addr;
		be_t<u64> reports_addr;
		be_t<u64> driver_info;
	};

	// Last-applied state hashes, used to skip redundant re-application.
	struct current_state
	{
		u64 tile_hash{0};
		u64 display_buffer_hash{0};
		frame_capture_data::display_buffers_state buffer_state{};
		frame_capture_data::tile_state tile_state{};
	};

	u32 user_mem_addr{};
	current_state cs{};
	std::unique_ptr<frame_capture_data> frame;

public:
	rsx_replay_thread(std::unique_ptr<frame_capture_data>&& frame_data)
		: cpu_thread(0)
		, frame(std::move(frame_data))
	{
	}

	using cpu_thread::operator=;

	void cpu_task() override;

private:
	be_t<u32> allocate_context();
	std::vector<u32> alloc_write_fifo(be_t<u32> context_id) const;
	void apply_frame_state(be_t<u32> context_id, const frame_capture_data::replay_command& replay_cmd);
};
}
| 3,566
|
C++
|
.h
| 134
| 23.231343
| 144
| 0.694656
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,025
|
rsx_trace.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Capture/rsx_trace.h
|
#pragma once
#include <string>
#include <array>
#include <vector>
#include "util/types.hpp"
#include "Emu/RSX/rsx_methods.h"
namespace rsx
{
// Per-frame trace used by the RSX debugger: the raw command queue plus a
// full snapshot (state, programs, render targets, index data) per draw call.
struct frame_trace_data
{
	struct draw_state
	{
		std::string name;
		std::pair<std::string, std::string> programs;  // vertex/fragment program sources
		rsx::rsx_state state;
		std::array<std::vector<std::byte>, 4> color_buffer;
		std::array<std::vector<std::byte>, 2> depth_stencil;
		std::vector<std::byte> index;
		u32 vertex_count;
	};

	std::vector<std::pair<u32, u32>> command_queue;
	std::vector<draw_state> draw_calls;

	// Discard all recorded data for the next frame.
	void reset()
	{
		command_queue.clear();
		draw_calls.clear();
	}
};
}
| 608
|
C++
|
.h
| 29
| 18.931034
| 54
| 0.711304
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,026
|
rsx_capture.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Capture/rsx_capture.h
|
#pragma once
#include "rsx_replay.h"
namespace rsx
{
class thread;

// Hooks invoked by the RSX thread while a frame capture is being recorded;
// each snapshots the state a replay needs before the corresponding command.
namespace capture
{
	void capture_draw_memory(thread* rsx);
	void capture_image_in(thread* rsx, frame_capture_data::replay_command& replay_command);
	void capture_buffer_notify(thread* rsx, frame_capture_data::replay_command& replay_command);
	void capture_display_tile_state(thread* rsx, frame_capture_data::replay_command& replay_command);
}
}
| 422
|
C++
|
.h
| 13
| 30.461538
| 99
| 0.776961
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,027
|
RSXDMAWriter.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Host/RSXDMAWriter.h
|
#pragma once
#include <util/types.hpp>
#include <unordered_map>
#include <functional>
#include <deque>
namespace rsx
{
// Shared bookkeeping block visible to the host GPU; tracks event counters
// used to order label writes and texture loads between CPU and GPU.
// Accessed through volatile pointers since the GPU reads it directly.
struct host_gpu_context_t
{
	u64 magic = 0xCAFEBABE;
	u64 event_counter = 0;
	u64 texture_load_request_event = 0;
	u64 texture_load_complete_event = 0;
	u64 last_label_acquire_event = 0;
	u64 last_label_release2_event = 0;
	u64 commands_complete_event = 0;

	// Monotonically advance and return the global event counter.
	inline u64 inc_counter() volatile
	{
		// Workaround for volatile increment warning. GPU can see this value directly, but currently we do not modify it on the device.
		event_counter = event_counter + 1;
		return event_counter;
	}

	// True when every submitted command batch has signalled completion.
	inline bool in_flight_commands_completed() const volatile
	{
		return last_label_release2_event <= commands_complete_event;
	}

	inline bool texture_loads_completed() const volatile
	{
		// Return true if all texture load requests are done.
		return texture_load_complete_event == texture_load_request_event;
	}

	// True when texture loads were requested after the last label release.
	inline bool has_unflushed_texture_loads() const volatile
	{
		return texture_load_request_event > last_label_release2_event;
	}

	// Record a new texture load request; returns its event id.
	inline u64 on_texture_load_acquire() volatile
	{
		texture_load_request_event = inc_counter();
		return texture_load_request_event;
	}

	inline void on_texture_load_release() volatile
	{
		// Normally released by the host device, but implemented nonetheless for software fallback
		texture_load_complete_event = texture_load_request_event;
	}

	// Record a label acquire; returns its event id.
	inline u64 on_label_acquire() volatile
	{
		last_label_acquire_event = inc_counter();
		return last_label_acquire_event;
	}

	// Mark the most recently acquired label as released.
	inline void on_label_release() volatile
	{
		last_label_release2_event = last_label_acquire_event;
	}

	// True when a label was acquired but not yet released.
	inline bool needs_label_release() const volatile
	{
		return last_label_acquire_event > last_label_release2_event;
	}
};
// A queued host-GPU write request, tagged with a dispatch class that selects
// which registered handler services it.
struct host_gpu_write_op_t
{
	int dispatch_class = 0;
	void* userdata = nullptr;
};

// Handler callback for one dispatch class; returns whether the op completed
// (signature inferred from usage in RSXDMAWriter — confirm in the .cpp).
struct host_dispatch_handler_t
{
	int dispatch_class = 0;
	std::function<bool(const volatile host_gpu_context_t*, const host_gpu_write_op_t*)> handler;
};
// Owns a host_gpu_context_t (either placement-constructed in caller-supplied
// memory or adopted) and drains a queue of host GPU write ops through
// per-class dispatch handlers.
class RSXDMAWriter
{
public:
	// Placement-construct the context in 'mem' (caller guarantees size/alignment).
	RSXDMAWriter(void* mem)
		: m_host_context_ptr(new (mem)host_gpu_context_t)
	{}

	// Adopt an existing, externally-owned context.
	RSXDMAWriter(host_gpu_context_t* pctx)
		: m_host_context_ptr(pctx)
	{}

	void update();

	void register_handler(host_dispatch_handler_t handler);
	void deregister_handler(int dispatch_class);

	void enqueue(const host_gpu_write_op_t& request);
	void drain_label_queue();

	volatile host_gpu_context_t* host_ctx() const
	{
		return m_host_context_ptr;
	}

private:
	std::unordered_map<int, host_dispatch_handler_t> m_dispatch_handlers;
	volatile host_gpu_context_t* m_host_context_ptr = nullptr;
	std::deque<host_gpu_write_op_t> m_job_queue;
};
}
| 2,856
|
C++
|
.h
| 93
| 26.365591
| 131
| 0.711291
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,028
|
overlay_debug_overlay.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_debug_overlay.h
|
#pragma once
#include "overlays.h"
namespace rsx
{
namespace overlays
{
// On-screen text overlay for debug output; text is published from any thread
// via the text_guard and consumed when the overlay is compiled.
class debug_overlay : public user_interface
{
	label text_display;
	text_guard_t text_guard{};

public:
	debug_overlay();

	compiled_resource get_compiled() override;

	void set_text(std::string&& text);
};

// Global helpers managing the singleton debug overlay instance.
void reset_debug_overlay();
void set_debug_overlay_text(std::string&& text);
}
}
| 388
|
C++
|
.h
| 19
| 17.526316
| 50
| 0.716253
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,029
|
overlays.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlays.h
|
#pragma once
#include "overlay_animation.h"
#include "overlay_controls.h"
#include "Emu/IdManager.h"
#include "Emu/Io/pad_types.h"
#include "Utilities/mutex.h"
#include "Utilities/Timer.h"
#include "../Common/bitfield.hpp"
#include <mutex>
#include <set>
// Definition of user interface implementations
namespace rsx
{
namespace overlays
{
// Bitfield of UI signals to overlay manager
enum status_bits : u32
{
invalidate_image_cache = 0x0001, // Flush the address-based image cache
};
// Non-interactable UI element
struct overlay
{
u32 uid = umax;
u32 type_index = umax;
static constexpr u16 virtual_width = 1280;
static constexpr u16 virtual_height = 720;
u32 min_refresh_duration_us = 16600;
atomic_t<bool> visible = false;
atomic_bitmask_t<status_bits> status_flags = {};
virtual ~overlay() = default;
virtual void update(u64 /*timestamp_us*/) {}
virtual compiled_resource get_compiled() = 0;
void refresh() const;
};
// Interactable UI element
// Interactable UI element: adds pad/keyboard input handling, auto-repeat,
// pad interception and a blocking input loop on top of 'overlay'.
class user_interface : public overlay
{
public:
	// Move this somewhere to avoid duplication
	enum selection_code
	{
		ok = 0,
		new_save = -1,
		canceled = -2,
		error = -3,
		interrupted = -4,
		no = -5
	};

	static constexpr u64 m_auto_repeat_ms_interval_default = 200;

protected:
	Timer m_input_timer;
	u64 m_auto_repeat_ms_interval = m_auto_repeat_ms_interval_default;

	// Directional inputs repeat while held.
	std::set<pad_button> m_auto_repeat_buttons = {
		pad_button::dpad_up,
		pad_button::dpad_down,
		pad_button::dpad_left,
		pad_button::dpad_right,
		pad_button::rs_up,
		pad_button::rs_down,
		pad_button::rs_left,
		pad_button::rs_right,
		pad_button::ls_up,
		pad_button::ls_down,
		pad_button::ls_left,
		pad_button::ls_right
	};

	atomic_t<bool> m_stop_input_loop = false;
	atomic_t<bool> m_interactive = false;
	bool m_start_pad_interception = true;
	atomic_t<bool> m_stop_pad_interception = false;
	atomic_t<bool> m_input_thread_detached = false;
	atomic_t<u32> thread_bits = 0; // One bit per thread currently inside the input loop
	bool m_keyboard_input_enabled = false; // Allow keyboard input
	bool m_keyboard_pad_handler_active = true; // Initialized as true to prevent keyboard input until proven otherwise.
	bool m_allow_input_on_pause = false;

	static thread_local u32 g_thread_bit;

	u32 alloc_thread_bit();

	std::function<void(s32 status)> on_close = nullptr;

	// RAII guard: reserves a bit in 'thread_bits' for the current thread on
	// construction and clears/notifies it on destruction.
	class thread_bits_allocator
	{
	public:
		thread_bits_allocator(user_interface* parent)
			: m_parent(parent)
		{
			m_thread_bit = m_parent->alloc_thread_bit();
			g_thread_bit = m_thread_bit;
		}

		~thread_bits_allocator()
		{
			m_parent->thread_bits &= ~m_thread_bit;
			m_parent->thread_bits.notify_all();
		}

	private:
		user_interface* m_parent;
		u32 m_thread_bit;
	};

public:
	s32 return_code = 0; // CELL_OK

	bool is_detached() const { return m_input_thread_detached; }
	void detach_input() { m_input_thread_detached.store(true); }

	void update(u64 /*timestamp_us*/) override {}

	compiled_resource get_compiled() override = 0;

	virtual void on_button_pressed(pad_button /*button_press*/, bool /*is_auto_repeat*/) {}
	virtual void on_key_pressed(u32 /*led*/, u32 /*mkey*/, u32 /*key_code*/, u32 /*out_key_code*/, bool /*pressed*/, std::u32string /*key*/) {}

	virtual void close(bool use_callback, bool stop_pad_interception);

	// Blocks polling input until closed; returns the dialog's result code.
	s32 run_input_loop(std::function<bool()> check_state = nullptr);
};
// Thread-safe single-slot text mailbox: a producer publishes text with
// set_text() and the consumer drains it exactly once with get_text().
struct text_guard_t
{
	std::mutex mutex;
	std::string text;
	bool dirty{false};

	// Publish new text, replacing any not-yet-consumed value.
	void set_text(std::string t)
	{
		std::lock_guard lock(mutex);
		text = std::move(t);
		dirty = true;
	}

	// Returns {true, text} once per publication, {false, {}} otherwise.
	std::pair<bool, std::string> get_text()
	{
		// Fix: 'dirty' must be tested while holding the lock. The previous
		// implementation read it unsynchronized, racing the locked write in
		// set_text() (undefined behavior, and a TOCTOU between test and lock).
		std::lock_guard lock(mutex);

		if (dirty)
		{
			dirty = false;
			return { true, std::move(text) };
		}

		return { false, {} };
	}
};
}
}
| 3,895
|
C++
|
.h
| 132
| 25.393939
| 142
| 0.670866
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,030
|
overlay_trophy_notification.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_trophy_notification.h
|
#pragma once
#include "overlays.h"
#include "Emu/Cell/Modules/sceNpTrophy.h"
namespace rsx
{
namespace overlays
{
// Pop-up shown when a trophy is unlocked: icon + text that slides in,
// lingers, then fades out on a timer.
struct trophy_notification : public user_interface
{
private:
	overlay_element frame;
	image_view image;
	label text_view;

	u64 display_sched_id = 0;  // scheduling id for this notification
	u64 creation_time_us = 0;
	std::unique_ptr<image_info> icon_info;

	animation_translate sliding_animation;
	animation_color_interpolate fade_animation;

public:
	trophy_notification();

	void update(u64 timestamp_us) override;
	compiled_resource get_compiled() override;

	// Queue the notification with the trophy's metadata and raw icon bytes.
	s32 show(const SceNpTrophyDetails& trophy, const std::vector<uchar>& trophy_icon_buffer);
};
}
}
| 676
|
C++
|
.h
| 26
| 22.807692
| 92
| 0.752336
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,031
|
overlay_loading_icon.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_loading_icon.hpp
|
#pragma once
#include "overlay_animated_icon.h"
namespace rsx
{
namespace overlays
{
// 24x24 animated "spinner" loading icon; frames are tiled inside a single
// sprite sheet ("spinner-24.png" or caller-supplied image data).
class loading_icon24 : public animated_icon
{
public:
	loading_icon24()
		: animated_icon("spinner-24.png")
	{
		init_params();
	}

	loading_icon24(const std::vector<u8>& icon_data)
		: animated_icon(icon_data)
	{
		init_params();
	}

private:
	// Common geometry setup: frame size, sheet spacing, on-screen size/padding.
	void init_params()
	{
		m_frame_width = m_frame_height = 24;
		m_spacing_x = m_spacing_y = 6;

		set_size(24, 30);
		set_padding(4, 0, 2, 8);
	}
};
}
}
| 568
|
C++
|
.h
| 30
| 14.133333
| 52
| 0.593156
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,032
|
overlay_osk.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_osk.h
|
#pragma once
#include "overlays.h"
#include "overlay_edit_text.hpp"
#include "overlay_cursor.h"
#include "overlay_osk_panel.h"
#include "Emu/Cell/Modules/cellOskDialog.h"
namespace rsx
{
namespace overlays
{
// On-screen keyboard dialog: renders a grid of key cells organized into
// language-specific panels and feeds the edited text back to cellOskDialog.
struct osk_dialog : public user_interface, public OskDialogBase
{
	// Which borders of a key cell to draw; combined for multi-cell keys.
	enum border_flags
	{
		top = 1,
		bottom = 2,
		left = 4,
		right = 8,

		start_cell = top | bottom | left,
		end_cell = top | bottom | right,
		middle_cell = top | bottom,
		default_cell = top | bottom | left | right
	};

	// One key in the on-screen grid.
	struct cell
	{
		position2u pos;
		color4f backcolor{};
		border_flags flags = default_cell;
		button_flags button_flag = button_flags::_default;
		bool selected = false;
		bool enabled = false;

		// outputs[charset][shift_layer] -> produced string
		std::vector<std::vector<std::u32string>> outputs;
		callback_t callback;
	};

	// Mutex for interaction between overlay and cellOskDialog
	shared_mutex m_preview_mutex;

	// Base UI configuration
	bool m_use_separate_windows = false;
	bool m_show_panel = true;
	osk_window_layout m_layout = {};
	osk_window_layout m_input_layout = {}; // Only used with separate windows
	osk_window_layout m_panel_layout = {}; // Only used with separate windows
	u32 m_input_field_window_width = 0; // Only used with separate windows
	f32 m_scaling = 1.0f;

	// Base UI
	overlay_element m_input_frame;
	overlay_element m_panel_frame;
	overlay_element m_background;
	label m_title;
	edit_text m_preview;
	image_button m_btn_accept;
	image_button m_btn_cancel;
	image_button m_btn_shift;
	image_button m_btn_space;
	image_button m_btn_delete;

	// Pointer
	cursor_item m_pointer{};

	// Analog movement
	s16 m_x_input_pos = 0;
	s16 m_y_input_pos = 0;
	s16 m_x_panel_pos = 0;
	s16 m_y_panel_pos = 0;

	// Grid
	u16 cell_size_x = 0;
	u16 cell_size_y = 0;
	u16 num_columns = 0;
	u16 num_rows = 0;
	std::vector<u32> num_shift_layers_by_charset;
	u32 selected_x = 0;
	u32 selected_y = 0;
	u32 selected_z = 0; // shift layer
	u32 m_selected_charset = 0;

	std::vector<cell> m_grid;

	// Password mode (****)
	bool m_password_mode = false;

	// Fade in/out
	animation_color_interpolate fade_animation;

	bool m_reset_pulse = false;
	overlay_element m_key_pulse_cache; // Let's use this to store the pulse offset of the key, since we don't seem to cache the keys themselves.

	bool m_update = true;
	compiled_resource m_cached_resource;

	u32 flags = 0;
	u32 char_limit = umax;

	std::vector<osk_panel> m_panels;
	usz m_panel_index = 0;

	osk_dialog();
	~osk_dialog() override = default;

	// OskDialogBase interface (driven by cellOskDialog)
	void Create(const osk_params& params) override;
	void Close(s32 status) override;
	void Clear(bool clear_all_data) override;
	void SetText(const std::u16string& text) override;
	void Insert(const std::u16string& text) override;

	void initialize_layout(const std::u32string& title, const std::u32string& initial_text);

	void add_panel(const osk_panel& panel);
	void step_panel(bool next_panel);
	void update_panel();
	void update_layout();

	void update(u64 timestamp_us) override;
	void update_controls();
	void update_selection_by_index(u32 index);

	void set_visible(bool visible);

	void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
	void on_key_pressed(u32 led, u32 mkey, u32 key_code, u32 out_key_code, bool pressed, std::u32string key) override;
	void on_text_changed();

	// Key-cell callbacks
	void on_default_callback(const std::u32string& str);
	void on_shift(const std::u32string&);
	void on_layer(const std::u32string&);
	void on_space(const std::u32string&);
	void on_backspace(const std::u32string&);
	void on_delete(const std::u32string&);
	void on_enter(const std::u32string&);
	void on_move_cursor(const std::u32string&, edit_text::direction dir);

	std::u32string get_placeholder() const;

	std::pair<u32, u32> get_cell_geometry(u32 index);

	// Apply the UI scale factor to a coordinate/size value.
	template <typename T>
	s16 get_scaled(T val)
	{
		return static_cast<s16>(static_cast<f32>(val) * m_scaling);
	}

	compiled_resource get_compiled() override;
};
}
}
| 4,105
|
C++
|
.h
| 123
| 29.292683
| 143
| 0.694156
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,033
|
overlay_animation.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_animation.h
|
#pragma once
#include "util/types.hpp"
#include "Utilities/geometry.h"
#include "overlay_utils.h"
#include <functional>
namespace rsx
{
namespace overlays
{
struct compiled_resource;
// Easing curves available to overlay animations.
enum class animation_type
{
	linear,
	ease_in_quad,
	ease_out_quad,
	ease_in_out_cubic,
};

// Common state/interface for time-based overlay animations.
struct animation_base
{
protected:
	u64 timestamp_start_us = 0;
	u64 timestamp_end_us = 0;

	void begin_animation(u64 timestamp_us);
	// Progress in [0,1] for the given time, shaped by 'type'.
	f32 get_progress_ratio(u64 timestamp_us) const;

	// Linear interpolation between a and b at parameter t.
	template<typename T>
	static T lerp(const T& a, const T& b, f32 t)
	{
		return (a * (1.f - t)) + (b * t);
	}

public:
	bool active = false;
	animation_type type { animation_type::linear };
	f32 duration_sec = 1.f; // in seconds
	std::function<void()> on_finish; // invoked when the animation completes

	u64 get_total_duration_us() const;
	u64 get_remaining_duration_us(u64 timestamp_us) const;

	virtual void apply(compiled_resource&) = 0;
	virtual void reset(u64 start_timestamp_us) = 0;
	virtual void update(u64 timestamp_us) = 0;
};
// Animates a 3D translation from 'current' toward 'end'.
struct animation_translate : animation_base
{
private:
	vector3f start{}; // Set `current` instead of this
	// NOTE: Necessary because update() is called after rendering,
	// resulting in one frame of incorrect translation

public:
	vector3f current{};
	vector3f end{};

	void apply(compiled_resource& data) override;
	void reset(u64 start_timestamp_us = 0) override;
	void update(u64 timestamp_us) override;
	void finish(); // jump to the end state and fire on_finish
};
// Animates an RGBA color from 'current' toward 'end' (reuses the
// translate machinery, interpolating color components instead).
struct animation_color_interpolate : animation_translate
{
private:
	color4f start{};

public:
	color4f current{};
	color4f end{};

	void apply(compiled_resource& data) override;
	void reset(u64 start_timestamp_us = 0) override;
	void update(u64 timestamp_us) override;
	void finish(); // jump to the end state and fire on_finish
};
}
}
| 1,842
|
C++
|
.h
| 68
| 23.029412
| 83
| 0.686007
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,034
|
overlay_cursor.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_cursor.h
|
#pragma once
#include "overlays.h"
#include <map>
namespace rsx
{
namespace overlays
{
// Reserved id ranges for cursor owners.
enum cursor_offset : u32
{
	cell_gem = 0, // CELL_GEM_MAX_NUM = 4 Move controllers
	last = 4
};

// A single on-screen crosshair cursor with a position, color and expiry time.
class cursor_item
{
public:
	cursor_item();

	void set_expiration(u64 expiration_time);
	// Setters return true when the value actually changed (i.e. needs redraw).
	bool set_position(s16 x, s16 y);
	bool set_color(color4f color);
	// Hide the cursor once 'time' passes the expiration; returns visibility change.
	bool update_visibility(u64 time);
	bool visible() const;

	compiled_resource get_compiled();

private:
	bool m_visible = false;
	overlay_element m_cross_h{}; // horizontal bar of the crosshair
	overlay_element m_cross_v{}; // vertical bar of the crosshair
	u64 m_expiration_time = 0;
	s16 m_x = 0;
	s16 m_y = 0;
};
// Overlay that owns all active cursors, keyed by owner id.
class cursor_manager final : public overlay
{
public:
	void update(u64 timestamp_us) override;
	compiled_resource get_compiled() override;

	// Create or refresh the cursor 'id' with position/color and a lifetime.
	void update_cursor(u32 id, s16 x, s16 y, const color4f& color, u64 duration_us, bool force_update);

private:
	shared_mutex m_mutex; // guards m_cursors against concurrent update/compile
	std::map<u32, cursor_item> m_cursors;
};

// Convenience entry point targeting the global cursor_manager overlay.
void set_cursor(u32 id, s16 x, s16 y, const color4f& color, u64 duration_us, bool force_update);
} // namespace overlays
} // namespace rsx
| 1,119
|
C++
|
.h
| 43
| 22.627907
| 102
| 0.694549
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,035
|
overlay_osk_panel.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_osk_panel.h
|
#pragma once
#include "Emu/Cell/Modules/cellOskDialog.h"
#include "Utilities/geometry.h"
namespace rsx
{
namespace overlays
{
// Callback invoked with the string produced by an activated key.
using callback_t = std::function<void(const std::u32string&)>;

// Special key roles within a panel grid.
enum button_flags
{
	_default = 0,
	_return = 1,
	_space = 2,
	_shift = 3,
	_layer = 4
};

// Constructor data for one key in a panel layout.
struct grid_entry_ctor
{
	// TODO: change to array with layer_mode::layer_count
	std::vector<std::vector<std::u32string>> outputs; // outputs[charset][shift layer]
	color4f color;
	u32 num_cell_hz;       // horizontal span in cells
	button_flags type_flags;
	callback_t callback;
};
// Base description of one on-screen keyboard panel: grid dimensions plus the
// per-key layout; language-specific panels fill 'layout' in their ctors.
struct osk_panel
{
	// Background colors for normal, special (shift/layer) and accent keys.
	const color4f default_bg = { 0.7f, 0.7f, 0.7f, 1.f };
	const color4f special_bg = { 0.2f, 0.7f, 0.7f, 1.f };
	const color4f special2_bg = { 0.83f, 0.81f, 0.57f, 1.f };

	u32 osk_panel_mode = 0; // CELL_OSKDIALOG_PANELMODE_* value
	u16 num_rows = 0;
	u16 num_columns = 0;
	u16 cell_size_x = 0;
	u16 cell_size_y = 0;

	std::vector<grid_entry_ctor> layout;

	osk_panel(u32 panel_mode = 0);

protected:
	// Localized labels for the always-present control keys.
	std::u32string space;
	std::u32string backspace;
	std::u32string enter; // Return key. Named 'enter' because 'return' is a reserved statement in cpp.
};
// Language/charset-specific panel layouts. Each constructor wires the five
// standard control callbacks (shift, layer, space, delete, enter) into its
// key grid; most Latin-script languages derive from osk_panel_latin.
struct osk_panel_latin : public osk_panel
{
	osk_panel_latin(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode = CELL_OSKDIALOG_PANELMODE_LATIN);
};

struct osk_panel_english : public osk_panel_latin
{
	osk_panel_english(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_spanish : public osk_panel_latin
{
	osk_panel_spanish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_italian : public osk_panel_latin
{
	osk_panel_italian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_danish : public osk_panel_latin
{
	osk_panel_danish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_norwegian : public osk_panel_latin
{
	osk_panel_norwegian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_dutch : public osk_panel_latin
{
	osk_panel_dutch(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_swedish : public osk_panel_latin
{
	osk_panel_swedish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_finnish : public osk_panel_latin
{
	osk_panel_finnish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_portuguese_pt : public osk_panel_latin
{
	osk_panel_portuguese_pt(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_portuguese_br : public osk_panel_latin
{
	osk_panel_portuguese_br(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

// Panels with layouts distinct enough to derive directly from osk_panel.
struct osk_panel_french : public osk_panel
{
	osk_panel_french(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_german : public osk_panel
{
	osk_panel_german(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_turkey : public osk_panel
{
	osk_panel_turkey(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_polish : public osk_panel
{
	osk_panel_polish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_russian : public osk_panel
{
	osk_panel_russian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_korean : public osk_panel
{
	osk_panel_korean(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_chinese : public osk_panel
{
	osk_panel_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode);
};

struct osk_panel_simplified_chinese : public osk_panel_chinese
{
	osk_panel_simplified_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_traditional_chinese : public osk_panel_chinese
{
	osk_panel_traditional_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_japanese : public osk_panel
{
	osk_panel_japanese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_japanese_hiragana : public osk_panel
{
	osk_panel_japanese_hiragana(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_japanese_katakana : public osk_panel
{
	osk_panel_japanese_katakana(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};

struct osk_panel_alphabet_half_width : public osk_panel
{
	osk_panel_alphabet_half_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode = CELL_OSKDIALOG_PANELMODE_ALPHABET);
};

struct osk_panel_alphabet_full_width : public osk_panel
{
	osk_panel_alphabet_full_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};
struct osk_panel_numeral_half_width : public osk_panel
{
osk_panel_numeral_half_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};
struct osk_panel_numeral_full_width : public osk_panel
{
osk_panel_numeral_full_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};
struct osk_panel_url : public osk_panel
{
osk_panel_url(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};
struct osk_panel_password : public osk_panel_alphabet_half_width
{
osk_panel_password(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb);
};
}
}
| 6,857
|
C++
|
.h
| 160
| 39.38125
| 195
| 0.726385
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,036
|
overlay_manager.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_manager.h
|
#pragma once
#include "overlays.h"
#include "Emu/IdManager.h"
#include "Utilities/mutex.h"
#include "Utilities/Thread.h"
#include "Utilities/Timer.h"
#include <deque>
#include <set>
namespace rsx
{
namespace overlays
{
struct overlay;
class display_manager
{
private:
atomic_t<u32> m_uid_ctr = 0;
std::vector<std::shared_ptr<overlay>> m_iface_list;
std::vector<std::shared_ptr<overlay>> m_dirty_list;
shared_mutex m_list_mutex;
lf_queue<u32> m_uids_to_remove;
lf_queue<u32> m_type_ids_to_remove;
atomic_t<u32> m_pending_removals_count = 0;
bool remove_type(u32 type_id);
bool remove_uid(u32 uid);
void cleanup_internal();
void on_overlay_activated(const std::shared_ptr<overlay>& item);
void on_overlay_removed(const std::shared_ptr<overlay>& item);
public:
// Disable default construction to make it conditionally available in g_fxo
explicit display_manager(int) noexcept;
~display_manager();
// Adds an object to the internal list. Optionally removes other objects of the same type.
// Original handle loses ownership but a usable pointer is returned
template <typename T>
std::shared_ptr<T> add(std::shared_ptr<T>& entry, bool remove_existing = true)
{
std::lock_guard lock(m_list_mutex);
entry->uid = m_uid_ctr.fetch_add(1);
entry->type_index = id_manager::typeinfo::get_index<T>();
if (remove_existing)
{
for (auto It = m_iface_list.begin(); It != m_iface_list.end(); It++)
{
if (It->get()->type_index == entry->type_index)
{
// Replace
m_dirty_list.push_back(std::move(*It));
*It = std::move(entry);
return std::static_pointer_cast<T>(*It);
}
}
}
m_iface_list.push_back(std::move(entry));
on_overlay_activated(m_iface_list.back());
return std::static_pointer_cast<T>(m_iface_list.back());
}
// Allocates object and adds to internal list. Returns pointer to created object
template <typename T, typename ...Args>
std::shared_ptr<T> create(Args&&... args)
{
auto object = std::make_shared<T>(std::forward<Args>(args)...);
return add(object);
}
// Removes item from list if it matches the uid
void remove(u32 uid);
// Removes all objects of this type from the list
template <typename T>
void remove()
{
const auto type_id = id_manager::typeinfo::get_index<T>();
if (m_list_mutex.try_lock())
{
remove_type(type_id);
m_list_mutex.unlock();
return;
}
// Enqueue
m_type_ids_to_remove.push(type_id);
m_pending_removals_count++;
}
// True if any visible elements to draw exist
bool has_visible() const
{
return !m_iface_list.empty();
}
// True if any elements have been deleted but their resources may not have been cleaned up
bool has_dirty() const
{
return !m_dirty_list.empty();
}
// Returns current list for reading. Caller must ensure synchronization by first locking the list
const std::vector<std::shared_ptr<overlay>>& get_views() const
{
return m_iface_list;
}
// Returns current list of removed objects not yet deallocated for reading.
// Caller must ensure synchronization by first locking the list
const std::vector<std::shared_ptr<overlay>>& get_dirty() const
{
return m_dirty_list;
}
// Deallocate object. Object must first be removed via the remove() functions
void dispose(const std::vector<u32>& uids);
// Returns pointer to the object matching the given uid
std::shared_ptr<overlay> get(u32 uid);
// Returns pointer to the first object matching the given type
template <typename T>
std::shared_ptr<T> get()
{
reader_lock lock(m_list_mutex);
const auto type_id = id_manager::typeinfo::get_index<T>();
for (const auto& iface : m_iface_list)
{
if (iface->type_index == type_id)
{
return std::static_pointer_cast<T>(iface);
}
}
return {};
}
// Lock for exclusive access (BasicLockable)
void lock();
// Release lock (BasicLockable). May perform internal cleanup before returning
void unlock();
// Lock for shared access (reader-lock)
void lock_shared();
// Unlock for shared access (reader-lock)
void unlock_shared();
// Enable input thread attach to the specified interface
void attach_thread_input(
u32 uid, // The input target
const std::string_view& name, // The name of the target
std::function<void()> on_input_loop_enter = nullptr, // [optional] What to do before running the input routine
std::function<void(s32)> on_input_loop_exit = nullptr, // [optional] What to do with the result if any
std::function<s32()> input_loop_override = nullptr); // [optional] What to do during the input loop. By default calls user_interface::run_input_loop
private:
struct overlay_input_thread
{
static constexpr auto thread_name = "Overlay Input Thread"sv;
};
struct input_thread_context_t
{
// Ctor
input_thread_context_t(
const std::string_view& name,
std::shared_ptr<user_interface> iface,
std::function<void()> on_input_loop_enter,
std::function<void(s32)> on_input_loop_exit,
std::function<s32()> input_loop_override)
: name(name)
, target(iface)
, input_loop_prologue(on_input_loop_enter)
, input_loop_epilogue(on_input_loop_exit)
, input_loop_override(input_loop_override)
, prologue_completed(false)
{}
// Attributes
std::string_view name;
std::shared_ptr<user_interface> target;
std::function<void()> input_loop_prologue;
std::function<void(s32)> input_loop_epilogue;
std::function<s32()> input_loop_override;
// Runtime stats
bool prologue_completed;
};
lf_queue<input_thread_context_t> m_input_token_stack;
atomic_t<bool> m_input_thread_abort = false;
atomic_t<bool> m_input_thread_interrupted = false;
shared_mutex m_input_stack_guard;
std::shared_ptr<named_thread<overlay_input_thread>> m_input_thread;
void input_thread_loop();
};
}
}
| 6,386
|
C++
|
.h
| 175
| 30.64
| 157
| 0.651982
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,037
|
overlay_controls.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_controls.h
|
#pragma once
#include "overlay_fonts.h"
#include "Emu/localized_string.h"
#include "Emu/Cell/timers.hpp"
#include <memory>
// Definitions for common UI controls and their routines
namespace rsx
{
namespace overlays
{
enum image_resource_id : u8
{
// NOTE: 1 - 252 are user defined
none = 0, // No image
raw_image = 252, // Raw image data passed via image_info struct
font_file = 253, // Font file
game_icon = 254, // Use game icon
backbuffer = 255 // Use current backbuffer contents
};
enum class primitive_type : u8
{
quad_list = 0,
triangle_strip = 1,
line_list = 2,
line_strip = 3,
triangle_fan = 4
};
struct image_info
{
int w = 0, h = 0;
int bpp = 0;
u8* data = nullptr;
image_info(image_info&) = delete;
image_info(const char* filename);
image_info(const std::vector<u8>& bytes);
~image_info();
void load_data(const std::vector<u8>& bytes);
};
struct resource_config
{
enum standard_image_resource : u8
{
fade_top = 1,
fade_bottom,
select,
start,
cross,
circle,
triangle,
square,
L1,
R1,
L2,
R2,
save,
new_entry
};
// Define resources
std::vector<std::unique_ptr<image_info>> texture_raw_data;
resource_config();
void load_files();
void free_resources();
};
struct compiled_resource
{
struct command_config
{
primitive_type primitives = primitive_type::quad_list;
color4f color = { 1.f, 1.f, 1.f, 1.f };
bool pulse_glow = false;
bool disable_vertex_snap = false;
f32 pulse_sinus_offset = 0.0f; // The current pulse offset
f32 pulse_speed_modifier = 0.005f;
areaf clip_rect = {};
bool clip_region = false;
u8 texture_ref = image_resource_id::none;
font* font_ref = nullptr;
void* external_data_ref = nullptr;
u8 blur_strength = 0;
command_config() = default;
void set_image_resource(u8 ref);
void set_font(font *ref);
// Analog to overlay_element::set_sinus_offset
f32 get_sinus_value() const;
};
struct command
{
command_config config;
std::vector<vertex> verts;
};
std::vector<command> draw_commands;
void add(const compiled_resource& other);
void add(const compiled_resource& other, f32 x_offset, f32 y_offset);
void add(const compiled_resource& other, f32 x_offset, f32 y_offset, const areaf& clip_rect);
//! Clear commands list
void clear();
//! Append the command to the back of the commands list and return it
command& append(const command& new_command);
//! Prepend the command to the front of the commands list and return it
command& prepend(const command& new_command);
};
struct overlay_element
{
enum text_align
{
left = 0,
center,
right
};
s16 x = 0;
s16 y = 0;
u16 w = 0;
u16 h = 0;
std::u32string text;
font* font_ref = nullptr;
text_align alignment = left;
bool wrap_text = false;
bool clip_text = true;
color4f back_color = { 0.f, 0.f, 0.f, 1.f };
color4f fore_color = { 1.f, 1.f, 1.f, 1.f };
bool pulse_effect_enabled = false;
f32 pulse_sinus_offset = 0.0f; // The current pulse offset
f32 pulse_speed_modifier = 0.005f;
// Analog to command_config::get_sinus_value
// Apply modifier for sinus pulse. Resets the pulse. For example:
// 0 -> reset to 0.5 rising
// 0.5 -> reset to 0
// 1 -> reset to 0.5 falling
// 1.5 -> reset to 1
void set_sinus_offset(f32 sinus_modifier);
compiled_resource compiled_resources;
bool is_compiled = false;
u16 padding_left = 0;
u16 padding_right = 0;
u16 padding_top = 0;
u16 padding_bottom = 0;
u16 margin_left = 0;
u16 margin_top = 0;
overlay_element() = default;
overlay_element(u16 _w, u16 _h) : w(_w), h(_h) {}
virtual ~overlay_element() = default;
virtual void refresh();
virtual void translate(s16 _x, s16 _y);
virtual void scale(f32 _x, f32 _y, bool origin_scaling);
virtual void set_pos(s16 _x, s16 _y);
virtual void set_size(u16 _w, u16 _h);
virtual void set_padding(u16 left, u16 right, u16 top, u16 bottom);
virtual void set_padding(u16 padding);
// NOTE: Functions as a simple position offset. Top left corner is the anchor.
virtual void set_margin(u16 left, u16 top);
virtual void set_margin(u16 margin);
virtual void set_text(const std::string& text);
virtual void set_unicode_text(const std::u32string& text);
void set_text(localized_string_id id);
virtual void set_font(const char* font_name, u16 font_size);
virtual void align_text(text_align align);
virtual void set_wrap_text(bool state);
virtual font* get_font() const;
virtual std::vector<vertex> render_text(const char32_t* string, f32 x, f32 y);
virtual compiled_resource& get_compiled();
void measure_text(u16& width, u16& height, bool ignore_word_wrap = false) const;
};
struct layout_container : public overlay_element
{
std::vector<std::unique_ptr<overlay_element>> m_items;
u16 advance_pos = 0;
u16 pack_padding = 0;
u16 scroll_offset_value = 0;
bool auto_resize = true;
virtual overlay_element* add_element(std::unique_ptr<overlay_element>&, int = -1) = 0;
layout_container();
void translate(s16 _x, s16 _y) override;
void set_pos(s16 _x, s16 _y) override;
compiled_resource& get_compiled() override;
virtual u16 get_scroll_offset_px() = 0;
void add_spacer();
};
struct vertical_layout : public layout_container
{
overlay_element* add_element(std::unique_ptr<overlay_element>& item, int offset = -1) override;
compiled_resource& get_compiled() override;
u16 get_scroll_offset_px() override;
};
struct horizontal_layout : public layout_container
{
overlay_element* add_element(std::unique_ptr<overlay_element>& item, int offset = -1) override;
compiled_resource& get_compiled() override;
u16 get_scroll_offset_px() override;
};
// Controls
struct spacer : public overlay_element
{
using overlay_element::overlay_element;
compiled_resource& get_compiled() override
{
// No draw
return compiled_resources;
}
};
struct rounded_rect : public overlay_element
{
u8 radius = 5;
u8 num_control_points = 8; // Smoothness control
using overlay_element::overlay_element;
compiled_resource& get_compiled() override;
};
struct image_view : public overlay_element
{
private:
u8 image_resource_ref = image_resource_id::none;
void* external_ref = nullptr;
// Strength of blur effect
u8 blur_strength = 0;
public:
using overlay_element::overlay_element;
compiled_resource& get_compiled() override;
void set_image_resource(u8 resource_id);
void set_raw_image(image_info* raw_image);
void clear_image();
void set_blur_strength(u8 strength);
};
struct image_button : public image_view
{
u16 text_horizontal_offset = 25;
u16 m_text_offset_x = 0;
s16 m_text_offset_y = 0;
image_button();
image_button(u16 _w, u16 _h);
void set_text_vertical_adjust(s16 offset);
void set_size(u16 /*w*/, u16 h) override;
compiled_resource& get_compiled() override;
};
struct label : public overlay_element
{
label() = default;
label(const std::string& text);
bool auto_resize(bool grow_only = false, u16 limit_w = -1, u16 limit_h = -1);
};
struct graph : public overlay_element
{
private:
std::string m_title;
std::vector<f32> m_datapoints;
u32 m_datapoint_count{};
color4f m_color;
f32 m_min{};
f32 m_max{};
f32 m_avg{};
f32 m_1p{};
f32 m_guide_interval{};
label m_label{};
bool m_show_min_max{false};
bool m_show_1p_avg{false};
bool m_1p_sort_high{false};
public:
graph();
void set_pos(s16 _x, s16 _y) override;
void set_size(u16 _w, u16 _h) override;
void set_title(const char* title);
void set_font(const char* font_name, u16 font_size) override;
void set_font_size(u16 font_size);
void set_count(u32 datapoint_count);
void set_color(color4f color);
void set_guide_interval(f32 guide_interval);
void set_labels_visible(bool show_min_max, bool show_1p_avg);
void set_one_percent_sort_high(bool sort_1p_high);
u16 get_height() const;
u32 get_datapoint_count() const;
void record_datapoint(f32 datapoint, bool update_metrics);
void update();
compiled_resource& get_compiled() override;
};
}
}
| 8,450
|
C++
|
.h
| 274
| 26.781022
| 98
| 0.676525
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,038
|
overlay_compile_notification.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_compile_notification.h
|
#pragma once
#include "util/types.hpp"
#include "util/atomic.hpp"
namespace rsx
{
namespace overlays
{
void show_shader_compile_notification();
std::shared_ptr<atomic_t<u32>> show_ppu_compile_notification();
}
}
| 221
|
C++
|
.h
| 11
| 18.272727
| 65
| 0.764423
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,039
|
overlay_list_view.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_list_view.hpp
|
#pragma once
#include "overlay_controls.h"
namespace rsx
{
namespace overlays
{
struct list_view : public vertical_layout
{
private:
std::unique_ptr<image_view> m_scroll_indicator_top;
std::unique_ptr<image_view> m_scroll_indicator_bottom;
std::unique_ptr<image_button> m_cancel_btn;
std::unique_ptr<image_button> m_accept_btn;
std::unique_ptr<image_button> m_deny_btn;
std::unique_ptr<overlay_element> m_highlight_box;
u16 m_elements_height = 0;
s32 m_selected_entry = -1;
u16 m_elements_count = 0;
bool m_use_separators = false;
bool m_cancel_only = false;
public:
list_view(u16 width, u16 height, bool use_separators = true, bool can_deny = false);
void update_selection();
void select_entry(s32 entry);
void select_next(u16 count = 1);
void select_previous(u16 count = 1);
void add_entry(std::unique_ptr<overlay_element>& entry);
int get_selected_index() const;
bool get_cancel_only() const;
void set_cancel_only(bool cancel_only);
void translate(s16 _x, s16 _y) override;
compiled_resource& get_compiled() override;
};
}
}
| 1,118
|
C++
|
.h
| 35
| 28.371429
| 87
| 0.712687
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,040
|
overlay_fonts.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_fonts.h
|
#pragma once
#include "util/types.hpp"
#include "overlay_utils.h"
#include <memory>
#include <vector>
// STB_IMAGE_IMPLEMENTATION and STB_TRUETYPE_IMPLEMENTATION defined externally
#include <stb_image.h>
#include <stb_truetype.h>
namespace rsx
{
namespace overlays
{
enum class language_class
{
default_ = 0, // Typically latin-1, extended latin, hebrew, arabic and cyrillic
cjk_base = 1, // The thousands of CJK glyphs occupying pages 2E-9F
hangul = 2 // Korean jamo
};
struct glyph_load_setup
{
std::vector<std::string> font_names;
std::vector<std::string> lookup_font_dirs;
};
// Each 'page' holds an indexed block of 256 code points
// The BMP (Basic Multilingual Plane) has 256 allocated pages but not all are necessary
// While there are supplementary planes, the BMP is the most important thing to support
struct codepage
{
static constexpr u32 bitmap_width = 1024;
static constexpr u32 bitmap_height = 1024;
static constexpr u32 char_count = 256; // 16x16 grid at max 48pt
static constexpr u32 oversample = 2;
std::vector<stbtt_packedchar> pack_info;
std::vector<u8> glyph_data;
char32_t glyph_base = 0;
f32 sampler_z = 0.f;
void initialize_glyphs(char32_t codepage_id, f32 font_size, const std::vector<u8>& ttf_data);
stbtt_aligned_quad get_char(char32_t c, f32& x_advance, f32& y_advance);
};
class font
{
private:
f32 size_pt = 12.f;
f32 size_px = 16.f; // Default font 12pt size
f32 em_size = 0.f;
std::string font_name;
std::vector<std::pair<char32_t, std::unique_ptr<codepage>>> m_glyph_map;
bool initialized = false;
struct
{
char32_t codepage_id = 0;
codepage* page = nullptr;
}
codepage_cache;
static language_class classify(char32_t codepage_id);
glyph_load_setup get_glyph_files(language_class class_) const;
codepage* initialize_codepage(char32_t codepage_id);
public:
font(const char* ttf_name, f32 size);
stbtt_aligned_quad get_char(char32_t c, f32& x_advance, f32& y_advance);
std::vector<vertex> render_text_ex(f32& x_advance, f32& y_advance, const char32_t* text, usz char_limit, u16 max_width, bool wrap);
std::vector<vertex> render_text(const char32_t* text, u16 max_width = -1, bool wrap = false);
std::pair<f32, f32> get_char_offset(const char32_t* text, usz max_length, u16 max_width = -1, bool wrap = false);
bool matches(const char* name, int size) const { return font_name == name && static_cast<int>(size_pt) == size; }
std::string_view get_name() const { return font_name; }
f32 get_size_pt() const { return size_pt; }
f32 get_size_px() const { return size_px; }
f32 get_em_size() const { return em_size; }
// Renderer info
size3u get_glyph_data_dimensions() const { return { codepage::bitmap_width, codepage::bitmap_height, ::size32(m_glyph_map) }; }
std::vector<u8> get_glyph_data() const;
};
// TODO: Singletons are cancer
class fontmgr
{
private:
std::vector<std::unique_ptr<font>> fonts;
static fontmgr* m_instance;
font* find(const char* name, int size)
{
for (auto& f : fonts)
{
if (f->matches(name, size))
return f.get();
}
fonts.push_back(std::make_unique<font>(name, static_cast<f32>(size)));
return fonts.back().get();
}
public:
fontmgr() = default;
~fontmgr()
{
if (m_instance)
{
delete m_instance;
m_instance = nullptr;
}
}
static font* get(const char* name, int size)
{
if (m_instance == nullptr)
m_instance = new fontmgr;
return m_instance->find(name, size);
}
};
}
}
| 3,768
|
C++
|
.h
| 107
| 30.093458
| 135
| 0.662233
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,041
|
overlay_media_list_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_media_list_dialog.h
|
#pragma once
#include "overlays.h"
#include "overlay_list_view.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "util/media_utils.h"
namespace rsx
{
namespace overlays
{
struct media_list_dialog : public user_interface
{
public:
enum class media_type
{
invalid, // For internal use only
directory, // For internal use only
audio,
video,
photo,
};
struct media_entry
{
media_type type = media_type::invalid;
std::string name;
std::string path;
utils::media_info info;
u32 index = 0;
media_entry* parent = nullptr;
std::vector<media_entry> children;
};
media_list_dialog();
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
s32 show(media_entry* root, media_entry& result, const std::string& title, u32 focused, bool enable_overlay);
private:
void reload(const std::string& title, u32 focused);
struct media_list_entry : horizontal_layout
{
public:
media_list_entry(const media_entry& entry);
private:
std::unique_ptr<image_info> icon_data;
};
media_entry* m_media = nullptr;
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
std::unique_ptr<label> m_no_media_text;
};
error_code show_media_list_dialog(media_list_dialog::media_type type, const std::string& path, const std::string& title, std::function<void(s32 status, utils::media_info info)> on_finished);
}
}
| 1,556
|
C++
|
.h
| 52
| 26.076923
| 192
| 0.705369
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,042
|
overlay_message.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_message.h
|
#pragma once
#include "overlays.h"
#include "overlay_manager.h"
#include <deque>
namespace rsx
{
namespace overlays
{
enum class message_pin_location
{
bottom_right,
bottom_left,
top_right,
top_left
};
class message_item : public rounded_rect
{
public:
template <typename T>
message_item(const T& msg_id, u64 expiration, std::shared_ptr<atomic_t<u32>> refs, std::shared_ptr<overlay_element> icon = {});
void update(usz index, u64 timestamp_us, s16 x_offset, s16 y_offset);
void set_pos(s16 _x, s16 _y) override;
void reset_expiration();
u64 get_expiration() const;
void ensure_expired();
compiled_resource& get_compiled() override;
bool text_matches(const std::u32string& text) const;
private:
label m_text{};
std::shared_ptr<overlay_element> m_icon{};
animation_color_interpolate m_fade_in_animation;
animation_color_interpolate m_fade_out_animation;
u64 m_expiration_time = 0;
u64 m_visible_duration = 0;
std::shared_ptr<atomic_t<u32>> m_refs;
bool m_processed = false;
usz m_cur_pos = umax;
static constexpr u16 m_margin = 6;
};
class message final : public overlay
{
public:
void update(u64 timestamp_us) override;
compiled_resource get_compiled() override;
template <typename T>
void queue_message(
T msg_id,
u64 expiration,
std::shared_ptr<atomic_t<u32>> refs,
message_pin_location location = message_pin_location::top_left,
std::shared_ptr<overlay_element> icon = {},
bool allow_refresh = false)
{
std::lock_guard lock(m_mutex_queue);
auto* queue = &m_ready_queue_top_left;
switch (location)
{
case message_pin_location::bottom_right:
queue = &m_ready_queue_bottom_right;
break;
case message_pin_location::bottom_left:
queue = &m_ready_queue_bottom_left;
break;
case message_pin_location::top_right:
queue = &m_ready_queue_top_right;
break;
case message_pin_location::top_left:
queue = &m_ready_queue_top_left;
break;
}
if constexpr (std::is_same_v<T, std::initializer_list<localized_string_id>>)
{
for (auto id : msg_id)
{
if (!message_exists(location, id, allow_refresh))
{
queue->emplace_back(id, expiration, refs, icon);
}
}
}
else if (!message_exists(location, msg_id, allow_refresh))
{
queue->emplace_back(msg_id, expiration, std::move(refs), icon);
}
visible = true;
refresh();
}
private:
const u32 max_visible_items = 3;
shared_mutex m_mutex_queue;
// Top and bottom enqueued sets
std::deque<message_item> m_ready_queue_bottom_right;
std::deque<message_item> m_ready_queue_bottom_left;
std::deque<message_item> m_ready_queue_top_right;
std::deque<message_item> m_ready_queue_top_left;
// Top and bottom visible sets
std::deque<message_item> m_visible_items_bottom_right;
std::deque<message_item> m_visible_items_bottom_left;
std::deque<message_item> m_visible_items_top_right;
std::deque<message_item> m_visible_items_top_left;
void update_queue(std::deque<message_item>& vis_set, std::deque<message_item>& ready_set, message_pin_location origin);
// Stacking. Extends the lifetime of a message instead of inserting a duplicate
bool message_exists(message_pin_location location, localized_string_id id, bool allow_refresh);
bool message_exists(message_pin_location location, const std::string& msg, bool allow_refresh);
bool message_exists(message_pin_location location, const std::u32string& msg, bool allow_refresh);
};
template <typename T>
void queue_message(
T msg_id,
u64 expiration = 5'000'000,
std::shared_ptr<atomic_t<u32>> refs = {},
message_pin_location location = message_pin_location::top_left,
std::shared_ptr<overlay_element> icon = {},
bool allow_refresh = false)
{
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
auto msg_overlay = manager->get<rsx::overlays::message>();
if (!msg_overlay)
{
msg_overlay = std::make_shared<rsx::overlays::message>();
msg_overlay = manager->add(msg_overlay);
}
msg_overlay->queue_message(msg_id, expiration, std::move(refs), location, std::move(icon), allow_refresh);
}
}
void refresh_message_queue();
} // namespace overlays
} // namespace rsx
| 4,365
|
C++
|
.h
| 129
| 29.48062
| 130
| 0.69103
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,043
|
overlay_message_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_message_dialog.h
|
#pragma once
#include "overlays.h"
#include "overlay_progress_bar.hpp"
#include "Emu/Cell/Modules/cellMsgDialog.h"
namespace rsx
{
namespace overlays
{
class message_dialog : public user_interface
{
msg_dialog_source m_source = msg_dialog_source::_cellMsgDialog;
label text_display;
image_button btn_ok;
image_button btn_cancel;
overlay_element bottom_bar, background;
image_view background_poster;
std::array<progress_bar, 2> progress_bars{};
u8 num_progress_bars = 0;
s32 taskbar_index = 0;
s32 taskbar_limit = 0;
bool interactive = false;
bool ok_only = false;
bool cancel_only = false;
bool custom_background_allowed = false;
u32 background_blur_strength = 0;
u32 background_darkening_strength = 0;
std::unique_ptr<image_info> background_image;
animation_color_interpolate fade_animation;
text_guard_t text_guard{};
std::array<text_guard_t, 2> bar_text_guard{};
public:
message_dialog(bool allow_custom_background = false);
compiled_resource get_compiled() override;
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
void close(bool use_callback, bool stop_pad_interception) override;
error_code show(bool is_blocking, const std::string& text, const MsgDialogType& type, msg_dialog_source source, std::function<void(s32 status)> on_close);
void set_text(std::string text);
void update_custom_background();
u32 progress_bar_count() const;
void progress_bar_set_taskbar_index(s32 index);
error_code progress_bar_set_message(u32 index, std::string msg);
error_code progress_bar_increment(u32 index, f32 value);
error_code progress_bar_set_value(u32 index, f32 value);
error_code progress_bar_reset(u32 index);
error_code progress_bar_set_limit(u32 index, u32 limit);
msg_dialog_source source() const { return m_source; }
};
}
}
| 1,932
|
C++
|
.h
| 50
| 34.96
| 157
| 0.741296
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,044
|
overlay_save_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_save_dialog.h
|
#pragma once
#include "overlays.h"
#include "overlay_list_view.hpp"
#include "Emu/Cell/Modules/cellSaveData.h"
namespace rsx
{
namespace overlays
{
struct save_dialog : public user_interface
{
private:
struct save_dialog_entry : horizontal_layout
{
private:
std::unique_ptr<image_info> icon_data;
public:
save_dialog_entry(const std::string& text1, const std::string& text2, const std::string& text3, u8 resource_id, const std::vector<u8>& icon_buf);
};
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
std::unique_ptr<label> m_time_thingy;
std::unique_ptr<label> m_no_saves_text;
bool m_no_saves = false;
animation_color_interpolate fade_animation;
public:
save_dialog();
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
s32 show(std::vector<SaveDataEntry>& save_entries, u32 focused, u32 op, vm::ptr<CellSaveDataListSet> listSet, bool enable_overlay);
};
}
}
| 1,131
|
C++
|
.h
| 34
| 29.852941
| 149
| 0.733211
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,045
|
overlay_utils.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_utils.h
|
#pragma once
#include "util/types.hpp"
#include "Utilities/geometry.h"
#include <string>
struct vertex
{
float values[4];
vertex() = default;
vertex(float x, float y)
{
vec2(x, y);
}
vertex(float x, float y, float z)
{
vec3(x, y, z);
}
vertex(float x, float y, float z, float w)
{
vec4(x, y, z, w);
}
vertex(int x, int y, int z, int w)
{
vec4(static_cast<f32>(x), static_cast<f32>(y), static_cast<f32>(z), static_cast<f32>(w));
}
float& operator[](int index)
{
return values[index];
}
float& x()
{
return values[0];
}
float& y()
{
return values[1];
}
float& z()
{
return values[2];
}
float& w()
{
return values[3];
}
void vec2(float x, float y)
{
values[0] = x;
values[1] = y;
values[2] = 0.f;
values[3] = 1.f;
}
void vec3(float x, float y, float z)
{
values[0] = x;
values[1] = y;
values[2] = z;
values[3] = 1.f;
}
void vec4(float x, float y, float z, float w)
{
values[0] = x;
values[1] = y;
values[2] = z;
values[3] = w;
}
void operator += (const vertex& other)
{
values[0] += other.values[0];
values[1] += other.values[1];
values[2] += other.values[2];
values[3] += other.values[3];
}
void operator -= (const vertex& other)
{
values[0] -= other.values[0];
values[1] -= other.values[1];
values[2] -= other.values[2];
values[3] -= other.values[3];
}
};
template<typename T>
struct vector3_base : public position3_base<T>
{
using position3_base<T>::position3_base;
vector3_base(T x, T y, T z)
{
this->x = x;
this->y = y;
this->z = z;
}
vector3_base(const position3_base<T>& other)
{
this->x = other.x;
this->y = other.y;
this->z = other.z;
}
T dot(const vector3_base& rhs) const
{
return (this->x * rhs.x) + (this->y * rhs.y) + (this->z * rhs.z);
}
T distance(const vector3_base& rhs) const
{
const vector3_base d = *this - rhs;
return d.dot(d);
}
};
template<typename T>
vector3_base<T> operator * (const vector3_base<T>& lhs, const vector3_base<T>& rhs)
{
return { lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z };
}
template<typename T>
vector3_base<T> operator * (const vector3_base<T>& lhs, T rhs)
{
return { lhs.x * rhs, lhs.y * rhs, lhs.z * rhs };
}
template<typename T>
vector3_base<T> operator * (T lhs, const vector3_base<T>& rhs)
{
return { lhs * rhs.x, lhs * rhs.y, lhs * rhs.z };
}
template<typename T>
void operator *= (const vector3_base<T>& lhs, const vector3_base<T>& rhs)
{
lhs.x *= rhs.x;
lhs.y *= rhs.y;
lhs.z *= rhs.z;
}
template<typename T>
void operator *= (const vector3_base<T>& lhs, T rhs)
{
lhs.x *= rhs;
lhs.y *= rhs;
lhs.z *= rhs;
}
template<typename T>
void operator < (const vector3_base<T>& lhs, T rhs)
{
return lhs.x < rhs.x && lhs.y < rhs.y && lhs.z < rhs.z;
}
using vector3i = vector3_base<int>;
using vector3f = vector3_base<float>;
std::string utf8_to_ascii8(const std::string& utf8_string);
std::string utf16_to_ascii8(const std::u16string& utf16_string);
std::u16string ascii8_to_utf16(const std::string& ascii_string);
std::u32string utf8_to_u32string(const std::string& utf8_string);
std::u16string u32string_to_utf16(const std::u32string& utf32_string);
std::u32string utf16_to_u32string(const std::u16string& utf16_string);
| 3,240
|
C++
|
.h
| 148
| 19.709459
| 91
| 0.651307
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,046
|
overlay_perf_metrics.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_perf_metrics.h
|
#pragma once
#include "overlays.h"
#include "util/cpu_stats.hpp"
#include "Emu/system_config_types.h"
namespace rsx
{
namespace overlays
{
struct perf_metrics_overlay : overlay
{
private:
// The detail level does not affect frame graphs apart from their width.
// none
// minimal - fps
// low - fps, total cpu usage
// medium - fps, detailed cpu usage
// high - fps, frametime, detailed cpu usage, thread number, rsx load
detail_level m_detail{};
screen_quadrant m_quadrant{};
positioni m_position{};
label m_body{};
label m_titles{};
bool m_framerate_graph_enabled{};
bool m_frametime_graph_enabled{};
graph m_fps_graph;
graph m_frametime_graph;
utils::cpu_stats m_cpu_stats{};
Timer m_update_timer{};
Timer m_frametime_timer{};
u32 m_update_interval{}; // in ms
u32 m_frames{};
std::string m_font{};
u16 m_font_size{};
u32 m_margin_x{}; // horizontal distance to the screen border relative to the screen_quadrant in px
u32 m_margin_y{}; // vertical distance to the screen border relative to the screen_quadrant in px
u32 m_padding{}; // space between overlay elements
f32 m_opacity{}; // 0..1
bool m_center_x{}; // center the overlay horizontally
bool m_center_y{}; // center the overlay vertically
std::string m_color_body;
std::string m_background_body;
std::string m_color_title;
std::string m_background_title;
bool m_force_update{}; // Used to update the overlay metrics without changing the data
bool m_force_repaint{};
bool m_is_initialised{};
const std::string title1_medium{ "CPU Utilization:" };
const std::string title1_high{ "Host Utilization (CPU):" };
const std::string title2{ "Guest Utilization (PS3):" };
f32 m_fps{0};
f32 m_frametime{0};
u64 m_ppu_cycles{0};
u64 m_spu_cycles{0};
u64 m_rsx_cycles{0};
u64 m_total_cycles{0};
u32 m_ppus{0};
u32 m_spus{0};
f32 m_cpu_usage{-1.f};
u32 m_total_threads{0};
f32 m_ppu_usage{0};
f32 m_spu_usage{0};
f32 m_rsx_usage{0};
u32 m_rsx_load{0};
void reset_transform(label& elm) const;
void reset_transforms();
void reset_body();
void reset_titles();
public:
void init();
void set_framerate_graph_enabled(bool enabled);
void set_frametime_graph_enabled(bool enabled);
void set_framerate_datapoint_count(u32 datapoint_count);
void set_frametime_datapoint_count(u32 datapoint_count);
void set_graph_detail_levels(perf_graph_detail_level framerate_level, perf_graph_detail_level frametime_level);
void set_detail_level(detail_level level);
void set_position(screen_quadrant quadrant);
void set_update_interval(u32 update_interval);
void set_font(std::string font);
void set_font_size(u16 font_size);
void set_margins(u32 margin_x, u32 margin_y, bool center_x, bool center_y);
void set_opacity(f32 opacity);
void set_body_colors(std::string color, std::string background);
void set_title_colors(std::string color, std::string background);
void force_next_update();
void update(u64 timestamp_us) override;
compiled_resource get_compiled() override;
};
void reset_performance_overlay();
}
}
| 3,200
|
C++
|
.h
| 90
| 31.677778
| 114
| 0.704016
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,047
|
overlay_edit_text.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_edit_text.hpp
|
#pragma once
#include "overlay_controls.h"
namespace rsx
{
namespace overlays
{
struct edit_text : public label
{
enum class direction
{
up,
down,
left,
right
};
usz caret_position = 0;
u16 vertical_scroll_offset = 0;
bool m_reset_caret_pulse = false;
bool password_mode = false;
std::u32string value;
std::u32string placeholder;
using label::label;
void set_text(const std::string& text) override;
void set_unicode_text(const std::u32string& text) override;
void set_placeholder(const std::u32string& placeholder_text);
void move_caret(direction dir);
void insert_text(const std::u32string& str);
void erase();
void del();
compiled_resource& get_compiled() override;
};
}
}
| 764
|
C++
|
.h
| 33
| 19.454545
| 64
| 0.701803
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,048
|
overlay_animated_icon.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_animated_icon.h
|
#pragma once
#include "overlays.h"
namespace rsx
{
namespace overlays
{
class animated_icon : public image_view
{
public:
animated_icon(const char* icon_name);
animated_icon(const std::vector<u8>& icon_data);
void update_animation_frame(compiled_resource& result);
compiled_resource& get_compiled() override;
protected:
// Some layout and frame data
u16 m_start_x = 0; // X and Y offset of frame 0
u16 m_start_y = 0;
u16 m_frame_width = 32; // W and H of each animation frame
u16 m_frame_height = 32;
u16 m_spacing_x = 8; // Spacing between frames in X and Y
u16 m_spacing_y = 8;
u16 m_row_length = 12; // Number of animation frames in the X direction in case of a 2D grid of frames
u16 m_total_frames = 12; // Total number of available frames
u64 m_frame_duration_us = 100'000; // Hold duration for each frame
// Animation playback variables
int m_current_frame = 0;
u64 m_current_frame_duration_us = 0;
u64 m_last_update_timestamp_us = 0;
private:
std::unique_ptr<image_info> m_icon;
};
}
}
| 1,119
|
C++
|
.h
| 33
| 29.30303
| 108
| 0.672575
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,049
|
overlay_user_list_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_user_list_dialog.h
|
#pragma once
#include "overlays.h"
#include "overlay_list_view.hpp"
#include "Emu/Cell/ErrorCodes.h"
namespace rsx
{
namespace overlays
{
struct user_list_dialog : public user_interface
{
private:
struct user_list_entry : horizontal_layout
{
private:
std::unique_ptr<image_info> icon_data;
public:
user_list_entry(const std::string& username, const std::string& user_id, const std::string& avatar_path);
};
std::vector<u32> m_entry_ids;
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
animation_color_interpolate fade_animation;
public:
user_list_dialog();
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
error_code show(const std::string& title, u32 focused, const std::vector<u32>& user_ids, bool enable_overlay, std::function<void(s32 status)> on_close);
};
}
}
| 1,030
|
C++
|
.h
| 32
| 28.84375
| 155
| 0.732053
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,050
|
overlay_progress_bar.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_progress_bar.hpp
|
#pragma once
#include "overlay_controls.h"
namespace rsx
{
namespace overlays
{
struct progress_bar : public overlay_element
{
private:
overlay_element indicator;
label text_view;
f32 m_limit = 100.f;
f32 m_value = 0.f;
public:
progress_bar();
void inc(f32 value);
void dec(f32 value);
void set_limit(f32 limit);
void set_value(f32 value);
void set_pos(s16 _x, s16 _y) override;
void set_size(u16 _w, u16 _h) override;
void translate(s16 dx, s16 dy) override;
void set_text(const std::string& str) override;
compiled_resource& get_compiled() override;
};
}
}
| 619
|
C++
|
.h
| 27
| 19.703704
| 50
| 0.69506
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,051
|
overlay_friends_list_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/FriendsList/overlay_friends_list_dialog.h
|
#pragma once
#include "../overlays.h"
#include "../overlay_list_view.hpp"
#include "../HomeMenu/overlay_home_menu_message_box.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/NP/rpcn_client.h"
namespace rsx
{
namespace overlays
{
enum class friends_list_dialog_page
{
friends,
invites,
blocked
};
struct friends_list_dialog : public user_interface
{
private:
struct friends_list_entry : horizontal_layout
{
private:
std::unique_ptr<image_info> icon_data;
public:
friends_list_entry(friends_list_dialog_page page, const std::string& username, const rpcn::friend_online_data& data);
};
std::mutex m_list_mutex;
std::vector<u32> m_entry_ids;
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
image_button m_page_btn;
image_button m_extra_btn;
std::shared_ptr<home_menu_message_box> m_message_box;
animation_color_interpolate fade_animation;
std::shared_ptr<rpcn::rpcn_client> m_rpcn;
rpcn::friend_data m_friend_data;
atomic_t<bool> m_list_dirty { true };
atomic_t<friends_list_dialog_page> m_current_page { friends_list_dialog_page::friends };
atomic_t<friends_list_dialog_page> m_last_page { friends_list_dialog_page::friends };
void reload();
public:
friends_list_dialog();
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
error_code show(bool enable_overlay, std::function<void(s32 status)> on_close);
void callback_handler(rpcn::NotificationType ntype, const std::string& username, bool status);
};
}
}
| 1,719
|
C++
|
.h
| 51
| 30.215686
| 121
| 0.728701
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,052
|
shader_loading_dialog_native.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Shaders/shader_loading_dialog_native.h
|
#pragma once
#include "shader_loading_dialog.h"
class GSRender;
namespace rsx
{
namespace overlays
{
class message_dialog;
}
class thread;
struct shader_loading_dialog_native : rsx::shader_loading_dialog
{
rsx::thread* owner = nullptr;
std::shared_ptr<rsx::overlays::message_dialog> dlg{};
shader_loading_dialog_native(GSRender* ptr);
void create(const std::string& msg, const std::string&/* title*/) override;
void update_msg(u32 index, std::string msg) override;
void inc_value(u32 index, u32 value) override;
void set_value(u32 index, u32 value) override;
void set_limit(u32 index, u32 limit) override;
void refresh() override;
void close() override;
};
}
| 695
|
C++
|
.h
| 24
| 26.458333
| 77
| 0.746988
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,053
|
shader_loading_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Shaders/shader_loading_dialog.h
|
#pragma once
class MsgDialogBase;
namespace rsx
{
struct shader_loading_dialog
{
std::shared_ptr<MsgDialogBase> dlg{};
atomic_t<int> ref_cnt{0};
virtual ~shader_loading_dialog() = default;
virtual void create(const std::string& msg, const std::string& title);
virtual void update_msg(u32 index, std::string msg);
virtual void inc_value(u32 index, u32 value);
virtual void set_value(u32 index, u32 value);
virtual void set_limit(u32 index, u32 limit);
virtual void refresh();
virtual void close();
};
}
| 528
|
C++
|
.h
| 18
| 26.888889
| 72
| 0.739645
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,054
|
overlay_home_menu_page.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_page.h
|
#pragma once
#include "Emu/RSX/Overlays/overlays.h"
#include "Emu/RSX/Overlays/overlay_list_view.hpp"
#include "Emu/RSX/Overlays/HomeMenu/overlay_home_menu_components.h"
#include "Emu/RSX/Overlays/HomeMenu/overlay_home_menu_message_box.h"
namespace rsx
{
namespace overlays
{
struct home_menu_page : public list_view
{
public:
home_menu_page(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent, const std::string& text);
void set_current_page(home_menu_page* page);
home_menu_page* get_current_page(bool include_this);
page_navigation handle_button_press(pad_button button_press, bool is_auto_repeat, u64 auto_repeat_interval_ms);
void translate(s16 _x, s16 _y) override;
compiled_resource& get_compiled() override;
bool is_current_page = false;
home_menu_page* parent = nullptr;
std::string title;
std::shared_ptr<home_menu_message_box> m_message_box;
std::shared_ptr<bool> m_config_changed;
protected:
void add_page(std::shared_ptr<home_menu_page> page);
void add_item(std::unique_ptr<overlay_element>& element, std::function<page_navigation(pad_button)> callback);
void apply_layout(bool center_vertically = true);
void show_dialog(const std::string& text, std::function<void()> on_accept = nullptr, std::function<void()> on_cancel = nullptr);
std::vector<std::shared_ptr<home_menu_page>> m_pages;
private:
image_button m_save_btn;
image_button m_discard_btn;
std::vector<std::unique_ptr<overlay_element>> m_entries;
std::vector<std::function<page_navigation(pad_button)>> m_callbacks;
};
}
}
| 1,609
|
C++
|
.h
| 37
| 40.216216
| 131
| 0.738964
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,055
|
overlay_home_menu_main_menu.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_main_menu.h
|
#pragma once
#include "overlay_home_menu_page.h"
#include "overlay_home_menu_settings.h"
namespace rsx
{
namespace overlays
{
struct home_menu_main_menu : public home_menu_page
{
home_menu_main_menu(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
}
}
| 303
|
C++
|
.h
| 13
| 21.230769
| 105
| 0.739583
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,056
|
overlay_home_menu_settings.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_settings.h
|
#pragma once
#include "overlay_home_menu_page.h"
#include "Emu/System.h"
#include "Utilities/Config.h"
namespace rsx
{
namespace overlays
{
struct home_menu_settings : public home_menu_page
{
public:
home_menu_settings(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
private:
std::vector<std::shared_ptr<home_menu_page>> m_settings_pages;
};
struct home_menu_settings_page : public home_menu_page
{
using home_menu_page::home_menu_page;
void add_checkbox(cfg::_bool* setting, localized_string_id loc_id)
{
ensure(setting && setting->get_is_dynamic());
const std::string localized_text = get_localized_string(loc_id);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_checkbox>(setting, localized_text);
add_item(elem, [this, setting](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
if (setting)
{
const bool value = !setting->get();
rsx_log.notice("User toggled checkbox in '%s'. Setting '%s' to %d", title, setting->get_name(), value);
setting->set(value);
Emu.GetCallbacks().update_emu_settings();
if (m_config_changed) *m_config_changed = true;
refresh();
}
return page_navigation::stay;
});
}
template <typename T>
void add_dropdown(cfg::_enum<T>* setting, localized_string_id loc_id)
{
ensure(setting && setting->get_is_dynamic());
const std::string localized_text = get_localized_string(loc_id);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_dropdown<T>>(setting, localized_text);
add_item(elem, [this, setting](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
if (setting)
{
usz new_index = 0;
const T value = setting->get();
const std::string val = fmt::format("%s", value);
const std::vector<std::string> list = setting->to_list();
for (usz i = 0; i < list.size(); i++)
{
const std::string& entry = list[i];
if (entry == val)
{
new_index = (i + 1) % list.size();
break;
}
}
if (const std::string& next_value = ::at32(list, new_index); setting->from_string(next_value))
{
rsx_log.notice("User toggled dropdown in '%s'. Setting '%s' to %s", title, setting->get_name(), next_value);
}
else
{
rsx_log.error("Can't toggle dropdown in '%s'. Setting '%s' to '%s' failed", title, setting->get_name(), next_value);
}
Emu.GetCallbacks().update_emu_settings();
if (m_config_changed) *m_config_changed = true;
refresh();
}
return page_navigation::stay;
});
}
template <s64 Min, s64 Max>
void add_signed_slider(cfg::_int<Min, Max>* setting, localized_string_id loc_id, const std::string& suffix, s64 step_size, std::map<s64, std::string> special_labels = {}, s64 minimum = Min, s64 maximum = Max)
{
ensure(setting && setting->get_is_dynamic());
const std::string localized_text = get_localized_string(loc_id);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_signed_slider<Min, Max>>(setting, localized_text, suffix, special_labels, minimum, maximum);
add_item(elem, [this, setting, step_size, minimum, maximum](pad_button btn) -> page_navigation
{
if (setting)
{
s64 value = setting->get();
switch (btn)
{
case pad_button::dpad_left:
case pad_button::ls_left:
value = std::max(value - step_size, minimum);
break;
case pad_button::dpad_right:
case pad_button::ls_right:
value = std::min(value + step_size, maximum);
break;
default:
return page_navigation::stay;
}
if (value != setting->get())
{
rsx_log.notice("User toggled signed slider in '%s'. Setting '%s' to %d", title, setting->get_name(), value);
setting->set(value);
Emu.GetCallbacks().update_emu_settings();
if (m_config_changed) *m_config_changed = true;
refresh();
}
}
return page_navigation::stay;
});
}
template <u64 Min, u64 Max>
void add_unsigned_slider(cfg::uint<Min, Max>* setting, localized_string_id loc_id, const std::string& suffix, u64 step_size, std::map<u64, std::string> special_labels = {}, const std::set<u64>& exceptions = {}, u64 minimum = Min, u64 maximum = Max)
{
ensure(setting && setting->get_is_dynamic());
ensure(!exceptions.contains(minimum) && !exceptions.contains(maximum));
const std::string localized_text = get_localized_string(loc_id);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_unsigned_slider<Min, Max>>(setting, localized_text, suffix, special_labels, minimum, maximum);
add_item(elem, [this, setting, step_size, minimum, maximum, exceptions](pad_button btn) -> page_navigation
{
if (setting)
{
u64 value = setting->get();
switch (btn)
{
case pad_button::dpad_left:
case pad_button::ls_left:
do
{
value = step_size > value ? minimum : std::max(value - step_size, minimum);
}
while (exceptions.contains(value));
break;
case pad_button::dpad_right:
case pad_button::ls_right:
do
{
value = std::min(value + step_size, maximum);
}
while (exceptions.contains(value));
break;
default:
return page_navigation::stay;
}
if (value != setting->get())
{
rsx_log.notice("User toggled unsigned slider in '%s'. Setting '%s' to %d", title, setting->get_name(), value);
setting->set(value);
Emu.GetCallbacks().update_emu_settings();
if (m_config_changed) *m_config_changed = true;
refresh();
}
}
return page_navigation::stay;
});
}
template <s32 Min, s32 Max>
void add_float_slider(cfg::_float<Min, Max>* setting, localized_string_id loc_id, const std::string& suffix, f32 step_size, std::map<f64, std::string> special_labels = {}, s32 minimum = Min, s32 maximum = Max)
{
ensure(setting && setting->get_is_dynamic());
const std::string localized_text = get_localized_string(loc_id);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_float_slider<Min, Max>>(setting, localized_text, suffix, special_labels, minimum, maximum);
add_item(elem, [this, setting, step_size, minimum, maximum](pad_button btn) -> page_navigation
{
if (setting)
{
f64 value = setting->get();
switch (btn)
{
case pad_button::dpad_left:
case pad_button::ls_left:
value = std::max(value - step_size, static_cast<f64>(minimum));
break;
case pad_button::dpad_right:
case pad_button::ls_right:
value = std::min(value + step_size, static_cast<f64>(maximum));
break;
default:
return page_navigation::stay;
}
if (value != setting->get())
{
rsx_log.notice("User toggled float slider in '%s'. Setting '%s' to %.2f", title, setting->get_name(), value);
setting->set(value);
Emu.GetCallbacks().update_emu_settings();
if (m_config_changed) *m_config_changed = true;
refresh();
}
}
return page_navigation::stay;
});
}
};
struct home_menu_settings_audio : public home_menu_settings_page
{
home_menu_settings_audio(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_video : public home_menu_settings_page
{
home_menu_settings_video(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_advanced : public home_menu_settings_page
{
home_menu_settings_advanced(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_input : public home_menu_settings_page
{
home_menu_settings_input(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_overlays : public home_menu_settings_page
{
home_menu_settings_overlays(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_performance_overlay : public home_menu_settings_page
{
home_menu_settings_performance_overlay(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
struct home_menu_settings_debug : public home_menu_settings_page
{
home_menu_settings_debug(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent);
};
}
}
| 8,765
|
C++
|
.h
| 225
| 33.097778
| 251
| 0.64389
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,057
|
overlay_home_menu_components.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_components.h
|
#pragma once
#include "Emu/RSX/Overlays/overlays.h"
#include "Emu/System.h"
#include "Utilities/Config.h"
namespace rsx
{
namespace overlays
{
static constexpr u16 menu_entry_height = 40;
static constexpr u16 menu_entry_margin = 20;
static constexpr u16 available_side_width = (overlay::virtual_width - 6 * menu_entry_margin) / 2;
static constexpr u16 element_height = 25;
enum class page_navigation
{
stay,
back,
next,
exit
};
struct home_menu_entry : horizontal_layout
{
public:
home_menu_entry(const std::string& text);
};
template <typename T, typename C>
struct home_menu_setting : horizontal_layout
{
public:
home_menu_setting(C* setting, const std::string& text) : m_setting(setting)
{
std::unique_ptr<overlay_element> layout = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> title = std::make_unique<label>(text);
padding->set_size(1, 1);
title->set_size(available_side_width, menu_entry_height);
title->set_font("Arial", 16);
title->set_wrap_text(true);
title->align_text(text_align::right);
// Make back color transparent for text
title->back_color.a = 0.f;
static_cast<vertical_layout*>(layout.get())->pack_padding = 5;
static_cast<vertical_layout*>(layout.get())->add_element(padding);
static_cast<vertical_layout*>(layout.get())->add_element(title);
// Pack
this->pack_padding = 15;
add_element(layout);
update_value(true);
}
void update_value(bool initializing = false)
{
if (m_setting)
{
if (const T new_value = m_setting->get(); new_value != m_last_value || initializing)
{
m_last_value = new_value;
is_compiled = false;
}
}
}
protected:
T m_last_value = {};
C* m_setting = nullptr;
};
struct home_menu_checkbox : public home_menu_setting<bool, cfg::_bool>
{
public:
home_menu_checkbox(cfg::_bool* setting, const std::string& text);
compiled_resource& get_compiled() override;
private:
overlay_element m_background;
overlay_element m_checkbox;
};
template <typename T>
struct home_menu_dropdown : public home_menu_setting<T, cfg::_enum<T>>
{
public:
home_menu_dropdown(cfg::_enum<T>* setting, const std::string& text) : home_menu_setting<T, cfg::_enum<T>>(setting, text)
{
m_dropdown.set_size(available_side_width / 2, element_height);
m_dropdown.set_pos(overlay::virtual_width / 2 + menu_entry_margin, 0);
m_dropdown.set_font("Arial", 14);
m_dropdown.align_text(home_menu_dropdown<T>::text_align::center);
m_dropdown.back_color = { 0.3f, 0.3f, 0.3f, 1.0f };
}
compiled_resource& get_compiled() override
{
this->update_value();
if (!this->is_compiled)
{
const std::string value_text = Emu.GetCallbacks().get_localized_setting(home_menu_setting<T, cfg::_enum<T>>::m_setting, static_cast<u32>(this->m_last_value));
m_dropdown.set_text(value_text);
m_dropdown.set_pos(m_dropdown.x, this->y + (this->h - m_dropdown.h) / 2);
this->compiled_resources = horizontal_layout::get_compiled();
this->compiled_resources.add(m_dropdown.get_compiled());
}
return this->compiled_resources;
}
private:
label m_dropdown;
};
template <typename T, typename C>
struct home_menu_slider : public home_menu_setting<T, C>
{
public:
home_menu_slider(C* setting, const std::string& text, const std::string& suffix, std::map<T, std::string> special_labels = {}, T minimum = C::min, T maximum = C::max)
: home_menu_setting<T, C>(setting, text)
, m_suffix(suffix)
, m_special_labels(std::move(special_labels))
, m_minimum(minimum)
, m_maximum(maximum)
{
m_slider.set_size(available_side_width / 2, element_height);
m_slider.set_pos(overlay::virtual_width / 2 + menu_entry_margin, 0);
m_slider.back_color = { 0.3f, 0.3f, 0.3f, 1.0f };
m_handle.set_size(element_height / 2, element_height);
m_handle.set_pos(m_slider.x, 0);
m_handle.back_color = { 1.0f, 1.0f, 1.0f, 1.0f };
m_value_label.back_color = m_slider.back_color;
m_value_label.set_font("Arial", 14);
}
compiled_resource& get_compiled() override
{
this->update_value();
if (!this->is_compiled)
{
const f64 percentage = std::clamp((this->m_last_value - static_cast<T>(m_minimum)) / std::fabs(m_maximum - m_minimum), 0.0, 1.0);
m_slider.set_pos(m_slider.x, this->y + (this->h - m_slider.h) / 2);
m_handle.set_pos(m_slider.x + static_cast<s16>(percentage * (m_slider.w - m_handle.w)), this->y + (this->h - m_handle.h) / 2);
const auto set_label_text = [this]() -> void
{
if (const auto it = m_special_labels.find(this->m_last_value); it != m_special_labels.cend())
{
m_value_label.set_text(it->second);
return;
}
if constexpr (std::is_floating_point_v<T>)
{
m_value_label.set_text(fmt::format("%.2f%s", this->m_last_value, m_suffix));
}
else
{
m_value_label.set_text(fmt::format("%d%s", this->m_last_value, m_suffix));
}
};
set_label_text();
m_value_label.auto_resize();
constexpr u16 handle_margin = 10;
if ((m_handle.x - m_slider.x) > (m_slider.w - (m_handle.w + 2 * handle_margin + m_value_label.w)))
{
m_value_label.set_pos(m_handle.x - (handle_margin + m_value_label.w), m_handle.y);
}
else
{
m_value_label.set_pos(m_handle.x + m_handle.w + handle_margin, m_handle.y);
}
this->compiled_resources = horizontal_layout::get_compiled();
this->compiled_resources.add(m_slider.get_compiled());
this->compiled_resources.add(m_handle.get_compiled());
this->compiled_resources.add(m_value_label.get_compiled());
}
return this->compiled_resources;
}
private:
overlay_element m_slider;
overlay_element m_handle;
label m_value_label;
std::string m_suffix;
std::map<T, std::string> m_special_labels;
T m_minimum{};
T m_maximum{};
};
template <s64 Min, s64 Max>
struct home_menu_signed_slider : public home_menu_slider<s64, cfg::_int<Min, Max>>
{
using home_menu_slider<s64, cfg::_int<Min, Max>>::home_menu_slider;
};
template <u64 Min, u64 Max>
struct home_menu_unsigned_slider : public home_menu_slider<u64, cfg::uint<Min, Max>>
{
using home_menu_slider<u64, cfg::uint<Min, Max>>::home_menu_slider;
};
template <s32 Min, s32 Max>
struct home_menu_float_slider : public home_menu_slider<f64, cfg::_float<Min, Max>>
{
using home_menu_slider<f64, cfg::_float<Min, Max>>::home_menu_slider;
};
}
}
| 6,704
|
C++
|
.h
| 188
| 31.026596
| 169
| 0.65422
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,058
|
overlay_home_menu_message_box.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_message_box.h
|
#pragma once
#include "overlay_home_menu_components.h"
namespace rsx
{
namespace overlays
{
struct home_menu_message_box : public overlay_element
{
public:
home_menu_message_box(s16 x, s16 y, u16 width, u16 height);
compiled_resource& get_compiled() override;
void show(const std::string& text, std::function<void()> on_accept = nullptr, std::function<void()> on_cancel = nullptr);
void hide();
page_navigation handle_button_press(pad_button button_press);
bool visible() const { return m_visible; }
private:
bool m_visible = false;
label m_label{};
image_button m_accept_btn;
image_button m_cancel_btn;
std::function<void()> m_on_accept;
std::function<void()> m_on_cancel;
};
}
}
| 733
|
C++
|
.h
| 25
| 26.24
| 124
| 0.70922
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,059
|
overlay_home_menu.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu.h
|
#pragma once
#include "Emu/RSX/Overlays/overlays.h"
#include "Emu/Cell/ErrorCodes.h"
#include "overlay_home_menu_main_menu.h"
namespace rsx
{
namespace overlays
{
// Top-level home-menu overlay dialog. Hosts the main menu, a dimmed backdrop,
// a description line and a clock display; reports its close status via callback.
struct home_menu_dialog : public user_interface
{
public:
home_menu_dialog();
// Per-frame update; timestamp is in microseconds.
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
// Opens the menu; on_close receives the final status code when it is dismissed.
error_code show(std::function<void(s32 status)> on_close);
private:
home_menu_main_menu m_main_menu;
overlay_element m_dim_background{}; // dims the game scene behind the menu
label m_description{};
label m_time_display{};
animation_color_interpolate fade_animation{}; // fade in/out transition
};
}
}
| 690
|
C++
|
.h
| 25
| 24.6
| 81
| 0.744681
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,060
|
overlay_recvmessage_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Network/overlay_recvmessage_dialog.h
|
#pragma once
#include "../overlays.h"
#include "../overlay_list_view.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/Modules/sceNp.h"
namespace rsx
{
namespace overlays
{
// Overlay dialog that lists received NP basic messages and lets the user pick
// one to act on. Entries arrive asynchronously through callback_handler.
struct recvmessage_dialog : public user_interface, public RecvMessageDialogBase
{
private:
// One row in the message list: sender name, subject and body.
struct list_entry : horizontal_layout
{
public:
list_entry(const std::string& name, const std::string& subj, const std::string& body);
};
// Guards m_entries/m_entry_ids, which are filled from the NP callback thread.
shared_mutex m_mutex;
std::vector<std::unique_ptr<overlay_element>> m_entries;
std::vector<u64> m_entry_ids; // message ids, parallel to m_entries
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
animation_color_interpolate fade_animation;
public:
recvmessage_dialog();
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
// Runs the dialog; on success fills recv_result and the chosen message id.
error_code Exec(SceNpBasicMessageMainType type, SceNpBasicMessageRecvOptions options, SceNpBasicMessageRecvAction& recv_result, u64& chosen_msg_id) override;
// Invoked when a new message arrives; appends it to the visible list.
void callback_handler(const std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) override;
};
}
}
| 1,232
|
C++
|
.h
| 34
| 33
| 160
| 0.752101
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,061
|
overlay_sendmessage_dialog.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Network/overlay_sendmessage_dialog.h
|
#pragma once
#include "../overlays.h"
#include "../overlay_list_view.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/Modules/sceNp.h"
namespace rsx
{
namespace overlays
{
// Overlay dialog for sending an NP basic message: shows a recipient list and a
// confirmation step before dispatching.
struct sendmessage_dialog : public user_interface, public SendMessageDialogBase
{
private:
// One row in the recipient list.
struct list_entry : horizontal_layout
{
public:
list_entry(const std::string& msg);
};
// Guards m_entry_names, updated from the NP callback thread.
shared_mutex m_mutex;
std::vector<std::string> m_entry_names;
std::unique_ptr<overlay_element> m_dim_background;
std::unique_ptr<list_view> m_list;
std::unique_ptr<label> m_description;
// Cross-thread flags driving the confirmation sub-dialog lifecycle.
atomic_t<bool> m_open_confirmation_dialog = false;
atomic_t<bool> m_confirmation_dialog_open = false;
animation_color_interpolate fade_animation;
// Name currently highlighted in the list.
std::string get_current_selection() const;
// Rebuilds the list, trying to keep the previous selection highlighted.
void reload(const std::string& previous_selection);
public:
sendmessage_dialog();
void update(u64 timestamp_us) override;
void on_button_pressed(pad_button button_press, bool is_auto_repeat) override;
compiled_resource get_compiled() override;
// Runs the dialog; npids receives the selected recipient id(s).
error_code Exec(message_data& msg_data, std::set<std::string>& npids) override;
// NP event callback updating recipient availability.
void callback_handler(u16 ntype, const std::string& username, bool status) override;
};
}
}
| 1,235
|
C++
|
.h
| 37
| 30.054054
| 87
| 0.740118
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,062
|
VKRenderTargets.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKRenderTargets.h
|
#pragma once
#include "util/types.hpp"
#include "../Common/surface_store.h"
#include "VKFormats.h"
#include "VKHelpers.h"
#include "vkutils/barriers.h"
#include "vkutils/buffer_object.h"
#include "vkutils/data_heap.h"
#include "vkutils/device.h"
#include "vkutils/image.h"
#include "vkutils/scratch.h"
#include <span>
namespace vk
{
namespace surface_cache_utils
{
void dispose(vk::buffer* buf);
}
void resolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src);
void unresolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src);
// Tracks synchronization state when a surface is referenced cyclically, i.e.
// simultaneously sampled as a texture and bound as a render target (feedback
// loop). It counts texture barriers against draw calls so redundant barriers
// can be elided and a trailing barrier is only inserted when draws happened
// after the last texture barrier.
class image_reference_sync_barrier
{
	u32 m_texture_barrier_count = 0;  // texture barriers inserted so far
	u32 m_draw_barrier_count = 0;     // draw calls counted against those barriers
	bool m_allow_skip_barrier = true; // when set, the next barrier request may be elided

public:
	// A texture barrier was just inserted; further requests cannot be skipped
	// until allow_skip() is called again.
	void on_insert_texture_barrier()
	{
		m_texture_barrier_count++;
		m_allow_skip_barrier = false;
	}

	void on_insert_draw_barrier()
	{
		// Account for corner case where the same texture can be bound to more than 1 slot
		m_draw_barrier_count = std::max(m_draw_barrier_count + 1, m_texture_barrier_count);
	}

	// Mark that the next barrier request can be safely skipped.
	void allow_skip()
	{
		m_allow_skip_barrier = true;
	}

	// Restart tracking for a new pass.
	void reset()
	{
		// Members are u32; the previous code assigned a type-inconsistent u64
		// literal (0ull) here.
		m_texture_barrier_count = m_draw_barrier_count = 0;
		m_allow_skip_barrier = false;
	}

	bool can_skip() const
	{
		return m_allow_skip_barrier;
	}

	// True once at least one texture barrier has been recorded this pass.
	bool is_enabled() const
	{
		return !!m_texture_barrier_count;
	}

	// True when draws were issued after the last texture barrier, in which case
	// one more barrier is needed after the draw loop completes.
	bool requires_post_loop_barrier() const
	{
		return is_enabled() && m_texture_barrier_count < m_draw_barrier_count;
	}
};
// Vulkan render-target surface tracked by the surface cache. Combines a
// viewable_image with the generic rsx render-target descriptor and adds
// feedback-loop tracking, MSAA resolve targets and memory-spilling support.
class render_target : public viewable_image, public rsx::render_target_descriptor<vk::viewable_image*>
{
// Cyclic reference hazard tracking
image_reference_sync_barrier m_cyclic_ref_tracker;
// Memory spilling support
std::unique_ptr<vk::buffer> m_spilled_mem;
// MSAA support:
// Get the linear resolve target bound to this surface. Initialize if none exists
vk::viewable_image* get_resolve_target_safe(vk::command_buffer& cmd);
// Resolve the planar MSAA data into a linear block
void resolve(vk::command_buffer& cmd);
// Unresolve the linear data into planar MSAA data
void unresolve(vk::command_buffer& cmd);
// Memory management:
// Default-initialize memory without loading
void clear_memory(vk::command_buffer& cmd, vk::image* surface);
// Load memory from cell and use to initialize the surface
void load_memory(vk::command_buffer& cmd);
// Generic - chooses whether to clear or load.
void initialize_memory(vk::command_buffer& cmd, rsx::surface_access access);
// Spill helpers
// Re-initialize using spilled memory
void unspill(vk::command_buffer& cmd);
// Build spill transfer descriptors
std::vector<VkBufferImageCopy> build_spill_transfer_descriptors(vk::image* target);
public:
u64 frame_tag = 0; // frame id when invalidated, 0 if not invalid
u64 last_rw_access_tag = 0; // timestamp when this object was last used
u64 spill_request_tag = 0; // timestamp when spilling was requested
bool is_bound = false; // set when the surface is bound for rendering
using viewable_image::viewable_image;
// Returns the image appropriate for the given access type (e.g. the resolve
// target for reads of MSAA surfaces).
vk::viewable_image* get_surface(rsx::surface_access access_type) override;
bool is_depth_surface() const override;
bool matches_dimensions(u16 _width, u16 _height) const;
void reset_surface_counters();
image_view* get_view(const rsx::texture_channel_remap_t& remap,
VkImageAspectFlags mask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT) override;
// Memory management
// Moves surface contents into a buffer object to free VRAM; resolve_cache
// receives any resolve images released in the process.
bool spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache);
// Synchronization
void texture_barrier(vk::command_buffer& cmd);
void post_texture_barrier(vk::command_buffer& cmd);
void memory_barrier(vk::command_buffer& cmd, rsx::surface_access access);
void read_barrier(vk::command_buffer& cmd) { memory_barrier(cmd, rsx::surface_access::shader_read); }
void write_barrier(vk::command_buffer& cmd) { memory_barrier(cmd, rsx::surface_access::shader_write); }
};
// Checked downcast from a generic image to a render-target surface.
// Fails hard (ensure) if 't' is not actually a render target.
static inline vk::render_target* as_rtt(vk::image* t)
{
	auto* surface = dynamic_cast<vk::render_target*>(t);
	return ensure(surface);
}
// Const overload of the checked image -> render_target downcast.
static inline const vk::render_target* as_rtt(const vk::image* t)
{
	const auto* surface = dynamic_cast<const vk::render_target*>(t);
	return ensure(surface);
}
struct surface_cache_traits
{
using surface_storage_type = std::unique_ptr<vk::render_target>;
using surface_type = vk::render_target*;
using buffer_object_storage_type = std::unique_ptr<vk::buffer>;
using buffer_object_type = vk::buffer*;
using command_list_type = vk::command_buffer&;
using download_buffer_object = void*;
using barrier_descriptor_t = rsx::deferred_clipped_region<vk::render_target*>;
// Returns extra usage/create flags an attachment image needs so it can also be
// sampled within the same renderpass (FBO feedback loop), including per-vendor
// driver workarounds. Returns {} in strict rendering mode or when nothing
// special is required.
static std::pair<VkImageUsageFlags, VkImageCreateFlags> get_attachment_create_flags(VkFormat format, [[maybe_unused]] u8 samples)
{
if (g_cfg.video.strict_rendering_mode)
{
return {};
}
// If we have driver support for FBO loops, set the usage flag for it.
if (vk::get_current_renderer()->get_framebuffer_loops_support())
{
return { VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, 0 };
}
// Workarounds to force transition to GENERAL to decompress.
// Fixes corruption in FBO loops for ANV and RADV.
switch (vk::get_driver_vendor())
{
case driver_vendor::ANV:
if (const auto format_features = vk::get_current_renderer()->get_format_properties(format);
format_features.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
{
// Only set if supported by hw
return { VK_IMAGE_USAGE_STORAGE_BIT, 0 };
}
break;
case driver_vendor::AMD:
case driver_vendor::RADV:
if (vk::get_chip_family() >= chip_class::AMD_navi1x)
{
// Only needed for GFX10+
return { 0, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT };
}
break;
default:
// Unrecognized vendors fall through to the no-workaround group below.
rsx_log.error("Unknown driver vendor!");
[[ fallthrough ]];
case driver_vendor::NVIDIA:
case driver_vendor::INTEL:
case driver_vendor::MVK:
case driver_vendor::DOZEN:
case driver_vendor::LAVAPIPE:
case driver_vendor::V3DV:
break;
}
return {};
}
// Allocates and initializes a new color render target for the surface cache.
// Dimensions are given in native (game) units; the actual image is created at
// the resolution-scaled size. The returned surface carries one reference.
static std::unique_ptr<vk::render_target> create_new_surface(
u32 address,
rsx::surface_color_format format,
usz width, usz height, usz pitch,
rsx::surface_antialiasing antialias,
vk::render_device& device, vk::command_buffer& cmd)
{
const auto fmt = vk::get_compatible_surface_format(format);
VkFormat requested_format = fmt.first;
// Emulate PS3 MSAA sample layout only when MSAA level is set to auto.
u8 samples;
rsx::surface_sample_layout sample_layout;
if (g_cfg.video.antialiasing_level == msaa_level::_auto)
{
samples = get_format_sample_count(antialias);
sample_layout = rsx::surface_sample_layout::ps3;
}
else
{
samples = 1;
sample_layout = rsx::surface_sample_layout::null;
}
auto [usage_flags, create_flags] = get_attachment_create_flags(requested_format, samples);
usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
if (samples == 1) [[likely]]
{
usage_flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
else
{
// MSAA surfaces get STORAGE usage instead of TRANSFER_SRC; presumably for
// the resolve/unresolve path - TODO confirm.
usage_flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
std::unique_ptr<vk::render_target> rtt;
const auto [width_, height_] = rsx::apply_resolution_scale<true>(static_cast<u16>(width), static_cast<u16>(height));
rtt = std::make_unique<vk::render_target>(device, device.get_memory_mapping().device_local,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D,
requested_format,
static_cast<u32>(width_), static_cast<u32>(height_), 1, 1, 1,
static_cast<VkSampleCountFlagBits>(samples),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
usage_flags,
create_flags,
VMM_ALLOCATION_POOL_SURFACE_CACHE,
RSX_FORMAT_CLASS_COLOR);
rtt->set_debug_name(fmt::format("RTV @0x%x, fmt=0x%x", address, static_cast<int>(format)));
rtt->change_layout(cmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
rtt->set_format(format);
rtt->set_aa_mode(antialias);
rtt->sample_layout = sample_layout;
rtt->memory_usage_flags = rsx::surface_usage_flags::attachment;
// Contents are undefined until first use; request background erase.
rtt->state_flags = rsx::surface_state_flags::erase_bkgnd;
rtt->native_component_map = fmt.second;
rtt->rsx_pitch = static_cast<u32>(pitch);
// Pitch/extents are tracked in native units, not the scaled image size.
rtt->native_pitch = static_cast<u32>(width) * get_format_block_size_in_bytes(format) * rtt->samples_x;
rtt->surface_width = static_cast<u16>(width);
rtt->surface_height = static_cast<u16>(height);
rtt->queue_tag(address);
rtt->add_ref();
return rtt;
}
// Allocates and initializes a new depth/stencil render target. Mirrors the
// color overload above, but selects a depth format the device supports and
// does not add STORAGE usage for MSAA surfaces.
static std::unique_ptr<vk::render_target> create_new_surface(
u32 address,
rsx::surface_depth_format2 format,
usz width, usz height, usz pitch,
rsx::surface_antialiasing antialias,
vk::render_device& device, vk::command_buffer& cmd)
{
const VkFormat requested_format = vk::get_compatible_depth_surface_format(device.get_formats_support(), format);
// Emulate PS3 MSAA sample layout only when MSAA level is set to auto.
u8 samples;
rsx::surface_sample_layout sample_layout;
if (g_cfg.video.antialiasing_level == msaa_level::_auto)
{
samples = get_format_sample_count(antialias);
sample_layout = rsx::surface_sample_layout::ps3;
}
else
{
samples = 1;
sample_layout = rsx::surface_sample_layout::null;
}
auto [usage_flags, create_flags] = get_attachment_create_flags(requested_format, samples);
usage_flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
if (samples == 1) [[likely]]
{
usage_flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
std::unique_ptr<vk::render_target> ds;
const auto [width_, height_] = rsx::apply_resolution_scale<true>(static_cast<u16>(width), static_cast<u16>(height));
ds = std::make_unique<vk::render_target>(device, device.get_memory_mapping().device_local,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D,
requested_format,
static_cast<u32>(width_), static_cast<u32>(height_), 1, 1, 1,
static_cast<VkSampleCountFlagBits>(samples),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
usage_flags,
create_flags,
VMM_ALLOCATION_POOL_SURFACE_CACHE,
rsx::classify_format(format));
ds->set_debug_name(fmt::format("DSV @0x%x", address));
ds->change_layout(cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
ds->set_format(format);
ds->set_aa_mode(antialias);
ds->sample_layout = sample_layout;
ds->memory_usage_flags = rsx::surface_usage_flags::attachment;
// Contents are undefined until first use; request background erase.
ds->state_flags = rsx::surface_state_flags::erase_bkgnd;
// Depth reads replicate the R channel across all components.
ds->native_component_map = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R };
ds->native_pitch = static_cast<u32>(width) * get_format_block_size_in_bytes(format) * ds->samples_x;
ds->rsx_pitch = static_cast<u32>(pitch);
ds->surface_width = static_cast<u16>(width);
ds->surface_height = static_cast<u16>(height);
ds->queue_tag(address);
ds->add_ref();
return ds;
}
// Clones 'ref' into 'sink' at a (possibly different) region described by
// 'prev'. Creates the sink surface on first use, copying the reference
// surface's format/usage properties, then records 'prev' as the deferred
// old-contents transfer to execute later.
static void clone_surface(
vk::command_buffer& cmd,
std::unique_ptr<vk::render_target>& sink, vk::render_target* ref,
u32 address, barrier_descriptor_t& prev)
{
if (!sink)
{
const auto [new_w, new_h] = rsx::apply_resolution_scale<true>(prev.width, prev.height,
ref->get_surface_width<rsx::surface_metrics::pixels>(), ref->get_surface_height<rsx::surface_metrics::pixels>());
auto& dev = cmd.get_command_pool().get_owner();
sink = std::make_unique<vk::render_target>(dev, dev.get_memory_mapping().device_local,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D,
ref->format(),
new_w, new_h, 1, 1, 1,
static_cast<VkSampleCountFlagBits>(ref->samples()),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
ref->info.usage,
ref->info.flags,
VMM_ALLOCATION_POOL_SURFACE_CACHE,
ref->format_class());
sink->add_ref();
// Inherit the reference surface's properties; extents come from 'prev'.
sink->set_spp(ref->get_spp());
sink->format_info = ref->format_info;
sink->memory_usage_flags = rsx::surface_usage_flags::storage;
sink->state_flags = rsx::surface_state_flags::erase_bkgnd;
sink->native_component_map = ref->native_component_map;
sink->sample_layout = ref->sample_layout;
sink->stencil_init_flags = ref->stencil_init_flags;
sink->native_pitch = static_cast<u32>(prev.width) * ref->get_bpp() * ref->samples_x;
sink->rsx_pitch = ref->get_rsx_pitch();
sink->surface_width = prev.width;
sink->surface_height = prev.height;
sink->queue_tag(address);
// Prefer shader-read layout when the clone will be sampled.
const auto best_layout = (ref->info.usage & VK_IMAGE_USAGE_SAMPLED_BIT) ?
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
ref->current_layout;
sink->change_layout(cmd, best_layout);
}
sink->on_clone_from(ref);
if (!sink->old_contents.empty())
{
// Deal with this, likely only needs to clear
if (sink->surface_width > prev.width || sink->surface_height > prev.height)
{
sink->write_barrier(cmd);
}
else
{
sink->clear_rw_barrier();
}
}
prev.target = sink.get();
sink->set_old_contents_region(prev, false);
}
// Pitch conversion is not implemented for Vulkan yet; the surface is simply
// flagged for background erase and no converted copy is produced.
static std::unique_ptr<vk::render_target> convert_pitch(
vk::command_buffer& /*cmd*/,
std::unique_ptr<vk::render_target>& src,
usz /*out_pitch*/)
{
// TODO
src->state_flags = rsx::surface_state_flags::erase_bkgnd;
return {};
}
// True when 'surface' matches the requested dimensions/sample count and shares
// the reference surface's Vulkan format ('ref' is only consulted for format).
static bool is_compatible_surface(const vk::render_target* surface, const vk::render_target* ref, u16 width, u16 height, u8 sample_count)
{
return (surface->format() == ref->format() &&
surface->get_spp() == sample_count &&
surface->get_surface_width() == width &&
surface->get_surface_height() == height);
}
// Transitions a surface into the correct attachment layout and marks it bound
// before it is used as a render target.
static void prepare_surface_for_drawing(vk::command_buffer& cmd, vk::render_target* surface)
{
// Special case barrier
surface->memory_barrier(cmd, rsx::surface_access::gpu_reference);
if (surface->aspect() == VK_IMAGE_ASPECT_COLOR_BIT)
{
surface->change_layout(cmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
else
{
surface->change_layout(cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
}
surface->reset_surface_counters();
surface->memory_usage_flags |= rsx::surface_usage_flags::attachment;
surface->is_bound = true;
}
// Unbinds the surface from the framebuffer; layout changes for sampling are
// handled elsewhere.
static void prepare_surface_for_sampling(vk::command_buffer& /*cmd*/, vk::render_target* surface)
{
surface->is_bound = false;
}
// True when the surface's RSX pitch matches the requested pitch.
static bool surface_is_pitch_compatible(const std::unique_ptr<vk::render_target>& surface, usz pitch)
{
return surface->rsx_pitch == pitch;
}
// Shared reset logic when a surface at 'address' is being recycled for new
// contents: clears usage tracking and rebinds the pitch/address.
static void int_invalidate_surface_contents(
vk::command_buffer& /*cmd*/,
vk::render_target* surface,
u32 address,
usz pitch)
{
surface->rsx_pitch = static_cast<u32>(pitch);
surface->queue_tag(address);
surface->last_use_tag = 0;
surface->stencil_init_flags = 0;
surface->memory_usage_flags = rsx::surface_usage_flags::unknown;
surface->raster_type = rsx::surface_raster_type::linear;
}
// Color variant: also re-applies the format and component swizzle.
static void invalidate_surface_contents(
vk::command_buffer& cmd,
vk::render_target* surface,
rsx::surface_color_format format,
u32 address,
usz pitch)
{
const auto fmt = vk::get_compatible_surface_format(format);
surface->set_format(format);
surface->set_native_component_layout(fmt.second);
surface->set_debug_name(fmt::format("RTV @0x%x, fmt=0x%x", address, static_cast<int>(format)));
int_invalidate_surface_contents(cmd, surface, address, pitch);
}
// Depth variant.
static void invalidate_surface_contents(
vk::command_buffer& cmd,
vk::render_target* surface,
rsx::surface_depth_format2 format,
u32 address,
usz pitch)
{
surface->set_format(format);
surface->set_debug_name(fmt::format("DSV @0x%x", address));
int_invalidate_surface_contents(cmd, surface, address, pitch);
}
// Called when the cache invalidates a surface: stamps the frame id, discards
// pending deferred transfers and drops the cache's reference.
static void notify_surface_invalidated(const std::unique_ptr<vk::render_target>& surface)
{
surface->frame_tag = vk::get_current_frame_id();
// frame_tag == 0 means "not invalidated", so never store 0 here.
if (!surface->frame_tag) surface->frame_tag = 1;
if (!surface->old_contents.empty())
{
// TODO: Retire the deferred writes
surface->clear_rw_barrier();
}
surface->release();
}
// Surface kept alive across an invalidation event; nothing to do.
static void notify_surface_persist(const std::unique_ptr<vk::render_target>& /*surface*/)
{}
// Invalidated surface picked up again: re-reference and schedule a clear.
static void notify_surface_reused(const std::unique_ptr<vk::render_target>& surface)
{
surface->state_flags |= rsx::surface_state_flags::erase_bkgnd;
surface->add_ref();
}
// Shared matcher: true when the surface has the given Vulkan format, sample
// count and dimensions. With check_refs, surfaces that still have outstanding
// references are rejected.
static bool int_surface_matches_properties(
const std::unique_ptr<vk::render_target>& surface,
VkFormat format,
usz width, usz height,
rsx::surface_antialiasing antialias,
bool check_refs)
{
if (check_refs && surface->has_refs())
{
// Surface may still have read refs from data 'copy'
return false;
}
return (surface->info.format == format &&
surface->get_spp() == get_format_sample_count(antialias) &&
surface->matches_dimensions(static_cast<u16>(width), static_cast<u16>(height)));
}
// Color-format overload: translates the RSX format to Vulkan first.
static bool surface_matches_properties(
const std::unique_ptr<vk::render_target>& surface,
rsx::surface_color_format format,
usz width, usz height,
rsx::surface_antialiasing antialias,
bool check_refs = false)
{
VkFormat vk_format = vk::get_compatible_surface_format(format).first;
return int_surface_matches_properties(surface, vk_format, width, height, antialias, check_refs);
}
// Depth-format overload: resolves the device-supported depth format first.
static bool surface_matches_properties(
const std::unique_ptr<vk::render_target>& surface,
rsx::surface_depth_format2 format,
usz width, usz height,
rsx::surface_antialiasing antialias,
bool check_refs = false)
{
auto device = vk::get_current_renderer();
VkFormat vk_format = vk::get_compatible_depth_surface_format(device->get_formats_support(), format);
return int_surface_matches_properties(surface, vk_format, width, height, antialias, check_refs);
}
// Buffer-object spilling to host memory is not implemented for Vulkan yet.
static void spill_buffer(std::unique_ptr<vk::buffer>& /*bo*/)
{
// TODO
}
// Counterpart of spill_buffer; also unimplemented.
static void unspill_buffer(std::unique_ptr<vk::buffer>& /*bo*/)
{
// TODO
}
// Writes a render target's pixel data back into its backing buffer object
// 'bo'. dst_offset_in_buffer/src_offset_in_buffer select the destination and
// source windows; max_copy_length caps the number of bytes written.
// Fix: the vkCmdCopyBuffer call's last argument had been mangled into the
// '©' mojibake (HTML entity of "&copy"); it must pass the address of the
// local VkBufferCopy descriptor.
static void write_render_target_to_memory(
	vk::command_buffer& cmd,
	vk::buffer* bo,
	vk::render_target* surface,
	u64 dst_offset_in_buffer,
	u64 src_offset_in_buffer,
	u64 max_copy_length)
{
	// Flush pending GPU writes so the transfer reads coherent data.
	surface->read_barrier(cmd);
	vk::image* source = surface->get_surface(rsx::surface_access::transfer_read);

	// With resolution scaling active, rescale into a scratch image sized at the
	// native sample dimensions before copying out.
	const bool is_scaled = surface->width() != surface->surface_width;
	if (is_scaled)
	{
		const areai src_rect = { 0, 0, static_cast<int>(source->width()), static_cast<int>(source->height()) };
		const areai dst_rect = { 0, 0, surface->get_surface_width<rsx::surface_metrics::samples, int>(), surface->get_surface_height<rsx::surface_metrics::samples, int>() };
		auto scratch = vk::get_typeless_helper(source->format(), source->format_class(), dst_rect.x2, dst_rect.y2);
		vk::copy_scaled_image(cmd, source, scratch, src_rect, dst_rect, 1, true, VK_FILTER_NEAREST);
		source = scratch;
	}

	// Route through a scratch buffer when a direct image->bo copy is not
	// possible: partial copies, non-zero source offsets, or depth data.
	auto dest = bo;
	const auto transfer_size = surface->get_memory_range().length();
	if (transfer_size > max_copy_length || src_offset_in_buffer || surface->is_depth_surface())
	{
		// NOTE(review): 4x headroom over the raw transfer size - presumably for
		// depth/stencil unpacking; confirm against get_scratch_buffer usage.
		auto scratch = vk::get_scratch_buffer(cmd, transfer_size * 4);
		dest = scratch;
	}

	VkBufferImageCopy region =
	{
		.bufferOffset = (dest == bo) ? dst_offset_in_buffer : 0,
		.bufferRowLength = surface->rsx_pitch / surface->get_bpp(),
		.bufferImageHeight = 0,
		.imageSubresource = { source->aspect(), 0, 0, 1 },
		.imageOffset = {},
		.imageExtent = {
			.width = source->width(),
			.height = source->height(),
			.depth = 1
		}
	};

	// inject post-transfer barrier
	image_readback_options_t options{};
	options.sync_region =
	{
		.offset = src_offset_in_buffer,
		.length = max_copy_length
	};
	vk::copy_image_to_buffer(cmd, source, dest, region, options);

	if (dest != bo)
	{
		// Final hop: copy the requested window from scratch into 'bo', then make
		// the write visible to subsequent transfer reads.
		VkBufferCopy copy = { src_offset_in_buffer, dst_offset_in_buffer, max_copy_length };
		vkCmdCopyBuffer(cmd, dest->value, bo->value, 1, &copy);
		vk::insert_buffer_memory_barrier(cmd,
			bo->value, dst_offset_in_buffer, max_copy_length,
			VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
			VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
	}
}
// Collapses a list of per-block buffer objects into one contiguous buffer.
// Null entries reserve a BlockSize-byte hole at their position. Source buffers
// are disposed after copying; ownership of the returned raw pointer passes to
// the caller (matches the traits' buffer_object_type).
// Fix: the vkCmdCopyBuffer call's last argument had been mangled into the
// '©' mojibake (HTML entity of "&copy"); it must pass &copy.
template <int BlockSize>
static vk::buffer* merge_bo_list(vk::command_buffer& cmd, std::vector<vk::buffer*>& list)
{
	// Total size: real buffers contribute their own size, missing blocks a hole.
	u32 required_bo_size = 0;
	for (auto& bo : list)
	{
		required_bo_size += (bo ? bo->size() : BlockSize);
	}

	// Create dst
	auto& dev = cmd.get_command_pool().get_owner();
	auto dst = new vk::buffer(dev,
		required_bo_size,
		dev.get_memory_mapping().device_local, 0,
		VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
		0, VMM_ALLOCATION_POOL_SURFACE_CACHE);

	// TODO: Initialize the buffer with system RAM contents

	// Copy all the data over from the sub-blocks
	u32 offset = 0;
	for (auto& bo : list)
	{
		if (!bo)
		{
			// Keep the layout stable by skipping the reserved hole.
			offset += BlockSize;
			continue;
		}

		VkBufferCopy copy = { 0, offset, ::size32(*bo) };
		offset += ::size32(*bo);
		vkCmdCopyBuffer(cmd, bo->value, dst->value, 1, &copy);

		// Cleanup
		vk::surface_cache_utils::dispose(bo);
	}

	return dst;
}
// Unwraps the cache's smart-pointer storage handle to the raw pointer type.
template <typename T>
static T* get(const std::unique_ptr<T>& handle)
{
	T* const raw = handle.get();
	return raw;
}
};
// Vulkan specialization of the generic surface store. Adds VRAM budgeting and
// memory-pressure handling (spilling/trimming unused surfaces).
class surface_cache : public rsx::surface_store<vk::surface_cache_traits>
{
private:
// Portion of total device memory the cache is allowed to consume.
u64 get_surface_cache_memory_quota(u64 total_device_memory);
public:
void destroy();
// Attempts to spill surfaces not in active use; true if anything was freed.
bool spill_unused_memory();
// True when the cache exceeds its memory quota.
bool is_overallocated();
bool can_collapse_surface(const std::unique_ptr<vk::render_target>& surface, rsx::problem_severity severity) override;
bool handle_memory_pressure(vk::command_buffer& cmd, rsx::problem_severity severity) override;
// Evicts stale entries according to the current memory-pressure level.
void trim(vk::command_buffer& cmd, rsx::problem_severity memory_pressure);
};
}
//h
| 21,924
|
C++
|
.h
| 588
| 33.258503
| 169
| 0.695499
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,063
|
VKRenderPass.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKRenderPass.h
|
#pragma once
#include "VulkanAPI.h"
#include "Utilities/geometry.h"
namespace vk
{
class image;
class command_buffer;
// Renderpass cache keys: derived from the attachment image list (optionally
// with input-attachment bindings), chained from a previous key, or from a
// single surface format.
u64 get_renderpass_key(const std::vector<vk::image*>& images, const std::vector<u8>& input_attachment_ids = {});
u64 get_renderpass_key(const std::vector<vk::image*>& images, u64 previous_key);
u64 get_renderpass_key(VkFormat surface_format);
// Returns (creating on demand) the cached renderpass for a key.
VkRenderPass get_renderpass(VkDevice dev, u64 renderpass_key);
void clear_renderpass_cache(VkDevice dev);
// Renderpass scope management helpers.
// NOTE: These are not thread safe by design.
void begin_renderpass(VkDevice dev, const vk::command_buffer& cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region);
void begin_renderpass(const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region);
void end_renderpass(const vk::command_buffer& cmd);
bool is_renderpass_open(const vk::command_buffer& cmd);
// Runs 'op' with the currently open renderpass/framebuffer.
using renderpass_op_callback_t = std::function<void(const vk::command_buffer&, VkRenderPass, VkFramebuffer)>;
void renderpass_op(const vk::command_buffer& cmd, const renderpass_op_callback_t& op);
}
| 1,181
|
C++
|
.h
| 21
| 52.952381
| 145
| 0.766551
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,064
|
VKCommandStream.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCommandStream.h
|
#pragma once
#include "VulkanAPI.h"
namespace vk
{
struct fence;
enum // callback commands
{
rctrl_queue_submit = 0x80000000,
rctrl_run_gc = 0x80000001
};
// Deferred queue-submission descriptor. Deep-copies the VkSubmitInfo and the
// semaphore/stage-mask values it points at into local storage, then re-points
// the copied info at those members so the packet stays valid after the
// caller's originals go out of scope.
struct submit_packet
{
// Core components
VkQueue queue;
fence* pfence;
VkSubmitInfo submit_info;
// Pointer redirection storage
VkSemaphore wait_semaphore;
VkSemaphore signal_semaphore;
VkFlags wait_flags;
submit_packet(VkQueue _q, fence* _f, const VkSubmitInfo* info) :
queue(_q), pfence(_f), submit_info(*info),
wait_semaphore(0), signal_semaphore(0), wait_flags(0)
{
// NOTE: only the first wait/signal semaphore is preserved; counts > 1 are
// not supported by this redirection scheme.
if (info->waitSemaphoreCount)
{
wait_semaphore = *info->pWaitSemaphores;
submit_info.pWaitSemaphores = &wait_semaphore;
}
if (info->signalSemaphoreCount)
{
signal_semaphore = *info->pSignalSemaphores;
submit_info.pSignalSemaphores = &signal_semaphore;
}
if (info->pWaitDstStageMask)
{
wait_flags = *info->pWaitDstStageMask;
submit_info.pWaitDstStageMask = &wait_flags;
}
}
};
}
| 1,026
|
C++
|
.h
| 42
| 21.095238
| 66
| 0.70082
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,065
|
VKShaderInterpreter.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKShaderInterpreter.h
|
#pragma once
#include "VKProgramBuffer.h"
#include "vkutils/descriptors.h"
#include <unordered_map>
namespace vk
{
using ::program_hash_util::fragment_program_utils;
using ::program_hash_util::vertex_program_utils;
// Fallback shader interpreter: executes RSX vertex/fragment programs through
// generic interpreter shaders instead of recompiled ones. Caches linked
// programs per (compiler options, pipeline state) key.
class shader_interpreter
{
glsl::shader m_vs;
std::vector<glsl::program_input> m_vs_inputs;
std::vector<glsl::program_input> m_fs_inputs;
VkDevice m_device = VK_NULL_HANDLE;
// Layout shared by all interpreter pipeline variants.
VkDescriptorSetLayout m_shared_descriptor_layout = VK_NULL_HANDLE;
VkPipelineLayout m_shared_pipeline_layout = VK_NULL_HANDLE;
glsl::program* m_current_interpreter = nullptr;
// Cache key: fragment compiler option bits + full raster pipeline state.
struct pipeline_key
{
u64 compiler_opt;
vk::pipeline_props properties;
bool operator == (const pipeline_key& other) const
{
return other.compiler_opt == compiler_opt && other.properties == properties;
}
};
struct key_hasher
{
usz operator()(const pipeline_key& key) const
{
return rpcs3::hash_struct(key.properties) ^ key.compiler_opt;
}
};
std::unordered_map<pipeline_key, std::unique_ptr<glsl::program>, key_hasher> m_program_cache;
// Fragment interpreter shaders keyed by compiler option bits.
std::unordered_map<u64, std::unique_ptr<glsl::shader>> m_fs_cache;
vk::descriptor_pool m_descriptor_pool;
// Binding offsets of the instruction/texture blocks inside the shared layout.
u32 m_vertex_instruction_start = 0;
u32 m_fragment_instruction_start = 0;
u32 m_fragment_textures_start = 0;
pipeline_key m_current_key{};
std::pair<VkDescriptorSetLayout, VkPipelineLayout> create_layout(VkDevice dev);
void create_descriptor_pools(const vk::render_device& dev);
void build_vs();
glsl::shader* build_fs(u64 compiler_opt);
glsl::program* link(const vk::pipeline_props& properties, u64 compiler_opt);
public:
void init(const vk::render_device& dev);
void destroy();
// Returns (building on demand) the interpreter program for the given state.
glsl::program* get(const vk::pipeline_props& properties, const program_hash_util::fragment_program_utils::fragment_program_metadata& metadata);
// True when 'prog' is one of the interpreter programs owned by this cache.
bool is_interpreter(const glsl::program* prog) const;
u32 get_vertex_instruction_location() const;
u32 get_fragment_instruction_location() const;
void update_fragment_textures(const std::array<VkDescriptorImageInfo, 68>& sampled_images, vk::descriptor_set &set);
VkDescriptorSet allocate_descriptor_set();
};
}
| 2,160
|
C++
|
.h
| 56
| 35.428571
| 145
| 0.749521
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,066
|
VKFramebuffer.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFramebuffer.h
|
#pragma once
#include "vkutils/framebuffer_object.hpp"
namespace vk
{
// Framebuffer with a reference count so cached entries can be evicted safely.
struct framebuffer_holder : public vk::framebuffer, public rsx::ref_counted
{
using framebuffer::framebuffer;
};
// Fetches (creating on demand) a cached framebuffer for the attachment list or
// a single raw attachment image.
vk::framebuffer_holder* get_framebuffer(VkDevice dev, u16 width, u16 height, VkBool32 has_input_attachments, VkRenderPass renderpass, const std::vector<vk::image*>& image_list);
vk::framebuffer_holder* get_framebuffer(VkDevice dev, u16 width, u16 height, VkBool32 has_input_attachments, VkRenderPass renderpass, VkFormat format, VkImage attachment);
// Evicts entries with no outstanding references / drops the whole cache.
void remove_unused_framebuffers();
void clear_framebuffer_cache();
}
| 615
|
C++
|
.h
| 13
| 45.307692
| 178
| 0.797659
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,067
|
VKOverlays.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKOverlays.h
|
#pragma once
#include "../Common/simple_array.hpp"
#include "../Overlays/overlay_controls.h"
#include "VKProgramPipeline.h"
#include "VKHelpers.h"
#include "vkutils/data_heap.h"
#include "vkutils/descriptors.h"
#include "vkutils/graphics_pipeline_state.hpp"
#include "Emu/IdManager.h"
#include <unordered_map>
namespace rsx
{
namespace overlays
{
enum class texture_sampling_mode;
struct overlay;
}
}
namespace vk
{
struct framebuffer;
struct sampler;
struct image_view;
class image;
class viewable_image;
class command_buffer;
class render_target;
namespace glsl
{
class program;
}
// TODO: Refactor text print class to inherit from this base class
// Base class for full-screen/overlay render passes: owns the shader pair,
// descriptor machinery, streaming vertex/uniform heaps and a per-renderpass
// pipeline cache. Subclasses override the virtuals to customize inputs,
// uniforms and geometry emission.
struct overlay_pass
{
vk::glsl::shader m_vertex_shader;
vk::glsl::shader m_fragment_shader;
vk::descriptor_pool m_descriptor_pool;
descriptor_set m_descriptor_set;
VkDescriptorSetLayout m_descriptor_layout = nullptr;
VkPipelineLayout m_pipeline_layout = nullptr;
VkFilter m_sampler_filter = VK_FILTER_LINEAR;
u32 m_num_usable_samplers = 1;
u32 m_num_input_attachments = 0;
// Linked pipelines keyed by renderpass (see get_pipeline_key).
std::unordered_map<u64, std::unique_ptr<vk::glsl::program>> m_program_cache;
std::unique_ptr<vk::sampler> m_sampler;
std::unique_ptr<vk::framebuffer> m_draw_fbo;
// Streaming heaps for vertex data (m_vao) and uniforms (m_ubo).
vk::data_heap m_vao;
vk::data_heap m_ubo;
const vk::render_device* m_device = nullptr;
// GLSL sources supplied by the concrete pass.
std::string vs_src;
std::string fs_src;
graphics_pipeline_state renderpass_config;
bool initialized = false;
bool compiled = false;
u32 num_drawable_elements = 4;
u32 first_vertex = 0;
u32 m_ubo_length = 128;
u32 m_ubo_offset = 0;
u32 m_vao_offset = 0;
overlay_pass();
virtual ~overlay_pass();
u64 get_pipeline_key(VkRenderPass pass);
void check_heap();
void init_descriptors();
virtual void update_uniforms(vk::command_buffer& /*cmd*/, vk::glsl::program* /*program*/) {}
virtual std::vector<vk::glsl::program_input> get_vertex_inputs();
virtual std::vector<vk::glsl::program_input> get_fragment_inputs();
virtual void get_dynamic_state_entries(std::vector<VkDynamicState>& /*state_descriptors*/) {}
virtual std::vector<VkPushConstantRange> get_push_constants()
{
return {};
}
// Descriptor binding slots: binding 0 is reserved; samplers come first, then
// input attachments.
int sampler_location(int index) const { return 1 + index; }
int input_attachment_location(int index) const { return 1 + m_num_usable_samplers + index; }
// Streams 'count' vertices into the VAO heap; the pass draws from m_vao_offset.
template <typename T>
void upload_vertex_data(T* data, u32 count)
{
check_heap();
const auto size = count * sizeof(T);
m_vao_offset = static_cast<u32>(m_vao.alloc<16>(size));
auto dst = m_vao.map(m_vao_offset, size);
std::memcpy(dst, data, size);
m_vao.unmap();
}
vk::glsl::program* build_pipeline(u64 storage_key, VkRenderPass render_pass);
void load_program(vk::command_buffer& cmd, VkRenderPass pass, const std::vector<vk::image_view*>& src);
virtual void create(const vk::render_device& dev);
virtual void destroy();
void free_resources();
vk::framebuffer* get_framebuffer(vk::image* target, VkRenderPass render_pass);
virtual void emit_geometry(vk::command_buffer& cmd);
virtual void set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h);
// Convenience entry points: draw with an explicit fbo, a target image with
// multiple sources, or a single source view.
void run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* fbo, const std::vector<vk::image_view*>& src, VkRenderPass render_pass);
void run(vk::command_buffer& cmd, const areau& viewport, vk::image* target, const std::vector<vk::image_view*>& src, VkRenderPass render_pass);
void run(vk::command_buffer& cmd, const areau& viewport, vk::image* target, vk::image_view* src, VkRenderPass render_pass);
};
// Renders rsx::overlays UI elements (fonts, images, primitives) on top of the frame.
struct ui_overlay_renderer : public overlay_pass
{
	f32 m_time = 0.f;          // animation clock for pulse/glow -- units defined in the .cpp, confirm there
	f32 m_blur_strength = 0.f;
	color4f m_scale_offset;
	color4f m_color;
	bool m_pulse_glow = false;
	bool m_clip_enabled = false;
	bool m_disable_vertex_snap = false;
	rsx::overlays::texture_sampling_mode m_texture_type;
	areaf m_clip_region;
	coordf m_viewport;

	// Keep-alive storage for uploaded textures and their views
	std::vector<std::unique_ptr<vk::image>> resources;
	std::unordered_map<u64, std::unique_ptr<vk::image>> font_cache;
	std::unordered_map<u64, std::unique_ptr<vk::image_view>> view_cache;
	// pair = (owner uid, image); presumably evicted per-owner via remove_temp_resources -- verify
	std::unordered_map<u64, std::pair<u32, std::unique_ptr<vk::image>>> temp_image_cache;
	std::unordered_map<u64, std::unique_ptr<vk::image_view>> temp_view_cache;

	rsx::overlays::primitive_type m_current_primitive_type = rsx::overlays::primitive_type::quad_list;

	ui_overlay_renderer();

	// Uploads raw pixel data into a new image (font/temp caches select ownership) and returns a view.
	vk::image_view* upload_simple_texture(vk::render_device& dev, vk::command_buffer& cmd,
		vk::data_heap& upload_heap, u64 key, u32 w, u32 h, u32 layers, bool font, bool temp, const void* pixel_src, u32 owner_uid);

	void init(vk::command_buffer& cmd, vk::data_heap& upload_heap);

	void destroy() override;

	// Releases cached temporary images associated with 'key'
	void remove_temp_resources(u32 key);

	// Cache lookups that upload on miss
	vk::image_view* find_font(rsx::overlays::font* font, vk::command_buffer& cmd, vk::data_heap& upload_heap);
	vk::image_view* find_temp_image(rsx::overlays::image_info* desc, vk::command_buffer& cmd, vk::data_heap& upload_heap, u32 owner_uid);

	std::vector<VkPushConstantRange> get_push_constants() override;

	void update_uniforms(vk::command_buffer& cmd, vk::glsl::program* program) override;

	void set_primitive_type(rsx::overlays::primitive_type type);

	void emit_geometry(vk::command_buffer& cmd) override;

	// Draws every element of 'ui' into 'target'
	void run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* target, VkRenderPass render_pass,
		vk::data_heap& upload_heap, rsx::overlays::overlay& ui);
};
// Clears a color attachment region by drawing, honoring a per-channel color mask.
struct attachment_clear_pass : public overlay_pass
{
	color4f clear_color = { 0.f, 0.f, 0.f, 0.f };
	color4f colormask = { 1.f, 1.f, 1.f, 1.f }; // 1 = channel is written
	VkRect2D region = {};

	attachment_clear_pass();

	std::vector<VkPushConstantRange> get_push_constants() override;

	void update_uniforms(vk::command_buffer& cmd, vk::glsl::program* program) override;

	void set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h) override;

	// clearmask selects the channels to clear -- encoding defined in the .cpp, confirm there
	void run(vk::command_buffer& cmd, vk::framebuffer* target, VkRect2D rect, u32 clearmask, color4f color, VkRenderPass render_pass);
};
// Clears the stencil aspect of a render target region via a draw.
struct stencil_clear_pass : public overlay_pass
{
	VkRect2D region = {};

	stencil_clear_pass();

	void set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h) override;

	void run(vk::command_buffer& cmd, vk::render_target* target, VkRect2D rect, u32 stencil_clear, u32 stencil_write_mask, VkRenderPass render_pass);
};
// Final video-out pass: applies gamma / limited-range calibration and stereo display modes.
struct video_out_calibration_pass : public overlay_pass
{
	// Shader constants; the named fields alias the raw float[4] payload.
	union config_t
	{
		struct
		{
			float gamma;
			int limit_range;         // non-zero = limited (16-235 style) RGB output -- confirm in shader
			int stereo_display_mode;
			int stereo_image_count;
		};
		float data[4];
	}
	config = {};

	video_out_calibration_pass();

	std::vector<VkPushConstantRange> get_push_constants() override;

	void update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/) override;

	void run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* target,
		const rsx::simple_array<vk::viewable_image*>& src, f32 gamma, bool limited_rgb, stereo_render_mode_options stereo_mode, VkRenderPass render_pass);
};
// TODO: Replace with a proper manager
extern std::unordered_map<u32, std::unique_ptr<vk::overlay_pass>> g_overlay_passes;
template<class T>
T* get_overlay_pass()
{
u32 index = id_manager::typeinfo::get_index<T>();
auto& e = g_overlay_passes[index];
if (!e)
{
e = std::make_unique<T>();
e->create(*vk::get_current_renderer());
}
return static_cast<T*>(e.get());
}
void reset_overlay_passes();
}
| 7,463
|
C++
|
.h
| 183
| 37.63388
| 149
| 0.722746
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,068
|
VKCommonDecompiler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCommonDecompiler.h
|
#pragma once
#include "../Program/GLSLTypes.h"
namespace vk
{
	using namespace ::glsl;

	// Maps an RSX varying register name to its GLSL location index.
	// NOTE(review): return value on an unknown name is defined in the .cpp -- confirm before relying on it.
	int get_varying_register_location(std::string_view varying_register_name);
}
| 167
|
C++
|
.h
| 7
| 22.285714
| 75
| 0.78481
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,069
|
VKProgramPipeline.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKProgramPipeline.h
|
#pragma once
#include "VulkanAPI.h"
#include "VKCommonDecompiler.h"
#include "vkutils/descriptors.h"
#include <string>
#include <vector>
namespace vk
{
	namespace glsl
	{
		// Kinds of resources a shader program can consume; also indexes the
		// per-type uniform lists inside vk::glsl::program.
		enum program_input_type : u32
		{
			input_type_uniform_buffer = 0,
			input_type_texel_buffer = 1,
			input_type_texture = 2,
			input_type_storage_buffer = 3,

			input_type_max_enum = 4
		};

		// Snapshot of a sampler binding (used when describing program inputs)
		struct bound_sampler
		{
			VkFormat format;
			VkImage image;
			VkComponentMapping mapping;
		};

		// Snapshot of a buffer binding (used when describing program inputs)
		struct bound_buffer
		{
			VkFormat format = VK_FORMAT_UNDEFINED;
			VkBuffer buffer = nullptr;
			u64 offset = 0;
			u64 size = 0;
		};

		// One named shader resource: which stage it belongs to, its type,
		// the backing resource description, and its descriptor binding location.
		struct program_input
		{
			::glsl::program_domain domain;
			program_input_type type;

			bound_buffer as_buffer;
			bound_sampler as_sampler;

			u32 location;
			std::string name;
		};

		// Wraps a single GLSL shader stage: holds the source, its SPIR-V
		// translation and the resulting VkShaderModule.
		class shader
		{
			::glsl::program_domain type = ::glsl::program_domain::glsl_vertex_program;
			VkShaderModule m_handle = VK_NULL_HANDLE;
			std::string m_source;
			std::vector<u32> m_compiled; // SPIR-V words produced by compile()

		public:
			shader() = default;
			~shader() = default;

			void create(::glsl::program_domain domain, const std::string& source);

			VkShaderModule compile();

			void destroy();

			const std::string& get_source() const;
			const std::vector<u32> get_compiled() const;

			VkShaderModule get_handle() const;
		};

		// A linked pipeline plus its reflected resource bindings. Owns the
		// VkPipeline handle (destroyed in the destructor); non-copyable/movable.
		class program
		{
			std::array<std::vector<program_input>, input_type_max_enum> uniforms;
			VkDevice m_device;

			// Cached descriptor binding slots for texture units per stage
			std::array<u32, 16> fs_texture_bindings;
			std::array<u32, 16> fs_texture_mirror_bindings; // stencil-mirror variants of fs textures
			std::array<u32, 4> vs_texture_bindings;
			bool linked;

			void create_impl();

		public:
			VkPipeline pipeline;
			VkPipelineLayout pipeline_layout;
			u64 attribute_location_mask;
			u64 vertex_attributes_mask;

			program(VkDevice dev, VkPipeline p, VkPipelineLayout layout, const std::vector<program_input> &vertex_input, const std::vector<program_input>& fragment_inputs);
			program(VkDevice dev, VkPipeline p, VkPipelineLayout layout);
			program(const program&) = delete;
			program(program&& other) = delete;
			~program();

			program& load_uniforms(const std::vector<program_input>& inputs);
			program& link();

			bool has_uniform(program_input_type type, const std::string &uniform_name);

			// Write a resource into the given descriptor set, addressed either
			// by uniform name, texture unit, or raw binding point.
			void bind_uniform(const VkDescriptorImageInfo &image_descriptor, const std::string &uniform_name, VkDescriptorType type, vk::descriptor_set &set);
			void bind_uniform(const VkDescriptorImageInfo &image_descriptor, int texture_unit, ::glsl::program_domain domain, vk::descriptor_set &set, bool is_stencil_mirror = false);
			void bind_uniform(const VkDescriptorBufferInfo &buffer_descriptor, u32 binding_point, vk::descriptor_set &set);
			void bind_uniform(const VkBufferView &buffer_view, u32 binding_point, vk::descriptor_set &set);
			void bind_uniform(const VkBufferView &buffer_view, program_input_type type, const std::string &binding_name, vk::descriptor_set &set);

			void bind_buffer(const VkDescriptorBufferInfo &buffer_descriptor, u32 binding_point, VkDescriptorType type, vk::descriptor_set &set);
		};
	}
}
| 3,067
|
C++
|
.h
| 87
| 31.586207
| 174
| 0.733424
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,070
|
VKFormats.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFormats.h
|
#pragma once
#include "VulkanAPI.h"
#include "../gcm_enums.h"
#include <tuple>
namespace vk
{
	class image;
	struct gpu_formats_support;

	// Sampler minification settings derived from an RSX minify filter
	struct minification_filter
	{
		VkFilter filter;
		VkSamplerMipmapMode mipmap_mode;
		bool sample_mipmaps;
	};

	VkBorderColor get_border_color(u32 color);

	// RSX -> Vulkan format translation helpers; 'support' narrows the choice
	// to formats the active device actually exposes.
	VkFormat get_compatible_depth_surface_format(const gpu_formats_support& support, rsx::surface_depth_format2 format);
	VkFormat get_compatible_sampler_format(const gpu_formats_support& support, u32 format);
	VkFormat get_compatible_srgb_format(VkFormat rgb_format);

	// Format introspection
	u8 get_format_texel_width(VkFormat format);
	std::pair<u8, u8> get_format_element_size(VkFormat format);
	std::pair<bool, u32> get_format_convert_flags(VkFormat format);
	bool formats_are_bitcast_compatible(image* image1, image* image2);

	// Sampler-state translation from RSX texture state
	minification_filter get_min_filter(rsx::texture_minify_filter min_filter);
	VkFilter get_mag_filter(rsx::texture_magnify_filter mag_filter);
	VkSamplerAddressMode vk_wrap_mode(rsx::texture_wrap_mode gcm_wrap);
	float max_aniso(rsx::texture_max_anisotropy gcm_aniso);
	std::array<VkComponentSwizzle, 4> get_component_mapping(u32 format);

	// Returns the Vulkan topology for an RSX primitive; the bool flags whether
	// emulation (index expansion or similar) is required -- confirm exact meaning in the .cpp.
	std::pair<VkPrimitiveTopology, bool> get_appropriate_topology(rsx::primitive_type mode);
}
| 1,233
|
C++
|
.h
| 29
| 40.482759
| 117
| 0.796497
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,071
|
VulkanAPI.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VulkanAPI.h
|
#pragma once

// Select the platform-specific WSI surface extension before including vulkan.h
#ifdef _WIN32
#define VK_USE_PLATFORM_WIN32_KHR
#elif defined(__APPLE__)
#define VK_USE_PLATFORM_MACOS_MVK
#elif HAVE_X11
#define VK_USE_PLATFORM_XLIB_KHR
#endif

// MSVC: silence macro-redefinition warnings triggered by the platform defines above
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4005 )
#endif

#include <vulkan/vulkan.h>

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#include <util/types.hpp>

// Back-compat shims: define extension enums/structs locally when building
// against Vulkan SDK headers that predate these extensions. The numeric
// values mirror the official extension registrations.

#ifndef VK_EXT_attachment_feedback_loop_layout
#define VK_EXT_attachment_feedback_loop_layout 1
#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME "VK_EXT_attachment_feedback_loop_layout"
#define VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT static_cast<VkImageLayout>(1000339000)
#define VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT 0x00080000
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT static_cast<VkStructureType>(1000339000)

typedef struct VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT {
	VkStructureType sType;
	void* pNext;
	VkBool32 attachmentFeedbackLoopLayout;
} VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT;
#endif

#ifndef VK_KHR_fragment_shader_barycentric
#define VK_KHR_fragment_shader_barycentric 1
#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1
#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_KHR_fragment_shader_barycentric"
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR static_cast<VkStructureType>(1000203000)

typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR {
	VkStructureType sType;
	void* pNext;
	VkBool32 fragmentShaderBarycentric;
} VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR;

typedef struct VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR {
	VkStructureType sType;
	void* pNext;
	VkBool32 triStripVertexOrderIndependentOfProvokingVertex;
} VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR;
#endif
| 1,974
|
C++
|
.h
| 45
| 41.955556
| 127
| 0.815144
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,072
|
VKResolveHelper.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKResolveHelper.h
|
#pragma once
#include "VKCompute.h"
#include "VKOverlays.h"
#include "vkutils/image.h"
namespace vk
{
// Base for compute-shader MSAA resolve/unresolve. Generates a compute shader
// that copies between a multisampled image and its 1-sample resolve surface.
// direction 0 = resolve (MSAA -> flat), direction 1 = unresolve (flat -> MSAA).
struct cs_resolve_base : compute_task
{
	vk::viewable_image* multisampled = nullptr;
	vk::viewable_image* resolve = nullptr;

	// Workgroup dimensions, chosen from the device's optimal group size
	u32 cs_wave_x = 1;
	u32 cs_wave_y = 1;

	cs_resolve_base()
	{}

	virtual ~cs_resolve_base()
	{}

	// FIXME: move body to cpp
	// Assembles the full compute shader source around the derived class's
	// per-texel 'kernel'. 'format_prefix' is the GLSL image format qualifier
	// for the readonly-bound image.
	void build(const std::string& kernel, const std::string& format_prefix, int direction)
	{
		create();

		// TODO: Tweak occupancy
		switch (optimal_group_size)
		{
		default:
		case 64:
			cs_wave_x = 8;
			cs_wave_y = 8;
			break;
		case 32:
			cs_wave_x = 8;
			cs_wave_y = 4;
			break;
		}

		// Substitution table for the %wx/%wy placeholders in the header
		const std::pair<std::string_view, std::string> syntax_replace[] =
		{
			{ "%wx", std::to_string(cs_wave_x) },
			{ "%wy", std::to_string(cs_wave_y) },
		};

		m_src =
		"#version 430\n"
		"layout(local_size_x=%wx, local_size_y=%wy, local_size_z=1) in;\n"
		"\n";

		m_src = fmt::replace_all(m_src, syntax_replace);

		// The readonly image carries the explicit format qualifier; the
		// writeonly image does not need one.
		if (direction == 0)
		{
			m_src +=
			"layout(set=0, binding=0, " + format_prefix + ") uniform readonly restrict image2DMS multisampled;\n"
			"layout(set=0, binding=1) uniform writeonly restrict image2D resolve;\n";
		}
		else
		{
			m_src +=
			"layout(set=0, binding=0) uniform writeonly restrict image2DMS multisampled;\n"
			"layout(set=0, binding=1, " + format_prefix + ") uniform readonly restrict image2D resolve;\n";
		}

		// One invocation per resolve-surface texel; sample index is derived
		// from the texel's position within its sample_count-sized cell.
		m_src +=
		"\n"
		"void main()\n"
		"{\n"
		" ivec2 resolve_size = imageSize(resolve);\n"
		" ivec2 aa_size = imageSize(multisampled);\n"
		" ivec2 sample_count = resolve_size / aa_size;\n"
		"\n"
		" if (any(greaterThanEqual(gl_GlobalInvocationID.xy, uvec2(resolve_size)))) return;"
		"\n"
		" ivec2 resolve_coords = ivec2(gl_GlobalInvocationID.xy);\n"
		" ivec2 aa_coords = resolve_coords / sample_count;\n"
		" ivec2 sample_loc = ivec2(resolve_coords % sample_count);\n"
		" int sample_index = sample_loc.x + (sample_loc.y * sample_count.y);\n"
		+ kernel +
		"}\n";

		rsx_log.notice("Compute shader:\n%s", m_src);
	}

	std::vector<std::pair<VkDescriptorType, u8>> get_descriptor_layout() override
	{
		// Two storage images: the MSAA surface and the resolve surface
		return
		{
			{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2 }
		};
	}

	void declare_inputs() override
	{
		std::vector<vk::glsl::program_input> inputs =
		{
			{
				::glsl::program_domain::glsl_compute_program,
				vk::glsl::program_input_type::input_type_texture,
				{}, {},
				0,
				"multisampled"
			},
			{
				::glsl::program_domain::glsl_compute_program,
				vk::glsl::program_input_type::input_type_texture,
				{}, {},
				1,
				"resolve"
			}
		};

		m_program->load_uniforms(inputs);
	}

	void bind_resources() override
	{
		auto msaa_view = multisampled->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_VIEW_MULTISAMPLED));
		auto resolved_view = resolve->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));
		m_program->bind_uniform({ VK_NULL_HANDLE, msaa_view->value, multisampled->current_layout }, "multisampled", VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_descriptor_set);
		m_program->bind_uniform({ VK_NULL_HANDLE, resolved_view->value, resolve->current_layout }, "resolve", VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_descriptor_set);
	}

	// Dispatches one invocation per resolve-surface texel, rounded up to
	// whole workgroups.
	void run(const vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image)
	{
		ensure(msaa_image->samples() > 1);
		ensure(resolve_image->samples() == 1);

		multisampled = msaa_image;
		resolve = resolve_image;

		const u32 invocations_x = utils::align(resolve_image->width(), cs_wave_x) / cs_wave_x;
		const u32 invocations_y = utils::align(resolve_image->height(), cs_wave_y) / cs_wave_y;

		compute_task::run(cmd, invocations_x, invocations_y, 1);
	}
};
// Color MSAA resolve: copies each sample of the MSAA image into the
// corresponding texel of the flat resolve surface.
struct cs_resolve_task : cs_resolve_base
{
	cs_resolve_task(const std::string& format_prefix, bool bgra_swap = false)
	{
		// Allow rgba->bgra transformation for old GeForce cards
		const std::string swizzle = bgra_swap? ".bgra" : "";

		std::string kernel =
		" vec4 aa_sample = imageLoad(multisampled, aa_coords, sample_index);\n"
		" imageStore(resolve, resolve_coords, aa_sample" + swizzle + ");\n";

		build(kernel, format_prefix, 0);
	}
};
// Color MSAA unresolve: scatters the flat resolve surface back into the
// samples of the MSAA image (inverse of cs_resolve_task).
struct cs_unresolve_task : cs_resolve_base
{
	cs_unresolve_task(const std::string& format_prefix, bool bgra_swap = false)
	{
		// Allow rgba->bgra transformation for old GeForce cards
		const std::string swizzle = bgra_swap? ".bgra" : "";

		std::string kernel =
		" vec4 resolved_sample = imageLoad(resolve, resolve_coords);\n"
		" imageStore(multisampled, aa_coords, sample_index, resolved_sample" + swizzle + ");\n";

		build(kernel, format_prefix, 1);
	}
};
// Base for fragment-shader based depth/stencil resolve passes. Compute
// shaders cannot write depth/stencil, so these passes draw a full-screen
// quad and write gl_FragDepth (and stencil where supported).
struct depth_resolve_base : public overlay_pass
{
	// Sample grid dimensions of the MSAA surface (2x1 or 2x2)
	u8 samples_x = 1;
	u8 samples_y = 1;
	// Push-constant payload; [0]=samples_x, [1]=samples_y, [2] is pass-specific
	s32 static_parameters[4];
	s32 static_parameters_width = 2; // number of ivec components declared in the shader

	depth_resolve_base()
	{
		renderpass_config.set_depth_mask(true);
		renderpass_config.enable_depth_test(VK_COMPARE_OP_ALWAYS);

		// Depth-stencil buffers are almost never filterable, and we do not need it here (1:1 mapping)
		m_sampler_filter = VK_FILTER_NEAREST;
	}

	// Assembles the vertex/fragment shader sources around the derived class's
	// fragment 'kernel'. 'inputs' are sampler declarations bound from set 0,
	// binding 1 upward.
	void build(const std::string& kernel, const std::string& extensions, const std::vector<const char*>& inputs)
	{
		// Fixed full-screen triangle-strip quad
		vs_src =
		"#version 450\n"
		"#extension GL_ARB_separate_shader_objects : enable\n\n"
		"\n"
		"void main()\n"
		"{\n"
		" vec2 positions[] = {vec2(-1., -1.), vec2(1., -1.), vec2(-1., 1.), vec2(1., 1.)};\n"
		" gl_Position = vec4(positions[gl_VertexIndex % 4], 0., 1.);\n"
		"}\n";

		fs_src =
		"#version 420\n"
		"#extension GL_ARB_separate_shader_objects : enable\n";

		// regs[] carries the sample configuration via push constants
		fs_src += extensions +
		"\n"
		"layout(push_constant) uniform static_data{ ivec" + std::to_string(static_parameters_width) + " regs[1]; };\n";

		int binding = 1;
		for (const auto& input : inputs)
		{
			fs_src += "layout(set=0, binding=" + std::to_string(binding++) + ") uniform " + input + ";\n";
		}

		fs_src +=
		"//layout(pixel_center_integer) in vec4 gl_FragCoord;\n"
		"\n"
		"void main()\n"
		"{\n";

		fs_src += kernel +
		"}\n";

		rsx_log.notice("Resolve shader:\n%s", fs_src);
	}

	std::vector<VkPushConstantRange> get_push_constants() override
	{
		VkPushConstantRange constant;
		constant.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
		constant.offset = 0;
		constant.size = 16;
		return { constant };
	}

	void update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/) override
	{
		// Pushes only the declared components (static_parameters_width * 4 bytes)
		vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, static_parameters_width * 4, static_parameters);
	}

	// Derives the sample grid layout from the MSAA image's sample count
	void update_sample_configuration(vk::image* msaa_image)
	{
		switch (msaa_image->samples())
		{
		case 1:
			fmt::throw_exception("MSAA input not multisampled!");
		case 2:
			samples_x = 2;
			samples_y = 1;
			break;
		case 4:
			samples_x = samples_y = 2;
			break;
		default:
			fmt::throw_exception("Unsupported sample count %d", msaa_image->samples());
		}

		static_parameters[0] = samples_x;
		static_parameters[1] = samples_y;
	}
};
// Resolves only the depth aspect of an MSAA surface by reading the matching
// sample per texel and writing it to gl_FragDepth.
struct depthonly_resolve : depth_resolve_base
{
	depthonly_resolve()
	{
		build(
			" ivec2 out_coord = ivec2(gl_FragCoord.xy);\n"
			" ivec2 in_coord = (out_coord / regs[0].xy);\n"
			" ivec2 sample_loc = out_coord % regs[0].xy;\n"
			" int sample_index = sample_loc.x + (sample_loc.y * regs[0].y);\n"
			" float frag_depth = texelFetch(fs0, in_coord, sample_index).x;\n"
			" gl_FragDepth = frag_depth;\n",
			"",
			{ "sampler2DMS fs0" });
	}

	void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
	{
		update_sample_configuration(msaa_image);
		auto src_view = msaa_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_VIEW_MULTISAMPLED));

		overlay_pass::run(
			cmd,
			{ 0, 0, resolve_image->width(), resolve_image->height() },
			resolve_image, src_view,
			render_pass);
	}
};
// Writes the flat depth surface back into the samples of an MSAA surface.
// Runs with per-sample shading (gl_SampleID) so each sample is filled.
struct depthonly_unresolve : depth_resolve_base
{
	depthonly_unresolve()
	{
		build(
			" ivec2 pixel_coord = ivec2(gl_FragCoord.xy);\n"
			" pixel_coord *= regs[0].xy;\n"
			" pixel_coord.x += (gl_SampleID % regs[0].x);\n"
			" pixel_coord.y += (gl_SampleID / regs[0].x);\n"
			" float frag_depth = texelFetch(fs0, pixel_coord, 0).x;\n"
			" gl_FragDepth = frag_depth;\n",
			"",
			{ "sampler2D fs0" });
	}

	void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
	{
		// Force per-sample shading so the kernel runs once per sample
		renderpass_config.set_multisample_state(msaa_image->samples(), 0xFFFF, true, false, false);
		renderpass_config.set_multisample_shading_rate(1.f);
		update_sample_configuration(msaa_image);

		auto src_view = resolve_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));

		overlay_pass::run(
			cmd,
			{ 0, 0, msaa_image->width(), msaa_image->height() },
			msaa_image, src_view,
			render_pass);
	}
};
// Resolves only the stencil aspect of an MSAA surface without relying on
// GL_ARB_shader_stencil_export: stencil is cleared, then the quad is drawn
// once per stencil bit with a matching write mask; fragments whose source
// stencil lacks the bit are discarded, reconstructing the value bit by bit.
struct stencilonly_resolve : depth_resolve_base
{
	VkClearRect region{};
	VkClearAttachment clear_info{};

	stencilonly_resolve()
	{
		renderpass_config.enable_stencil_test(
			VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, // Always replace
			VK_COMPARE_OP_ALWAYS,                                                // Always pass
			0xFF,                                                                // Full write-through
			0xFF);                                                               // Write active bit

		renderpass_config.set_stencil_mask(0xFF);
		renderpass_config.set_depth_mask(false);

		clear_info.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
		region.baseArrayLayer = 0;
		region.layerCount = 1;

		// regs[0].z carries the stencil bit currently being tested
		static_parameters_width = 3;

		build(
			" ivec2 out_coord = ivec2(gl_FragCoord.xy);\n"
			" ivec2 in_coord = (out_coord / regs[0].xy);\n"
			" ivec2 sample_loc = out_coord % regs[0].xy;\n"
			" int sample_index = sample_loc.x + (sample_loc.y * regs[0].y);\n"
			" uint frag_stencil = texelFetch(fs0, in_coord, sample_index).x;\n"
			" if ((frag_stencil & uint(regs[0].z)) == 0) discard;\n",
			"",
			{"usampler2DMS fs0"});
	}

	void get_dynamic_state_entries(std::vector<VkDynamicState>& state_descriptors) override
	{
		state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
	}

	void emit_geometry(vk::command_buffer& cmd) override
	{
		// FIX: '&region' had been mangled into '®ion' ('&re' -> '®'), which does not compile.
		// Clear stencil first, then set each bit individually.
		vkCmdClearAttachments(cmd, 1, &clear_info, 1, &region);

		for (s32 write_mask = 0x1; write_mask <= 0x80; write_mask <<= 1)
		{
			vkCmdSetStencilWriteMask(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
			// Offset 8 = regs[0].z: the stencil bit tested in this iteration
			vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
			overlay_pass::emit_geometry(cmd);
		}
	}

	void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
	{
		update_sample_configuration(msaa_image);
		auto stencil_view = msaa_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_VIEW_MULTISAMPLED), VK_IMAGE_ASPECT_STENCIL_BIT);

		region.rect.extent.width = resolve_image->width();
		region.rect.extent.height = resolve_image->height();

		overlay_pass::run(
			cmd,
			{ 0, 0, resolve_image->width(), resolve_image->height() },
			resolve_image, stencil_view,
			render_pass);
	}
};
struct stencilonly_unresolve : depth_resolve_base
{
VkClearRect region{};
VkClearAttachment clear_info{};
stencilonly_unresolve()
{
renderpass_config.enable_stencil_test(
VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, // Always replace
VK_COMPARE_OP_ALWAYS, // Always pass
0xFF, // Full write-through
0xFF); // Write active bit
renderpass_config.set_stencil_mask(0xFF);
renderpass_config.set_depth_mask(false);
clear_info.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
region.baseArrayLayer = 0;
region.layerCount = 1;
static_parameters_width = 3;
build(
" ivec2 pixel_coord = ivec2(gl_FragCoord.xy);\n"
" pixel_coord *= regs[0].xy;\n"
" pixel_coord.x += (gl_SampleID % regs[0].x);\n"
" pixel_coord.y += (gl_SampleID / regs[0].x);\n"
" uint frag_stencil = texelFetch(fs0, pixel_coord, 0).x;\n"
" if ((frag_stencil & uint(regs[0].z)) == 0) discard;\n",
"",
{ "usampler2D fs0" });
}
void get_dynamic_state_entries(std::vector<VkDynamicState>& state_descriptors) override
{
state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
}
void emit_geometry(vk::command_buffer& cmd) override
{
vkCmdClearAttachments(cmd, 1, &clear_info, 1, ®ion);
for (s32 write_mask = 0x1; write_mask <= 0x80; write_mask <<= 1)
{
vkCmdSetStencilWriteMask(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
overlay_pass::emit_geometry(cmd);
}
}
void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
{
renderpass_config.set_multisample_state(msaa_image->samples(), 0xFFFF, true, false, false);
renderpass_config.set_multisample_shading_rate(1.f);
update_sample_configuration(msaa_image);
auto stencil_view = resolve_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY), VK_IMAGE_ASPECT_STENCIL_BIT);
region.rect.extent.width = resolve_image->width();
region.rect.extent.height = resolve_image->height();
overlay_pass::run(
cmd,
{ 0, 0, msaa_image->width(), msaa_image->height() },
msaa_image, stencil_view,
render_pass);
}
};
// Resolves depth and stencil in a single pass using
// GL_ARB_shader_stencil_export (gl_FragStencilRefARB) -- only usable on
// devices exposing that extension (hence the _EXT suffix).
struct depthstencil_resolve_EXT : depth_resolve_base
{
	depthstencil_resolve_EXT()
	{
		renderpass_config.enable_stencil_test(
			VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, // Always replace
			VK_COMPARE_OP_ALWAYS,                                                // Always pass
			0xFF,                                                                // Full write-through
			0);                                                                  // Unused

		renderpass_config.set_stencil_mask(0xFF);
		m_num_usable_samplers = 2; // fs0 = depth aspect, fs1 = stencil aspect

		build(
			" ivec2 out_coord = ivec2(gl_FragCoord.xy);\n"
			" ivec2 in_coord = (out_coord / regs[0].xy);\n"
			" ivec2 sample_loc = out_coord % ivec2(regs[0].xy);\n"
			" int sample_index = sample_loc.x + (sample_loc.y * regs[0].y);\n"
			" float frag_depth = texelFetch(fs0, in_coord, sample_index).x;\n"
			" uint frag_stencil = texelFetch(fs1, in_coord, sample_index).x;\n"
			" gl_FragDepth = frag_depth;\n"
			" gl_FragStencilRefARB = int(frag_stencil);\n",
			"#extension GL_ARB_shader_stencil_export : enable\n",
			{ "sampler2DMS fs0", "usampler2DMS fs1" });
	}

	void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
	{
		update_sample_configuration(msaa_image);
		auto depth_view = msaa_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_VIEW_MULTISAMPLED), VK_IMAGE_ASPECT_DEPTH_BIT);
		auto stencil_view = msaa_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_VIEW_MULTISAMPLED), VK_IMAGE_ASPECT_STENCIL_BIT);

		overlay_pass::run(
			cmd,
			{ 0, 0, resolve_image->width(), resolve_image->height() },
			resolve_image, { depth_view, stencil_view },
			render_pass);
	}
};
// Writes flat depth+stencil back into an MSAA surface in one pass using
// GL_ARB_shader_stencil_export, with per-sample shading to fill every sample.
struct depthstencil_unresolve_EXT : depth_resolve_base
{
	depthstencil_unresolve_EXT()
	{
		renderpass_config.enable_stencil_test(
			VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, // Always replace
			VK_COMPARE_OP_ALWAYS,                                                // Always pass
			0xFF,                                                                // Full write-through
			0);                                                                  // Unused

		renderpass_config.set_stencil_mask(0xFF);
		m_num_usable_samplers = 2; // fs0 = depth aspect, fs1 = stencil aspect

		build(
			" ivec2 pixel_coord = ivec2(gl_FragCoord.xy);\n"
			" pixel_coord *= regs[0].xy;\n"
			" pixel_coord.x += (gl_SampleID % regs[0].x);\n"
			" pixel_coord.y += (gl_SampleID / regs[0].x);\n"
			" float frag_depth = texelFetch(fs0, pixel_coord, 0).x;\n"
			" uint frag_stencil = texelFetch(fs1, pixel_coord, 0).x;\n"
			" gl_FragDepth = frag_depth;\n"
			" gl_FragStencilRefARB = int(frag_stencil);\n",
			"#extension GL_ARB_shader_stencil_export : enable\n",
			{ "sampler2D fs0", "usampler2D fs1" });
	}

	void run(vk::command_buffer& cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image, VkRenderPass render_pass)
	{
		// Force per-sample shading so the kernel runs once per sample
		renderpass_config.set_multisample_state(msaa_image->samples(), 0xFFFF, true, false, false);
		renderpass_config.set_multisample_shading_rate(1.f);
		update_sample_configuration(msaa_image);

		auto depth_view = resolve_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY), VK_IMAGE_ASPECT_DEPTH_BIT);
		auto stencil_view = resolve_image->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY), VK_IMAGE_ASPECT_STENCIL_BIT);

		overlay_pass::run(
			cmd,
			{ 0, 0, msaa_image->width(), msaa_image->height() },
			msaa_image, { depth_view, stencil_view },
			render_pass);
	}
};
//void resolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src);
//void unresolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src);
void reset_resolve_resources();
void clear_resolve_helpers();
}
| 17,765
|
C++
|
.h
| 458
| 34.633188
| 163
| 0.646782
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,073
|
VKDMA.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKDMA.h
|
#pragma once
#include "vkutils/buffer_object.h"
#include "vkutils/commands.h"
namespace vk
{
	// (offset into the buffer, backing buffer)
	using dma_mapping_handle = std::pair<u32, vk::buffer*>;

	// Global DMA mapping API over guest memory ranges
	dma_mapping_handle map_dma(u32 local_address, u32 length);
	void load_dma(u32 local_address, u32 length);  // guest memory -> GPU buffer
	void flush_dma(u32 local_address, u32 length); // GPU buffer -> guest memory
	void unmap_dma(u32 local_address, u32 length);
	void clear_dma_resources();

	// A GPU buffer shadowing a range of guest memory. Blocks can be chained:
	// a child forwards to its parent at a fixed offset (see inheritance_info).
	class dma_block
	{
	protected:
		struct
		{
			dma_block* parent = nullptr;
			u32 block_offset = 0; // this block's offset within the parent
		}
		inheritance_info;

		u32 base_address = 0;          // guest start address covered by this block
		u8* memory_mapping = nullptr;  // host pointer to the mapped buffer, when mapped
		std::unique_ptr<buffer> allocated_memory;

		virtual void allocate(const render_device& dev, usz size);
		virtual void free();
		virtual void* map_range(const utils::address_range& range);
		virtual void unmap();

	public:
		dma_block() = default;
		virtual ~dma_block();

		virtual void init(const render_device& dev, u32 addr, usz size);
		virtual void init(dma_block* parent, u32 addr, usz size);
		virtual void flush(const utils::address_range& range); // write-back to guest memory
		virtual void load(const utils::address_range& range);  // refresh from guest memory

		// Returns (offset, buffer) for the requested sub-range
		std::pair<u32, buffer*> get(const utils::address_range& range);

		u32 start() const;
		u32 end() const;
		u32 size() const;

		dma_block* head();
		const dma_block* head() const;

		virtual void set_parent(dma_block* parent);
		virtual void extend(const render_device& dev, usz new_size);
	};

	// Variant backed by external host memory (see the .cpp for the exact
	// allocation mechanism); overrides mapping and sync behavior.
	class dma_block_EXT: public dma_block
	{
	private:
		void allocate(const render_device& dev, usz size) override;
		void* map_range(const utils::address_range& range) override;
		void unmap() override;

	public:
		void flush(const utils::address_range& range) override;
		void load(const utils::address_range& range) override;
	};
}
| 1,714
|
C++
|
.h
| 54
| 28.981481
| 66
| 0.732848
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,074
|
VKVertexProgram.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKVertexProgram.h
|
#pragma once
#include "../Program/VertexProgramDecompiler.h"
#include "Utilities/Thread.h"
#include "VulkanAPI.h"
#include "VKProgramPipeline.h"
#include "vkutils/pipeline_binding_table.h"
namespace vk
{
class shader_interpreter;
}
// Vulkan backend of the RSX vertex program decompiler: translates an
// RSXVertexProgram into GLSL, collecting the program's resource inputs.
struct VKVertexDecompilerThread : public VertexProgramDecompiler
{
	friend class vk::shader_interpreter;

	std::string &m_shader; // receives the generated GLSL source
	std::vector<vk::glsl::program_input> inputs;
	class VKVertexProgram *vk_prog; // destination program, receives the collected inputs
	vk::pipeline_binding_table m_binding_table{};

	struct
	{
		bool emulate_conditional_rendering{false};
	}
	m_device_props;

protected:
	// VertexProgramDecompiler customization points for GLSL output
	std::string getFloatTypeName(usz elementCount) override;
	std::string getIntTypeName(usz elementCount) override;
	std::string getFunction(FUNCTION) override;
	std::string compareFunction(COMPARE, const std::string&, const std::string&, bool scalar) override;

	void insertHeader(std::stringstream &OS) override;
	void insertInputs(std::stringstream &OS, const std::vector<ParamType> &inputs) override;
	void insertConstants(std::stringstream &OS, const std::vector<ParamType> &constants) override;
	void insertOutputs(std::stringstream &OS, const std::vector<ParamType> &outputs) override;
	void insertMainStart(std::stringstream &OS) override;
	void insertMainEnd(std::stringstream &OS) override;

	const RSXVertexProgram &rsx_vertex_program;

public:
	VKVertexDecompilerThread(const RSXVertexProgram &prog, std::string& shader, ParamArray&, class VKVertexProgram &dst)
		: VertexProgramDecompiler(prog)
		, m_shader(shader)
		, vk_prog(&dst)
		, rsx_vertex_program(prog)
	{
	}

	// Runs the decompilation
	void Task();

	const std::vector<vk::glsl::program_input>& get_inputs() { return inputs; }
};
// A compiled RSX vertex program: owns the decompiled GLSL shader, its
// VkShaderModule handle and the reflected uniform inputs.
class VKVertexProgram : public rsx::VertexProgramBase
{
public:
	VKVertexProgram();
	~VKVertexProgram();

	ParamArray parr;
	VkShaderModule handle = nullptr;
	vk::glsl::shader shader;
	std::vector<vk::glsl::program_input> uniforms;

	// Decompile -> Compile produces 'handle'; SetInputs stores the reflected inputs
	void Decompile(const RSXVertexProgram& prog);
	void Compile();
	void SetInputs(std::vector<vk::glsl::program_input>& inputs);

private:
	void Delete();
};
| 2,044
|
C++
|
.h
| 60
| 32.1
| 117
| 0.787525
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,075
|
VKQueryPool.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKQueryPool.h
|
#pragma once
#include "VulkanAPI.h"
#include <deque>
namespace vk
{
	class command_buffer;
	class query_pool;
	class render_device;

	// Manages a growable set of Vulkan query pools (occlusion queries by default),
	// handing out individual query slots and recycling exhausted pools.
	class query_pool_manager
	{
		// Book-keeping for a single query slot.
		struct query_slot_info
		{
			query_pool* pool;  // Pool this slot belongs to
			bool any_passed;   // Cached "any samples passed" result
			bool active;       // Slot is between begin_query and end_query
			bool ready;        // Result has been fetched and cached in 'data'
			u32 data;          // Cached query result
		};

		std::vector<std::unique_ptr<query_pool>> m_consumed_pools; // Retired pools awaiting cleanup
		std::unique_ptr<query_pool> m_current_query_pool;          // Pool currently handing out slots
		std::deque<u32> m_available_slots;                         // Free slot indices
		u32 m_pool_lifetime_counter = 0;

		VkQueryType query_type = VK_QUERY_TYPE_OCCLUSION;
		VkQueryResultFlags result_flags = VK_QUERY_RESULT_PARTIAL_BIT;
		VkQueryControlFlags control_flags = 0;

		vk::render_device* owner = nullptr;

		std::vector<query_slot_info> query_slot_status;

		// Attempt a non-blocking fetch of a query's result; returns true when available.
		bool poke_query(query_slot_info& query, u32 index, VkQueryResultFlags flags);
		void allocate_new_pool(vk::command_buffer& cmd);
		void reallocate_pool(vk::command_buffer& cmd);
		// Frees consumed pools whose queries have all been returned.
		void run_pool_cleanup();

	public:
		query_pool_manager(vk::render_device& dev, VkQueryType type, u32 num_entries);
		~query_pool_manager();

		void set_control_flags(VkQueryControlFlags control_flags, VkQueryResultFlags result_flags);

		void begin_query(vk::command_buffer& cmd, u32 index);
		void end_query(vk::command_buffer& cmd, u32 index);

		// Non-blocking: true when the result for 'index' is available.
		bool check_query_status(u32 index);
		// Blocking fetch of the query result.
		u32 get_query_result(u32 index);
		// GPU-side copy of query results into 'dst' (no CPU round-trip).
		void get_query_result_indirect(vk::command_buffer& cmd, u32 index, u32 count, VkBuffer dst, VkDeviceSize dst_offset);

		u32 allocate_query(vk::command_buffer& cmd);
		void free_query(vk::command_buffer&/*cmd*/, u32 index);

		// Bulk-release a list of query slots.
		template<template<class> class _List>
		void free_queries(vk::command_buffer& cmd, _List<u32>& list)
		{
			for (const auto index : list)
			{
				free_query(cmd, index);
			}
		}
	};
};
| 1,801
|
C++
|
.h
| 52
| 30.346154
| 120
| 0.714783
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,076
|
VKFragmentProgram.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFragmentProgram.h
|
#pragma once
#include "../Program/FragmentProgramDecompiler.h"
#include "../Program/GLSLTypes.h"
#include "VulkanAPI.h"
#include "VKProgramPipeline.h"
#include "vkutils/pipeline_binding_table.h"
namespace vk
{
class shader_interpreter;
}
// Decompiles an RSX fragment program into GLSL for the Vulkan backend.
// Collects resource bindings so the owning VKFragmentProgram can build its
// uniform/descriptor layout from them.
struct VKFragmentDecompilerThread : public FragmentProgramDecompiler
{
	friend class vk::shader_interpreter;
	std::string& m_shader;                       // Receives the generated GLSL source
	ParamArray& m_parrDummy;                     // Parameter array supplied by the owner
	std::vector<vk::glsl::program_input> inputs; // Bindings discovered while decompiling
	class VKFragmentProgram *vk_prog;            // Destination program; receives the inputs
	glsl::shader_properties m_shader_props{};
	vk::pipeline_binding_table m_binding_table{};

public:
	VKFragmentDecompilerThread(std::string& shader, ParamArray& parr, const RSXFragmentProgram &prog, u32& size, class VKFragmentProgram& dst)
		: FragmentProgramDecompiler(prog, size)
		, m_shader(shader)
		, m_parrDummy(parr)
		, vk_prog(&dst)
	{
	}

	// Runs the decompilation and fills m_shader / inputs.
	void Task();
	const std::vector<vk::glsl::program_input>& get_inputs() { return inputs; }

protected:
	// FragmentProgramDecompiler overrides: GLSL-specific emitters for types,
	// intrinsics and the shader skeleton.
	std::string getFloatTypeName(usz elementCount) override;
	std::string getHalfTypeName(usz elementCount) override;
	std::string getFunction(FUNCTION) override;
	std::string compareFunction(COMPARE, const std::string&, const std::string&) override;

	void insertHeader(std::stringstream &OS) override;
	void insertInputs(std::stringstream &OS) override;
	void insertOutputs(std::stringstream &OS) override;
	void insertConstants(std::stringstream &OS) override;
	void insertGlobalFunctions(std::stringstream &OS) override;
	void insertMainStart(std::stringstream &OS) override;
	void insertMainEnd(std::stringstream &OS) override;
};
/** Storage for a fragment program in the process of recompilation.
 *  This class calls Vulkan functions and should only be used from the RSX/Graphics thread.
 */
class VKFragmentProgram
{
public:
	VKFragmentProgram();
	~VKFragmentProgram();

	ParamArray parr;                               // Parameter metadata gathered during decompilation
	VkShaderModule handle = nullptr;               // Compiled shader module (null until Compile())
	u32 id;
	vk::glsl::shader shader;                       // GLSL source + SPIR-V wrapper
	std::vector<usz> FragmentConstantOffsetCache;  // Byte offsets of patchable fragment constants
	std::array<u32, 4> output_color_masks{ {} };   // Per-MRT color write masks
	std::vector<vk::glsl::program_input> uniforms; // Resource bindings used by this program

	void SetInputs(std::vector<vk::glsl::program_input>& inputs);
	/**
	 * Decompile a fragment shader located in the PS3's memory. This function operates synchronously.
	 * @param prog RSXShaderProgram specifying the location and size of the shader in memory
	 */
	void Decompile(const RSXFragmentProgram& prog);

	/** Compile the decompiled fragment shader into a VkShaderModule. */
	void Compile();

private:
	/** Deletes the shader and any stored information */
	void Delete();
};
| 2,543
|
C++
|
.h
| 69
| 34.869565
| 139
| 0.781885
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,077
|
VKAsyncScheduler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKAsyncScheduler.h
|
#pragma once
#include "vkutils/commands.h"
#include "vkutils/sync.h"
#include "Utilities/Thread.h"
#define VK_MAX_ASYNC_COMPUTE_QUEUES 256
namespace vk
{
	// Schedules GPU work onto a dedicated async (compute) queue, managing its own
	// ring of command buffers and the sync primitives (events/semaphores) needed
	// to order that work against the main graphics queue.
	class AsyncTaskScheduler
	{
		// Vulkan resources
		std::vector<command_buffer> m_async_command_queue; // Ring of reusable command buffers
		command_pool m_command_pool;

		// Running state
		command_buffer* m_last_used_cb = nullptr;
		command_buffer* m_current_cb = nullptr;   // Command buffer currently being recorded (null = not recording)
		usz m_next_cb_index = 0;
		atomic_t<u64> m_submit_count = 0;

		// Scheduler
		shared_mutex m_config_mutex;
		bool m_options_initialized = false;
		bool m_use_host_scheduler = false;        // true = host-synchronized ("safe") scheduling mode

		// Sync
		event* m_sync_label = nullptr;            // Primary sync point for the current batch
		atomic_t<bool> m_sync_required = false;
		VkDependencyInfoKHR m_dependency_info{};

		static constexpr u32 events_pool_size = 16384;
		std::vector<std::unique_ptr<vk::event>> m_events_pool;     // Recycled events (indexed round-robin)
		atomic_t<u64> m_next_event_id = 0;

		std::vector<std::unique_ptr<vk::semaphore>> m_semaphore_pool; // Recycled semaphores
		atomic_t<u64> m_next_semaphore_id = 0;

		shared_mutex m_submit_mutex;

		void init_config_options(vk_gpu_scheduler_mode mode, const VkDependencyInfoKHR& queue_dependency);
		void delayed_init();
		void insert_sync_event();

	public:
		AsyncTaskScheduler(vk_gpu_scheduler_mode mode, const VkDependencyInfoKHR& queue_dependency);
		~AsyncTaskScheduler();

		// Returns the command buffer to record into, beginning one if necessary.
		command_buffer* get_current();
		event* get_primary_sync_label();
		semaphore* get_sema();

		// Submits pending recorded work; force_flush submits even a partial batch.
		void flush(queue_submit_t& submit_info, VkBool32 force_flush);
		void destroy();

		// Inline getters
		inline bool is_recording() const { return m_current_cb != nullptr; }
		inline bool is_host_mode() const { return m_use_host_scheduler; }
	};
}
| 1,684
|
C++
|
.h
| 47
| 31.617021
| 101
| 0.717486
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,078
|
VKCompute.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCompute.h
|
#pragma once
#include "VKPipelineCompiler.h"
#include "vkutils/descriptors.h"
#include "vkutils/buffer_object.h"
#include "Emu/IdManager.h"
#include "Utilities/StrUtil.h"
#include "util/asm.hpp"
#include <unordered_map>
namespace vk
{
// Base class for a single GLSL compute job. Owns the shader, the pipeline
// layout/descriptors and (optionally) a parameter buffer. Subclasses supply
// the shader source (m_src) and override bind_resources()/declare_inputs().
struct compute_task
{
	std::string m_src;                            // GLSL source for the compute shader
	vk::glsl::shader m_shader;
	std::unique_ptr<vk::glsl::program> m_program;
	std::unique_ptr<vk::buffer> m_param_buffer;   // Used when parameters don't fit push constants

	vk::descriptor_pool m_descriptor_pool;
	descriptor_set m_descriptor_set;
	VkDescriptorSetLayout m_descriptor_layout = nullptr;
	VkPipelineLayout m_pipeline_layout = nullptr;
	u32 m_used_descriptors = 0;

	bool initialized = false;
	bool unroll_loops = true;           // Hint: unroll the work kernel loop in generated GLSL
	bool use_push_constants = false;    // Parameters delivered via push constants instead of a UBO
	u32 ssbo_count = 1;                 // Number of storage-buffer bindings in the layout
	u32 push_constants_size = 0;
	u32 optimal_group_size = 1;         // Workgroup size chosen for the device
	u32 optimal_kernel_size = 1;        // Items processed per invocation
	u32 max_invocations_x = 65535;      // Device limit on X dispatch dimension

	compute_task() = default;
	virtual ~compute_task() { destroy(); }

	virtual std::vector<std::pair<VkDescriptorType, u8>> get_descriptor_layout();

	void init_descriptors();

	void create();
	void destroy();

	// Subclass hooks: bind SSBOs/etc. before dispatch; declare shader inputs.
	virtual void bind_resources() {}
	virtual void declare_inputs() {}

	void load_program(const vk::command_buffer& cmd);

	// Dispatch helpers; the 1-arg overload splits a linear count across X/Y to
	// respect max_invocations_x.
	void run(const vk::command_buffer& cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z);
	void run(const vk::command_buffer& cmd, u32 num_invocations);
};
// Base for byte-shuffle style kernels operating in-place on one SSBO.
// Subclasses assemble the shader from the snippet strings below and call
// build() with the name of the transform function to apply per element.
struct cs_shuffle_base : compute_task
{
	const vk::buffer* m_data;   // SSBO operated on by the kernel
	u32 m_data_offset = 0;
	u32 m_data_length = 0;
	u32 kernel_size = 1;

	// GLSL fragments stitched together by build()
	std::string variables, work_kernel, loop_advance, suffix;
	std::string method_declarations;

	cs_shuffle_base();

	// Assemble m_src; function_name is the per-element transform ("" = custom work_kernel).
	void build(const char* function_name, u32 _kernel_size = 0);

	void bind_resources() override;

	// Upload kernel parameters (push constants or param buffer).
	void set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count);

	void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_length, u32 data_offset = 0);
};
// In-place byteswap of 16-bit elements (ushort).
struct cs_shuffle_16 : cs_shuffle_base
{
	// byteswap ushort
	cs_shuffle_16()
	{
		cs_shuffle_base::build("bswap_u16");
	}
};

// In-place byteswap of 32-bit elements (ulong).
struct cs_shuffle_32 : cs_shuffle_base
{
	// byteswap_ulong
	cs_shuffle_32()
	{
		cs_shuffle_base::build("bswap_u32");
	}
};

// Byteswap 32-bit words, then byteswap the 16-bit halves within each word.
struct cs_shuffle_32_16 : cs_shuffle_base
{
	// byteswap_ulong + byteswap_ushort
	cs_shuffle_32_16()
	{
		cs_shuffle_base::build("bswap_u16_u32");
	}
};

// Depth-format conversion: D24X8 (packed) to F32.
struct cs_shuffle_d24x8_f32 : cs_shuffle_base
{
	// convert d24x8 to f32
	cs_shuffle_d24x8_f32()
	{
		cs_shuffle_base::build("d24x8_to_f32");
	}
};

// Depth-format conversion: F32 to D24X8, output byteswapped.
struct cs_shuffle_se_f32_d24x8 : cs_shuffle_base
{
	// convert f32 to d24x8 and swap endianness
	cs_shuffle_se_f32_d24x8()
	{
		cs_shuffle_base::build("f32_to_d24x8_swapped");
	}
};

// In-place endian swap of D24X8 data.
struct cs_shuffle_se_d24x8 : cs_shuffle_base
{
	// swap endianness of d24x8
	cs_shuffle_se_d24x8()
	{
		cs_shuffle_base::build("d24x8_to_d24x8_swapped");
	}
};
// NOTE: D24S8 layout has the stencil in the MSB! Its actually S8|D24|S8|D24 starting at offset 0
// Base for kernels that interleave/deinterleave separate depth and stencil
// planes within a single SSBO (z_offset / s_offset point at the planes).
struct cs_interleave_task : cs_shuffle_base
{
	u32 m_ssbo_length = 0; // Bound SSBO range; covers both planes, may exceed m_data_length

	cs_interleave_task();

	void bind_resources() override;

	void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset);
};
// Gathers separate D24 depth and S8 stencil planes into packed D24S8 texels,
// optionally byteswapping the packed result.
template<bool _SwapBytes = false>
struct cs_gather_d24x8 : cs_interleave_task
{
	cs_gather_d24x8()
	{
		// Read depth from the depth plane and the matching stencil byte (4 stencil
		// values are packed per u32 in the stencil plane), then pack as (depth<<8)|stencil.
		work_kernel =
			" if (index >= block_length)\n"
			" return;\n"
			"\n"
			" depth = data[index + z_offset] & 0x00FFFFFF;\n"
			" stencil_offset = (index / 4);\n"
			" stencil_shift = (index % 4) * 8;\n"
			" stencil = data[stencil_offset + s_offset];\n"
			" stencil = (stencil >> stencil_shift) & 0xFF;\n"
			" value = (depth << 8) | stencil;\n";

		if constexpr (!_SwapBytes)
		{
			work_kernel +=
				" data[index] = value;\n";
		}
		else
		{
			work_kernel +=
				" data[index] = bswap_u32(value);\n";
		}

		// Empty function name: the custom work_kernel above is the whole transform.
		cs_shuffle_base::build("");
	}
};
// Gathers an F32 depth plane and an S8 stencil plane into packed D24S8 texels.
// _DepthFloat selects the float-depth conversion variant (f32_to_d24f vs f32_to_d24);
// _SwapBytes byteswaps the packed result.
template<bool _SwapBytes = false, bool _DepthFloat = false>
struct cs_gather_d32x8 : cs_interleave_task
{
	cs_gather_d32x8()
	{
		work_kernel =
			" if (index >= block_length)\n"
			" return;\n"
			"\n";

		// Convert the 32-bit float depth sample down to 24 bits.
		if constexpr (!_DepthFloat)
		{
			work_kernel +=
				" depth = f32_to_d24(data[index + z_offset]);\n";
		}
		else
		{
			work_kernel +=
				" depth = f32_to_d24f(data[index + z_offset]);\n";
		}

		// Fetch the matching stencil byte (4 stencil values per u32) and pack.
		work_kernel +=
			" stencil_offset = (index / 4);\n"
			" stencil_shift = (index % 4) * 8;\n"
			" stencil = data[stencil_offset + s_offset];\n"
			" stencil = (stencil >> stencil_shift) & 0xFF;\n"
			" value = (depth << 8) | stencil;\n";

		if constexpr (!_SwapBytes)
		{
			work_kernel +=
				" data[index] = value;\n";
		}
		else
		{
			work_kernel +=
				" data[index] = bswap_u32(value);\n";
		}

		cs_shuffle_base::build("");
	}
};
// Scatters packed D24S8 texels back into a separate D24 depth plane and S8 stencil plane.
struct cs_scatter_d24x8 : cs_interleave_task
{
	cs_scatter_d24x8();
};

// Scatters packed D24S8 texels into an F32 depth plane and an S8 stencil plane.
// _DepthFloat selects the float-depth expansion variant (d24f_to_f32 vs d24_to_f32).
template<bool _DepthFloat = false>
struct cs_scatter_d32x8 : cs_interleave_task
{
	cs_scatter_d32x8()
	{
		work_kernel =
			" if (index >= block_length)\n"
			" return;\n"
			"\n"
			" value = data[index];\n";

		// Expand the 24-bit depth back to a 32-bit float.
		if constexpr (!_DepthFloat)
		{
			work_kernel +=
				" data[index + z_offset] = d24_to_f32(value >> 8);\n";
		}
		else
		{
			work_kernel +=
				" data[index + z_offset] = d24f_to_f32(value >> 8);\n";
		}

		// Merge the stencil byte into the packed stencil plane; atomicOr because
		// four invocations write different bytes of the same u32.
		work_kernel +=
			" stencil_offset = (index / 4);\n"
			" stencil_shift = (index % 4) * 8;\n"
			" stencil = (value & 0xFF) << stencil_shift;\n"
			" atomicOr(data[stencil_offset + s_offset], stencil);\n";

		cs_shuffle_base::build("");
	}
};
template<typename From, typename To, bool _SwapSrc = false, bool _SwapDst = false>
struct cs_fconvert_task : cs_shuffle_base
{
u32 m_ssbo_length = 0;
void declare_f16_expansion()
{
method_declarations +=
"uvec2 unpack_e4m12_pack16(const in uint value)\n"
"{\n"
" uvec2 result = uvec2(bitfieldExtract(value, 0, 16), bitfieldExtract(value, 16, 16));\n"
" result <<= 11;\n"
" result += (120 << 23);\n"
" return result;\n"
"}\n\n";
}
void declare_f16_contraction()
{
method_declarations +=
"uint pack_e4m12_pack16(const in uvec2 value)\n"
"{\n"
" uvec2 result = (value - (120 << 23)) >> 11;\n"
" return (result.x & 0xFFFF) | (result.y << 16);\n"
"}\n\n";
}
cs_fconvert_task()
{
use_push_constants = true;
push_constants_size = 16;
variables =
" uint block_length = params[0].x >> 2;\n"
" uint in_offset = params[0].y >> 2;\n"
" uint out_offset = params[0].z >> 2;\n"
" uvec4 tmp;\n";
work_kernel =
" if (index >= block_length)\n"
" return;\n";
if constexpr (sizeof(From) == 4)
{
static_assert(sizeof(To) == 2);
declare_f16_contraction();
work_kernel +=
" const uint src_offset = (index * 2) + in_offset;\n"
" const uint dst_offset = index + out_offset;\n"
" tmp.x = data[src_offset];\n"
" tmp.y = data[src_offset + 1];\n";
if constexpr (_SwapSrc)
{
work_kernel +=
" tmp = bswap_u32(tmp);\n";
}
// Convert
work_kernel += " tmp.z = pack_e4m12_pack16(tmp.xy);\n";
if constexpr (_SwapDst)
{
work_kernel += " tmp.z = bswap_u16(tmp.z);\n";
}
work_kernel += " data[dst_offset] = tmp.z;\n";
}
else
{
static_assert(sizeof(To) == 4);
declare_f16_expansion();
work_kernel +=
" const uint src_offset = index + in_offset;\n"
" const uint dst_offset = (index * 2) + out_offset;\n"
" tmp.x = data[src_offset];\n";
if constexpr (_SwapSrc)
{
work_kernel +=
" tmp.x = bswap_u16(tmp.x);\n";
}
// Convert
work_kernel += " tmp.yz = unpack_e4m12_pack16(tmp.x);\n";
if constexpr (_SwapDst)
{
work_kernel += " tmp.yz = bswap_u32(tmp.yz);\n";
}
work_kernel +=
" data[dst_offset] = tmp.y;\n"
" data[dst_offset + 1] = tmp.z;\n";
}
cs_shuffle_base::build("");
}
void bind_resources() override
{
m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
}
void run(const vk::command_buffer& cmd, const vk::buffer* data, u32 src_offset, u32 src_length, u32 dst_offset)
{
u32 data_offset;
if (src_offset > dst_offset)
{
m_ssbo_length = (src_offset + src_length) - dst_offset;
data_offset = dst_offset;
}
else
{
m_ssbo_length = (dst_offset - src_offset) + (src_length / sizeof(From)) * sizeof(To);
data_offset = src_offset;
}
u32 parameters[4] = { src_length, src_offset - data_offset, dst_offset - data_offset, 0 };
set_parameters(cmd, parameters, 4);
cs_shuffle_base::run(cmd, data, src_length, data_offset);
}
};
// Reverse morton-order block arrangement
// Abstract interface for GPU deswizzle kernels; lets callers dispatch without
// knowing the concrete block/base-type template parameters.
struct cs_deswizzle_base : compute_task
{
	virtual void run(const vk::command_buffer& cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) = 0;
};
// Deswizzles (reverses morton-order block layout of) a 3D texture on the GPU.
// _BlockType is the texel block (its size in words drives the shader),
// _BaseType the component width (for optional byteswap via _SwapBytes).
template <typename _BlockType, typename _BaseType, bool _SwapBytes>
struct cs_deswizzle_3d : cs_deswizzle_base
{
	// Push-constant block consumed by GPUDeswizzle.glsl; layout must match the shader.
	union params_t
	{
		u32 data[7];

		struct
		{
			u32 width;
			u32 height;
			u32 depth;
			u32 logw;    // ceil(log2) of the dimensions, used for morton decoding
			u32 logh;
			u32 logd;
			u32 mipmaps;
		};
	}
	params;

	const vk::buffer* src_buffer = nullptr;
	const vk::buffer* dst_buffer = nullptr;
	u32 in_offset = 0;
	u32 out_offset = 0;
	u32 block_length = 0;

	cs_deswizzle_3d()
	{
		ensure((sizeof(_BlockType) & 3) == 0); // "Unsupported block type"

		ssbo_count = 2; // Separate source and destination SSBOs
		use_push_constants = true;
		push_constants_size = 28;

		create();

		m_src =
		#include "../Program/GLSLSnippets/GPUDeswizzle.glsl"
		;

		std::string transform;
		if constexpr (_SwapBytes)
		{
			if constexpr (sizeof(_BaseType) == 4)
			{
				transform = "bswap_u32";
			}
			else if constexpr (sizeof(_BaseType) == 2)
			{
				transform = "bswap_u16";
			}
			else
			{
				fmt::throw_exception("Unreachable");
			}
		}

		// Specialize the GLSL template for this instantiation.
		const std::pair<std::string_view, std::string> syntax_replace[] =
		{
			{ "%loc", "0" },
			{ "%set", "set = 0" },
			{ "%push_block", "push_constant" },
			{ "%ws", std::to_string(optimal_group_size) },
			{ "%_wordcount", std::to_string(sizeof(_BlockType) / 4) },
			{ "%f", transform }
		};

		m_src = fmt::replace_all(m_src, syntax_replace);
	}

	void bind_resources() override
	{
		m_program->bind_buffer({ src_buffer->value, in_offset, block_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
		m_program->bind_buffer({ dst_buffer->value, out_offset, block_length }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
	}

	void set_parameters(const vk::command_buffer& cmd)
	{
		vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data);
	}

	void run(const vk::command_buffer& cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) override
	{
		dst_buffer = dst;
		src_buffer = src;
		this->in_offset = in_offset;
		this->out_offset = out_offset;
		this->block_length = data_length;

		params.width = width;
		params.height = height;
		params.depth = depth;
		params.mipmaps = mipmaps;
		params.logw = rsx::ceil_log2(width);
		params.logh = rsx::ceil_log2(height);
		params.logd = rsx::ceil_log2(depth);
		set_parameters(cmd);

		// One invocation consumes one block per workgroup lane.
		const u32 num_bytes_per_invocation = (sizeof(_BlockType) * optimal_group_size);
		const u32 linear_invocations = utils::aligned_div(data_length, num_bytes_per_invocation);
		compute_task::run(cmd, linear_invocations);
	}
};
// Sums a buffer of u32 words on the GPU, writing the aggregate into 'dst'
// (used e.g. to reduce occlusion query results).
struct cs_aggregator : compute_task
{
	const buffer* src = nullptr;
	const buffer* dst = nullptr;
	u32 block_length = 0; // Bound byte length of the source range
	u32 word_count = 0;   // Number of u32 words to reduce

	cs_aggregator();

	void bind_resources() override;

	void run(const vk::command_buffer& cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words);
};
// Direction of a tiled-memory copy.
enum RSX_detiler_op
{
	decode = 0, // tiled -> linear
	encode = 1  // linear -> tiled
};

// Parameters for a cs_tile_memcpy dispatch, describing both the RSX tile
// region and the linear image it maps to.
struct RSX_detiler_config
{
	// Tile region (RSX local memory)
	u32 tile_base_address;
	u32 tile_base_offset;
	u32 tile_rw_offset;
	u32 tile_size;
	u32 tile_pitch;
	u32 bank;

	// GPU buffers holding the destination and source data
	const vk::buffer* dst;
	u32 dst_offset;
	const vk::buffer* src;
	u32 src_offset;

	// Linear image description
	u16 image_width;
	u16 image_height;
	u32 image_pitch;
	u8 image_bpp;
};
// Copies data between tiled RSX memory and a linear image layout on the GPU.
// Op selects the direction: decode (tiled -> linear) or encode (linear -> tiled).
template <RSX_detiler_op Op>
struct cs_tile_memcpy : compute_task
{
#pragma pack (push, 1)
	// Push-constant block consumed by RSXMemoryTiling.glsl; layout must match the shader.
	struct
	{
		u32 prime;              // Non-power-of-2 factor of the tile pitch (1 if pitch is pow2)
		u32 factor;             // Remaining pitch factor: pitch = prime * factor * 256
		u32 num_tiles_per_row;
		u32 tile_base_address;
		u32 tile_size;
		u32 tile_address_offset;
		u32 tile_rw_offset;
		u32 tile_pitch;
		u32 tile_bank;
		u32 image_width;
		u32 image_height;
		u32 image_pitch;
		u32 image_bpp;
	} params;
#pragma pack (pop)

	const vk::buffer* src_buffer = nullptr;
	const vk::buffer* dst_buffer = nullptr;
	u32 in_offset = 0;
	u32 out_offset = 0;
	u32 in_block_length = 0;
	u32 out_block_length = 0;

	cs_tile_memcpy()
	{
		ssbo_count = 2; // Separate source and destination SSBOs
		use_push_constants = true;
		push_constants_size = sizeof(params);

		create();

		m_src =
		#include "../Program/GLSLSnippets/RSXMemoryTiling.glsl"
		;

		// Specialize the GLSL template for this instantiation.
		const std::pair<std::string_view, std::string> syntax_replace[] =
		{
			{ "%loc", "0" },
			{ "%set", "set = 0" },
			{ "%push_block", "push_constant" },
			{ "%ws", std::to_string(optimal_group_size) },
			{ "%op", std::to_string(Op) }
		};

		m_src = fmt::replace_all(m_src, syntax_replace);
	}

	void bind_resources() override
	{
		// XOR with the op flips the two binding slots, so the shader always sees
		// the tiled buffer and the linear buffer at fixed locations regardless
		// of copy direction.
		const auto op = static_cast<int>(Op);
		m_program->bind_buffer({ src_buffer->value, in_offset, in_block_length }, 0 ^ op, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
		m_program->bind_buffer({ dst_buffer->value, out_offset, out_block_length }, 1 ^ op, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
	}

	void set_parameters(const vk::command_buffer& cmd)
	{
		// FIX: the argument was the mojibake token '¶ms' (a mangled '&params'),
		// which does not compile. Push the parameter block by address.
		vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, &params);
	}

	void run(const vk::command_buffer& cmd, const RSX_detiler_config& config)
	{
		dst_buffer = config.dst;
		src_buffer = config.src;
		this->in_offset = config.src_offset;
		this->out_offset = config.dst_offset;

		// Tiles are 64 rows tall; clamp to what actually fits in the tile region.
		const auto tile_aligned_height = std::min(
			utils::align<u32>(config.image_height, 64),
			utils::aligned_div(config.tile_size - config.tile_base_offset, config.tile_pitch)
		);

		if constexpr (Op == RSX_detiler_op::decode)
		{
			this->in_block_length = tile_aligned_height * config.tile_pitch;
			this->out_block_length = config.image_height * config.image_pitch;
		}
		else
		{
			this->in_block_length = config.image_height * config.image_pitch;
			this->out_block_length = tile_aligned_height * config.tile_pitch;
		}

		// Factor the pitch as prime * factor * 256 so the shader can decompose
		// addresses without expensive division by an arbitrary pitch.
		auto get_prime_factor = [](u32 pitch) -> std::pair<u32, u32>
		{
			const u32 base = (pitch >> 8);
			if ((pitch & (pitch - 1)) == 0)
			{
				return { 1u, base };
			}

			for (const auto prime : { 3, 5, 7, 11, 13 })
			{
				if ((base % prime) == 0)
				{
					return { prime, base / prime };
				}
			}

			rsx_log.error("Unexpected pitch value 0x%x", pitch);
			return {};
		};

		const auto [prime, factor] = get_prime_factor(config.tile_pitch);
		const u32 tiles_per_row = prime * factor;

		params.prime = prime;
		params.factor = factor;
		params.num_tiles_per_row = tiles_per_row;
		params.tile_base_address = config.tile_base_address;
		params.tile_rw_offset = config.tile_rw_offset;
		params.tile_size = config.tile_size;
		params.tile_address_offset = config.tile_base_offset;
		params.tile_pitch = config.tile_pitch;
		params.tile_bank = config.bank;
		params.image_width = config.image_width;
		params.image_height = (Op == RSX_detiler_op::decode) ? tile_aligned_height : config.image_height;
		params.image_pitch = config.image_pitch;
		params.image_bpp = config.image_bpp;
		set_parameters(cmd);

		// For sub-32-bit formats one invocation handles several texels packed in a word.
		const u32 subtexels_per_invocation = (config.image_bpp < 4) ? (4 / config.image_bpp) : 1;
		const u32 virtual_width = config.image_width / subtexels_per_invocation;
		const u32 invocations_x = utils::aligned_div(virtual_width, optimal_group_size);
		compute_task::run(cmd, invocations_x, config.image_height, 1);
	}
};
// TODO: Replace with a proper manager
extern std::unordered_map<u32, std::unique_ptr<vk::compute_task>> g_compute_tasks;
template<class T>
T* get_compute_task()
{
u32 index = id_manager::typeinfo::get_index<T>();
auto &e = g_compute_tasks[index];
if (!e)
{
e = std::make_unique<T>();
e->create();
}
return static_cast<T*>(e.get());
}
void reset_compute_tasks();
}
| 16,688
|
C++
|
.h
| 571
| 25.334501
| 195
| 0.644714
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,079
|
VKPipelineCompiler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKPipelineCompiler.h
|
#pragma once
#include "../rsx_utils.h"
#include "Utilities/lockless.h"
#include "VKProgramPipeline.h"
#include "vkutils/graphics_pipeline_state.hpp"
#include "util/fnv_hash.hpp"
namespace vk
{
class render_device;
// Key type for the graphics pipeline cache: full pipeline state + renderpass.
// operator== must agree with rpcs3::hash_struct<vk::pipeline_props> below.
struct pipeline_props
{
	graphics_pipeline_state state;
	u64 renderpass_key;

	bool operator==(const pipeline_props& other) const
	{
		if (renderpass_key != other.renderpass_key)
			return false;

		// memcmp is valid here: these members are plain Vulkan create-info PODs.
		if (memcmp(&state.ia, &other.state.ia, sizeof(VkPipelineInputAssemblyStateCreateInfo)))
			return false;

		if (memcmp(&state.rs, &other.state.rs, sizeof(VkPipelineRasterizationStateCreateInfo)))
			return false;

		// Cannot memcmp cs due to pAttachments being a pointer to memory
		if (state.cs.attachmentCount != other.state.cs.attachmentCount ||
			state.cs.logicOp != other.state.cs.logicOp ||
			state.cs.logicOpEnable != other.state.cs.logicOpEnable ||
			memcmp(state.cs.blendConstants, other.state.cs.blendConstants, 4 * sizeof(f32)))
			return false;

		// Compare only the attachment states actually in use.
		if (memcmp(state.att_state, &other.state.att_state, state.cs.attachmentCount * sizeof(VkPipelineColorBlendAttachmentState)))
			return false;

		if (memcmp(&state.ds, &other.state.ds, sizeof(VkPipelineDepthStencilStateCreateInfo)))
			return false;

		// Multisample state only matters when MSAA is actually enabled.
		if (state.ms.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT)
		{
			if (memcmp(&state.ms, &other.state.ms, sizeof(VkPipelineMultisampleStateCreateInfo)))
				return false;

			if (state.temp_storage.msaa_sample_mask != other.state.temp_storage.msaa_sample_mask)
				return false;
		}

		return true;
	}
};
	// Compiles graphics and compute pipelines, either inline on the calling
	// thread or deferred onto worker threads via a lock-free job queue.
	class pipe_compiler
	{
	public:
		enum op_flags
		{
			COMPILE_DEFAULT = 0,  // Compile on a worker; caller waits for the result
			COMPILE_INLINE = 1,   // Compile synchronously on the calling thread
			COMPILE_DEFERRED = 2  // Queue the job; 'callback' fires when done
		};

		// Invoked with the finished program (deferred compiles).
		using callback_t = std::function<void(std::unique_ptr<glsl::program>&)>;

		pipe_compiler();
		~pipe_compiler();

		void initialize(const vk::render_device* pdev);

		std::unique_ptr<glsl::program> compile(
			const VkComputePipelineCreateInfo& create_info,
			VkPipelineLayout pipe_layout,
			op_flags flags, callback_t callback = {});

		std::unique_ptr<glsl::program> compile(
			const VkGraphicsPipelineCreateInfo& create_info,
			VkPipelineLayout pipe_layout,
			op_flags flags, callback_t callback = {},
			const std::vector<glsl::program_input>& vs_inputs = {},
			const std::vector<glsl::program_input>& fs_inputs = {});

		std::unique_ptr<glsl::program> compile(
			const vk::pipeline_props &create_info,
			VkShaderModule module_handles[2],
			VkPipelineLayout pipe_layout,
			op_flags flags, callback_t callback = {},
			const std::vector<glsl::program_input>& vs_inputs = {},
			const std::vector<glsl::program_input>& fs_inputs = {});

		// Worker-thread entry point: drains the job queue.
		void operator()();

	private:

		// VkComputePipelineCreateInfo whose stage.pName stays valid after the
		// original create-info goes out of scope (owns the entry-point string).
		class compute_pipeline_props : public VkComputePipelineCreateInfo
		{
			// Storage for the entry name
			std::string entry_name;

		public:
			compute_pipeline_props() = default;
			compute_pipeline_props(const VkComputePipelineCreateInfo& info)
			{
				(*static_cast<VkComputePipelineCreateInfo*>(this)) = info;
				entry_name = info.stage.pName;
				stage.pName = entry_name.c_str();
			}
		};

		// A queued compile request (graphics or compute).
		struct pipe_compiler_job
		{
			bool is_graphics_job;
			callback_t callback_func;
			vk::pipeline_props graphics_data;
			compute_pipeline_props compute_data;
			VkPipelineLayout pipe_layout;
			VkShaderModule graphics_modules[2]; // [0] = vertex, [1] = fragment
			std::vector<glsl::program_input> inputs;

			pipe_compiler_job(
				const vk::pipeline_props& props,
				VkPipelineLayout layout,
				VkShaderModule modules[2],
				const std::vector<glsl::program_input>& vs_in,
				const std::vector<glsl::program_input>& fs_in,
				callback_t func)
			{
				callback_func = func;
				graphics_data = props;
				pipe_layout = layout;
				graphics_modules[0] = modules[0];
				graphics_modules[1] = modules[1];
				is_graphics_job = true;

				inputs.reserve(vs_in.size() + fs_in.size());
				inputs.insert(inputs.end(), vs_in.begin(), vs_in.end());
				inputs.insert(inputs.end(), fs_in.begin(), fs_in.end());
			}

			pipe_compiler_job(
				const VkComputePipelineCreateInfo& props,
				VkPipelineLayout layout,
				callback_t func)
			{
				callback_func = func;
				compute_data = props;
				pipe_layout = layout;
				is_graphics_job = false;
			}
		};

		const vk::render_device* m_device = nullptr;
		lf_queue<pipe_compiler_job> m_work_queue;

		std::unique_ptr<glsl::program> int_compile_compute_pipe(const VkComputePipelineCreateInfo& create_info, VkPipelineLayout pipe_layout);
		std::unique_ptr<glsl::program> int_compile_graphics_pipe(const VkGraphicsPipelineCreateInfo& create_info, VkPipelineLayout pipe_layout,
			const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs);
		std::unique_ptr<glsl::program> int_compile_graphics_pipe(const vk::pipeline_props &create_info, VkShaderModule modules[2], VkPipelineLayout pipe_layout,
			const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs);
	};

	// Global compiler pool lifecycle; -1 worker threads = auto-detect.
	void initialize_pipe_compiler(int num_worker_threads = -1);
	void destroy_pipe_compiler();
	pipe_compiler* get_pipe_compiler();
}
namespace rpcs3
{
	// Hash specialization for vk::pipeline_props (pipeline cache key).
	// Kept consistent with vk::pipeline_props::operator== above: fields that
	// participate in equality should also contribute to the hash.
	template <>
	inline usz hash_struct<vk::pipeline_props>(const vk::pipeline_props& pipelineProperties)
	{
		usz seed = hash_base(pipelineProperties.renderpass_key);
		seed ^= hash_struct(pipelineProperties.state.ia);
		seed ^= hash_struct(pipelineProperties.state.ds);
		seed ^= hash_struct(pipelineProperties.state.rs);
		seed ^= hash_struct(pipelineProperties.state.ms);
		seed ^= hash_base(pipelineProperties.state.temp_storage.msaa_sample_mask);

		// Do not hash pointers to memory! Null out pAttachments before hashing
		// the color-blend state. FIX: previously 'tmp' was built but never folded
		// into the seed, so logicOp/logicOpEnable/blendConstants (which operator==
		// compares) never affected the hash.
		VkPipelineColorBlendStateCreateInfo tmp;
		memcpy(&tmp, &pipelineProperties.state.cs, sizeof(VkPipelineColorBlendStateCreateInfo));
		tmp.pAttachments = nullptr;
		seed ^= hash_struct(tmp);

		// Hash only the attachment states actually in use.
		for (usz i = 0; i < pipelineProperties.state.cs.attachmentCount; ++i)
		{
			seed ^= hash_struct(pipelineProperties.state.att_state[i]);
		}
		return hash_base(seed);
	}
}
| 5,932
|
C++
|
.h
| 158
| 33.78481
| 154
| 0.732846
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,080
|
VKGSRenderTypes.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp
|
#pragma once
#include "vkutils/commands.h"
#include "vkutils/descriptors.h"
#include "VKResourceManager.h"
#include "Emu/RSX/Common/simple_array.hpp"
#include "Emu/RSX/rsx_utils.h"
#include "Emu/RSX/rsx_cache.h"
#include "Utilities/mutex.h"
#include "util/asm.hpp"
#include <optional>
#include <thread>
// Initial heap allocation values. The heaps are growable and will automatically increase in size to accomodate demands
#define VK_ATTRIB_RING_BUFFER_SIZE_M 64
#define VK_TEXTURE_UPLOAD_RING_BUFFER_SIZE_M 64
#define VK_UBO_RING_BUFFER_SIZE_M 16
#define VK_TRANSFORM_CONSTANTS_BUFFER_SIZE_M 16
#define VK_FRAGMENT_CONSTANTS_BUFFER_SIZE_M 16
#define VK_INDEX_RING_BUFFER_SIZE_M 16
#define VK_MAX_ASYNC_CB_COUNT 512
#define VK_MAX_ASYNC_FRAMES 2
#define FRAME_PRESENT_TIMEOUT 10000000ull // 10 seconds
#define GENERAL_WAIT_TIMEOUT 2000000ull // 2 seconds
namespace vk
{
	struct buffer_view;
	struct program_cache;
	struct pipeline_props;

	using vertex_cache = rsx::vertex_cache::default_vertex_cache<rsx::vertex_cache::uploaded_range>;
	using weak_vertex_cache = rsx::vertex_cache::weak_vertex_cache;
	using null_vertex_cache = vertex_cache;

	using shader_cache = rsx::shaders_cache<vk::pipeline_props, vk::program_cache>;

	// Result of staging a draw call's vertex data into the upload heaps;
	// everything the draw-emit path needs to issue the call.
	struct vertex_upload_info
	{
		VkPrimitiveTopology primitive;
		u32 vertex_draw_count;        // Vertices referenced by the draw
		u32 allocated_vertex_count;   // Vertices actually allocated in the heap
		u32 first_vertex;
		u32 vertex_index_base;
		u32 vertex_index_offset;
		u32 persistent_window_offset; // Offset into the persistent attribute window
		u32 volatile_window_offset;   // Offset into the volatile attribute window
		std::optional<std::tuple<VkDeviceSize, VkIndexType>> index_info; // Present for indexed draws
	};
	// A command buffer in the renderer's ring, with fence-based completion
	// tracking. poke()/wait() retire the buffer and fire the deferred-destroy
	// callbacks tagged against it (via the event id).
	struct command_buffer_chunk : public vk::command_buffer
	{
		u64 eid_tag = 0;    // Event id snapshot; resources tagged <= this are released on completion
		u64 reset_id = 0;   // Incremented on every reset; detects buffer reuse
		shared_mutex guard_mutex; // Guards the is_pending/fence transition

		command_buffer_chunk() = default;

		inline void tag()
		{
			eid_tag = vk::get_event_id();
		}

		// Recycle the buffer for re-recording; blocks until prior submission completes.
		void reset()
		{
			if (is_pending && !poke())
			{
				wait(FRAME_PRESENT_TIMEOUT);
			}

			++reset_id;
			CHECK_RESULT(vkResetCommandBuffer(commands, 0));
		}

		// Non-blocking completion check; retires the buffer if the fence signaled.
		// Returns true when the buffer is no longer pending.
		bool poke()
		{
			reader_lock lock(guard_mutex);

			if (!is_pending)
			{
				return true;
			}

			if (!m_submit_fence->flushed)
			{
				// Not yet submitted to the driver; cannot have completed.
				return false;
			}

			if (vkGetFenceStatus(pool->get_owner(), m_submit_fence->handle) == VK_SUCCESS)
			{
				lock.upgrade();

				// Re-check after upgrading: another thread may have retired it.
				if (is_pending)
				{
					m_submit_fence->reset();
					vk::on_event_completed(eid_tag);

					is_pending = false;
					eid_tag = 0;
				}
			}

			return !is_pending;
		}

		// Blocking completion wait (0 = forever); retires the buffer on success.
		VkResult wait(u64 timeout = 0ull)
		{
			reader_lock lock(guard_mutex);

			if (!is_pending)
			{
				return VK_SUCCESS;
			}

			const auto ret = vk::wait_for_fence(m_submit_fence, timeout);

			lock.upgrade();

			// Re-check after upgrading: another thread may have retired it.
			if (is_pending)
			{
				m_submit_fence->reset();
				vk::on_event_completed(eid_tag);

				is_pending = false;
				eid_tag = 0;
			}

			return ret;
		}

		// Ensure the submission has reached the driver (does not wait for GPU completion).
		void flush()
		{
			reader_lock lock(guard_mutex);

			if (!is_pending)
			{
				return;
			}

			m_submit_fence->wait_flush();
		}
	};
	// Tracks the query slots belonging to one occlusion query task and the
	// command buffer whose submission must complete before results are read.
	struct occlusion_data
	{
		rsx::simple_array<u32> indices;                       // Query slot indices owned by this task
		command_buffer_chunk* command_buffer_to_wait = nullptr;
		u64 command_buffer_sync_id = 0;                       // reset_id at the time of capture

		// True while the referenced command buffer has not been recycled.
		bool is_current(command_buffer_chunk* cmd) const
		{
			return (command_buffer_to_wait == cmd && command_buffer_sync_id == cmd->reset_id);
		}

		void set_sync_command_buffer(command_buffer_chunk* cmd)
		{
			command_buffer_to_wait = cmd;
			command_buffer_sync_id = cmd->reset_id;
		}

		// Ensure the tracked command buffer's submission has reached the driver.
		void sync()
		{
			if (command_buffer_to_wait->reset_id == command_buffer_sync_id)
			{
				// Allocation stack is FIFO and very long so no need to actually wait for fence signal
				command_buffer_to_wait->flush();
			}
		}
	};
	// Per-frame state for double/triple-buffered rendering: swapchain sync
	// objects, the frame's descriptor set, and snapshots of every upload-heap
	// write pointer so heap space can be reclaimed once the frame retires.
	struct frame_context_t
	{
		VkSemaphore acquire_signal_semaphore = VK_NULL_HANDLE; // Signaled when the swapchain image is acquired
		VkSemaphore present_wait_semaphore = VK_NULL_HANDLE;   // Waited on by the present operation
		vk::descriptor_set descriptor_set;

		rsx::flags32_t flags = 0;

		std::vector<std::unique_ptr<vk::buffer_view>> buffer_views_to_clean; // Released when the frame retires

		u32 present_image = -1;                                // Swapchain image index (-1 = none acquired)
		command_buffer_chunk* swap_command_buffer = nullptr;

		// Heap pointers (write offsets into the shared upload ring heaps)
		s64 attrib_heap_ptr = 0;
		s64 vtx_env_heap_ptr = 0;
		s64 frag_env_heap_ptr = 0;
		s64 frag_const_heap_ptr = 0;
		s64 vtx_const_heap_ptr = 0;
		s64 vtx_layout_heap_ptr = 0;
		s64 frag_texparam_heap_ptr = 0;
		s64 index_heap_ptr = 0;
		s64 texture_upload_heap_ptr = 0;
		s64 rasterizer_env_heap_ptr = 0;

		u64 last_frame_sync_time = 0; // Shared tag at frame end; 0 = heap pointers invalid

		// Copy shareable information
		void grab_resources(frame_context_t& other)
		{
			present_wait_semaphore = other.present_wait_semaphore;
			acquire_signal_semaphore = other.acquire_signal_semaphore;
			descriptor_set.swap(other.descriptor_set);
			flags = other.flags;

			attrib_heap_ptr = other.attrib_heap_ptr;
			vtx_env_heap_ptr = other.vtx_env_heap_ptr;
			frag_env_heap_ptr = other.frag_env_heap_ptr;
			vtx_layout_heap_ptr = other.vtx_layout_heap_ptr;
			frag_texparam_heap_ptr = other.frag_texparam_heap_ptr;
			frag_const_heap_ptr = other.frag_const_heap_ptr;
			vtx_const_heap_ptr = other.vtx_const_heap_ptr;
			index_heap_ptr = other.index_heap_ptr;
			texture_upload_heap_ptr = other.texture_upload_heap_ptr;
			rasterizer_env_heap_ptr = other.rasterizer_env_heap_ptr;
		}

		// Exchange storage (non-copyable)
		void swap_storage(frame_context_t& other)
		{
			std::swap(buffer_views_to_clean, other.buffer_views_to_clean);
		}

		// Snapshot all heap write pointers at end-of-frame and timestamp the frame.
		void tag_frame_end(
			s64 attrib_loc, s64 vtxenv_loc, s64 fragenv_loc, s64 vtxlayout_loc,
			s64 fragtex_loc, s64 fragconst_loc, s64 vtxconst_loc, s64 index_loc,
			s64 texture_loc, s64 rasterizer_loc)
		{
			attrib_heap_ptr = attrib_loc;
			vtx_env_heap_ptr = vtxenv_loc;
			frag_env_heap_ptr = fragenv_loc;
			vtx_layout_heap_ptr = vtxlayout_loc;
			frag_texparam_heap_ptr = fragtex_loc;
			frag_const_heap_ptr = fragconst_loc;
			vtx_const_heap_ptr = vtxconst_loc;
			index_heap_ptr = index_loc;
			texture_upload_heap_ptr = texture_loc;
			rasterizer_env_heap_ptr = rasterizer_loc;

			last_frame_sync_time = rsx::get_shared_tag();
		}

		void reset_heap_ptrs()
		{
			last_frame_sync_time = 0;
		}
	};
// Cross-thread handshake used to request that rsx::thread flush the command queue.
// Producers post requests and spin in producer_wait(); the consumer (rsx::thread)
// services the request, clears the flag, and waits for producers to acknowledge.
struct flush_request_task
{
	atomic_t<bool> pending_state{ false }; //Flush request status; true if rsx::thread is yet to service this request
	atomic_t<int> num_waiters{ 0 }; //Number of threads waiting for this request to be serviced
	bool hard_sync = false; // Sticky: once any poster requests a hard sync, it stays set until cleared

	flush_request_task() = default;

	// Producer side: register a flush request; _hard_sync upgrades the request.
	void post(bool _hard_sync)
	{
		hard_sync = (hard_sync || _hard_sync);
		pending_state = true;
		num_waiters++;
	}

	// Producer side: acknowledge that this thread is done waiting on the request.
	void remove_one()
	{
		num_waiters--;
	}

	// Consumer side: mark the request as serviced.
	void clear_pending_flag()
	{
		hard_sync = false;
		pending_state.store(false);
	}

	bool pending() const
	{
		return pending_state.load();
	}

	// Consumer side: busy-wait until every waiting producer has called remove_one().
	void consumer_wait() const
	{
		while (num_waiters.load() != 0)
		{
			utils::pause();
		}
	}

	// Producer side: yield until the consumer has serviced the request.
	void producer_wait() const
	{
		while (pending_state.load())
		{
			std::this_thread::yield();
		}
	}
};
// Description of the surface to scan out on flip.
struct present_surface_info
{
	u32 address; // Guest address of the source surface
	u32 format;  // Surface color format code
	u32 width;   // Width in pixels
	u32 height;  // Height in pixels
	u32 pitch;   // Row pitch in bytes
	u8 eye;      // NOTE(review): presumably the stereo eye index — confirm against callers
};
// State tracked for the draw call currently being emitted.
struct draw_call_t
{
	u32 subdraw_id; // Index of the current sub-draw within the active draw call
};
// Fixed-size ring of command buffer chunks handed out round-robin.
// The rotation counter is atomic so next() is safe to call from multiple threads.
template<int Count>
class command_buffer_chain
{
	atomic_t<u32> m_rotation = 0;                          // Monotonic counter; slot = value % Count
	std::array<vk::command_buffer_chunk, Count> m_chunks;  // Backing storage for the ring

public:
	command_buffer_chain() = default;

	// Allocate every chunk from the given pool and tag the expected usage pattern.
	void create(command_pool& pool, vk::command_buffer::access_type_hint access)
	{
		for (auto& chunk : m_chunks)
		{
			chunk.create(pool);
			chunk.access_hint = access;
		}
	}

	void destroy()
	{
		for (auto& chunk : m_chunks)
		{
			chunk.destroy();
		}
	}

	// Opportunistically recycle every chunk whose work has completed.
	void poke_all()
	{
		for (auto& chunk : m_chunks)
		{
			chunk.poke();
		}
	}

	// Hard sync: block until every chunk in the ring has completed.
	void wait_all()
	{
		for (auto& chunk : m_chunks)
		{
			chunk.wait();
		}
	}

	// Advance the ring and return the next chunk. Logs an error if that chunk
	// could not be recycled (the whole ring is still in flight).
	inline command_buffer_chunk* next()
	{
		const auto slot = ++m_rotation % Count;
		auto* chunk = &m_chunks[slot];

		if (!chunk->poke())
		{
			rsx_log.error("CB chain has run out of free entries!");
		}

		return chunk;
	}

	// Current chunk without advancing the ring.
	inline command_buffer_chunk* get()
	{
		return &m_chunks[m_rotation % Count];
	}
};
}
| 7,943
|
C++
|
.h
| 308
| 22.321429
| 119
| 0.688871
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,081
|
VKResourceManager.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKResourceManager.h
|
#pragma once
#include "vkutils/image.h"
#include "vkutils/garbage_collector.h"
#include "vkutils/query_pool.hpp"
#include "vkutils/sampler.h"
#include "Utilities/mutex.h"
#include <unordered_map>
#include <deque>
#include <memory>
namespace vk
{
u64 get_event_id();
u64 current_event_id();
u64 last_completed_event_id();
void on_event_completed(u64 event_id, bool flush = false);
// A bucket of deferred-delete resources that all become destructible once
// event id 'eid' has completed on the GPU.
struct eid_scope_t
{
	u64 eid;                                 // Event id this scope is tied to
	const vk::render_device* m_device;       // Device the resources belong to
	std::vector<disposable_t> m_disposables; // Generic deferred-delete objects
	std::vector<std::unique_ptr<gpu_debug_marker>> m_debug_markers; // Kept separately so they can be read back before destruction

	eid_scope_t(u64 _eid):
		eid(_eid), m_device(g_render_device)
	{}

	~eid_scope_t()
	{
		discard();
	}

	// Exchange contents with another scope (used to destroy contents outside a lock).
	void swap(eid_scope_t& other)
	{
		std::swap(eid, other.eid);
		std::swap(m_device, other.m_device);
		std::swap(m_disposables, other.m_disposables);
		std::swap(m_debug_markers, other.m_debug_markers);
	}

	// Destroy all owned resources immediately.
	void discard()
	{
		m_disposables.clear();
		m_debug_markers.clear();
	}
};
class resource_manager : public garbage_collector
{
private:
sampler_pool_t m_sampler_pool;
std::deque<eid_scope_t> m_eid_map;
shared_mutex m_eid_map_lock;
std::vector<std::function<void()>> m_exit_handlers;
inline eid_scope_t& get_current_eid_scope()
{
const auto eid = current_event_id();
{
std::lock_guard lock(m_eid_map_lock);
if (m_eid_map.empty() || m_eid_map.back().eid != eid)
{
m_eid_map.emplace_back(eid);
}
}
return m_eid_map.back();
}
public:
resource_manager() = default;
~resource_manager() = default;
void destroy()
{
flush();
// Run the on-exit callbacks
for (const auto& callback : m_exit_handlers)
{
callback();
}
}
void flush()
{
m_eid_map.clear();
m_sampler_pool.clear();
}
vk::sampler* get_sampler(const vk::render_device& dev, vk::sampler* previous,
VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const vk::border_color_t& border_color,
VkBool32 depth_compare = VK_FALSE, VkCompareOp depth_compare_mode = VK_COMPARE_OP_NEVER)
{
const auto key = m_sampler_pool.compute_storage_key(
clamp_u, clamp_v, clamp_w,
unnormalized_coordinates, mipLodBias, max_anisotropy, min_lod, max_lod,
min_filter, mag_filter, mipmap_mode, border_color,
depth_compare, depth_compare_mode);
if (previous)
{
auto as_cached_object = static_cast<cached_sampler_object_t*>(previous);
ensure(as_cached_object->has_refs());
as_cached_object->release();
}
if (const auto found = m_sampler_pool.find(key))
{
found->add_ref();
return found;
}
auto result = std::make_unique<cached_sampler_object_t>(
dev, clamp_u, clamp_v, clamp_w, unnormalized_coordinates,
mipLodBias, max_anisotropy, min_lod, max_lod,
min_filter, mag_filter, mipmap_mode, border_color,
depth_compare, depth_compare_mode);
auto ret = m_sampler_pool.emplace(key, result);
ret->add_ref();
return ret;
}
void add_exit_callback(std::function<void()> callback) override
{
m_exit_handlers.push_back(callback);
}
void dispose(vk::disposable_t& disposable) override
{
get_current_eid_scope().m_disposables.emplace_back(std::move(disposable));
}
inline void dispose(std::unique_ptr<vk::gpu_debug_marker>& object)
{
// Special case as we may need to read these out.
// FIXME: We can manage these markers better and remove this exception.
get_current_eid_scope().m_debug_markers.emplace_back(std::move(object));
}
template<typename T>
inline void dispose(std::unique_ptr<T>& object)
{
auto ptr = vk::disposable_t::make(object.release());
dispose(ptr);
}
void push_down_current_scope()
{
get_current_eid_scope().eid++;
}
void eid_completed(u64 eid)
{
while (!m_eid_map.empty())
{
auto& scope = m_eid_map.front();
if (scope.eid > eid)
{
break;
}
else
{
eid_scope_t tmp(0);
{
std::lock_guard lock(m_eid_map_lock);
m_eid_map.front().swap(tmp);
m_eid_map.pop_front();
}
}
}
}
void trim();
std::vector<const gpu_debug_marker*> gather_debug_markers() const
{
std::vector<const gpu_debug_marker*> result;
for (const auto& scope : m_eid_map)
{
for (const auto& item : scope.m_debug_markers)
{
result.push_back(item.get());
}
}
return result;
}
};
// Metadata recorded for a tracked video-memory allocation.
struct vmm_allocation_t
{
	u64 size;                 // Allocation size in bytes
	u32 type_index;           // Memory type index the allocation came from
	vmm_allocation_pool pool; // Logical pool the allocation is attributed to
};
resource_manager* get_resource_manager();
}
| 4,717
|
C++
|
.h
| 174
| 23.373563
| 117
| 0.679894
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,082
|
VKHelpers.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKHelpers.h
|
#pragma once
#include "util/types.hpp"
#include <string>
#include <functional>
#include <vector>
#include <memory>
#include <unordered_map>
#include <variant>
#include <stack>
#include <deque>
#include "VulkanAPI.h"
#include "vkutils/chip_class.h"
#include "Utilities/geometry.h"
#include "Emu/RSX/Common/TextureUtils.h"
#include "Emu/RSX/rsx_utils.h"
#define OCCLUSION_MAX_POOL_SIZE DESCRIPTOR_MAX_DRAW_CALLS
namespace rsx
{
struct GCM_tile_reference;
}
namespace vk
{
// Forward declarations
struct buffer;
class command_buffer;
class data_heap;
struct fence;
class image;
class instance;
class render_device;
struct queue_submit_t;
// Global renderer status bits manipulated via raise_status_interrupt /
// clear_status_interrupt / test_status_interrupt below.
enum runtime_state
{
	uninterruptible = 1, // Renderer is inside a critical section (see enter/leave_uninterruptible)
	heap_dirty = 2,      // NOTE(review): presumably a ring heap has pending unsynchronized writes — confirm
	heap_changed = 4,    // NOTE(review): presumably heap storage was swapped/reallocated — confirm
};
// Options accepted by copy_image_to_buffer().
struct image_readback_options_t
{
	bool swap_bytes = false; // Byte-swap texel data during the readback

	// Optional destination sub-range to synchronize; considered active when length != 0
	struct
	{
		u64 offset = 0;
		u64 length = 0;

		operator bool() const { return length != 0; }
	} sync_region {};
};
const vk::render_device *get_current_renderer();
void set_current_renderer(const vk::render_device &device);
// Compatibility workarounds
bool emulate_primitive_restart(rsx::primitive_type type);
bool sanitize_fp_values();
bool fence_reset_disabled();
bool emulate_conditional_rendering();
VkFlags get_heap_compatible_buffer_types();
// Sync helpers around vkQueueSubmit
void acquire_global_submit_lock();
void release_global_submit_lock();
void queue_submit(const vk::queue_submit_t* packet);
template<class T>
T* get_compute_task();
void destroy_global_resources();
void reset_global_resources();
// Bit flags controlling upload_image() behavior (rsx::flags32_t image_setup_flags).
// Exact handling of each bit is implemented in the upload routine.
enum image_upload_options
{
	upload_contents_async = 1,
	initialize_image_layout = 2,
	preserve_image_layout = 4,
	source_is_gpu_resident = 8,

	// meta-flags
	upload_contents_inline = 0,   // Alias: absence of upload_contents_async
	upload_heap_align_default = 0 // Alias: use the heap's default alignment
};
void upload_image(const vk::command_buffer& cmd, vk::image* dst_image,
const std::vector<rsx::subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 layer_count,
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags);
std::pair<buffer*, u32> detile_memory_block(
const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region, const utils::address_range& range,
u16 width, u16 height, u8 bpp);
// Other texture management helpers
void copy_image_to_buffer(const vk::command_buffer& cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, const image_readback_options_t& options = {});
void copy_buffer_to_image(const vk::command_buffer& cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region);
u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect);
void copy_image_typeless(const command_buffer &cmd, image *src, image *dst, const areai& src_rect, const areai& dst_rect,
u32 mipmaps, VkImageAspectFlags src_transfer_mask = 0xFF, VkImageAspectFlags dst_transfer_mask = 0xFF);
void copy_image(const vk::command_buffer& cmd, vk::image* src, vk::image* dst,
const areai& src_rect, const areai& dst_rect, u32 mipmaps,
VkImageAspectFlags src_transfer_mask = 0xFF, VkImageAspectFlags dst_transfer_mask = 0xFF);
void copy_scaled_image(const vk::command_buffer& cmd, vk::image* src, vk::image* dst,
const areai& src_rect, const areai& dst_rect, u32 mipmaps,
bool compatible_formats, VkFilter filter = VK_FILTER_LINEAR);
std::pair<VkFormat, VkComponentMapping> get_compatible_surface_format(rsx::surface_color_format color_format);
// Runtime stuff
void raise_status_interrupt(runtime_state status);
void clear_status_interrupt(runtime_state status);
bool test_status_interrupt(runtime_state status);
void enter_uninterruptible();
void leave_uninterruptible();
bool is_uninterruptible();
void advance_completed_frame_counter();
void advance_frame_counter();
u64 get_current_frame_id();
u64 get_last_completed_frame_id();
// Handle unexpected submit with dangling occlusion query
// TODO: Move queries out of the renderer!
void do_query_cleanup(vk::command_buffer& cmd);
// Stateless helper that performs scaled (optionally filtered) image-to-image blits.
struct blitter
{
	void scale_image(vk::command_buffer& cmd, vk::image* src, vk::image* dst, areai src_area, areai dst_area, bool interpolate, const rsx::typeless_xfer& xfer_info);
};
}
| 4,263
|
C++
|
.h
| 111
| 36.063063
| 182
| 0.755453
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,083
|
VKProgramBuffer.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKProgramBuffer.h
|
#pragma once
#include "VKVertexProgram.h"
#include "VKFragmentProgram.h"
#include "VKRenderPass.h"
#include "VKPipelineCompiler.h"
#include "../Program/ProgramStateCache.h"
#include "util/fnv_hash.hpp"
namespace vk
{
// Backend glue types/operations that specialize the generic program_state_cache
// for the Vulkan renderer.
struct VKTraits
{
	using vertex_program_type = VKVertexProgram;
	using fragment_program_type = VKFragmentProgram;
	using pipeline_type = vk::glsl::program;
	using pipeline_storage_type = std::unique_ptr<vk::glsl::program>;
	using pipeline_properties = vk::pipeline_props;

	// Decompile and compile a fragment program; ID is stored as the program's id.
	static
	void recompile_fragment_program(const RSXFragmentProgram& RSXFP, fragment_program_type& fragmentProgramData, usz ID)
	{
		fragmentProgramData.Decompile(RSXFP);
		fragmentProgramData.id = static_cast<u32>(ID);
		fragmentProgramData.Compile();
	}

	// Decompile and compile a vertex program; ID is stored as the program's id.
	static
	void recompile_vertex_program(const RSXVertexProgram& RSXVP, vertex_program_type& vertexProgramData, usz ID)
	{
		vertexProgramData.Decompile(RSXVP);
		vertexProgramData.id = static_cast<u32>(ID);
		vertexProgramData.Compile();
	}

	// Clamp the pipeline's color write masks against the outputs the fragment shader writes.
	static
	void validate_pipeline_properties(const VKVertexProgram&, const VKFragmentProgram& fp, vk::pipeline_props& properties)
	{
		//Explicitly disable writing to undefined registers
		properties.state.att_state[0].colorWriteMask &= fp.output_color_masks[0];
		properties.state.att_state[1].colorWriteMask &= fp.output_color_masks[1];
		properties.state.att_state[2].colorWriteMask &= fp.output_color_masks[2];
		properties.state.att_state[3].colorWriteMask &= fp.output_color_masks[3];
	}

	// Build a graphics pipeline from the compiled VS/FS modules, inline or deferred.
	// 'callback' maps the compiler's storage object to the usable pipeline pointer and
	// is also applied to the immediate compile result before returning.
	static
	pipeline_type* build_pipeline(
		const vertex_program_type& vertexProgramData,
		const fragment_program_type& fragmentProgramData,
		const vk::pipeline_props& pipelineProperties,
		bool compile_async,
		std::function<pipeline_type*(pipeline_storage_type&)> callback,
		VkPipelineLayout common_pipeline_layout)
	{
		const auto compiler_flags = compile_async ? vk::pipe_compiler::COMPILE_DEFERRED : vk::pipe_compiler::COMPILE_INLINE;
		VkShaderModule modules[2] = { vertexProgramData.handle, fragmentProgramData.handle };

		auto compiler = vk::get_pipe_compiler();
		auto result = compiler->compile(
			pipelineProperties, modules, common_pipeline_layout,
			compiler_flags, callback,
			vertexProgramData.uniforms,
			fragmentProgramData.uniforms);

		return callback(result);
	}
};
// Vulkan program cache: maps RSX shader ucode plus pipeline state to compiled pipelines.
struct program_cache : public program_state_cache<VKTraits>
{
	// 'callback' is invoked whenever a new pipeline finishes compiling.
	program_cache(decompiler_callback_t callback)
	{
		notify_pipeline_compiled = callback;
	}

	// Hash of the full pipeline state block.
	u64 get_hash(const vk::pipeline_props& props)
	{
		return rpcs3::hash_struct<vk::pipeline_props>(props);
	}

	// Hash of the vertex program ucode.
	u64 get_hash(const RSXVertexProgram& prog)
	{
		return program_hash_util::vertex_program_utils::get_vertex_program_ucode_hash(prog);
	}

	// Hash of the fragment program ucode.
	u64 get_hash(const RSXFragmentProgram& prog)
	{
		return program_hash_util::fragment_program_utils::get_fragment_program_ucode_hash(prog);
	}

	// Register a pipeline in the cache without making it the active program.
	template <typename... Args>
	void add_pipeline_entry(RSXVertexProgram& vp, RSXFragmentProgram& fp, vk::pipeline_props& props, Args&& ...args)
	{
		get_graphics_pipeline(vp, fp, props, false, false, std::forward<Args>(args)...);
	}

	// Ensure both shader halves are registered in their respective sub-caches.
	void preload_programs(RSXVertexProgram& vp, RSXFragmentProgram& fp)
	{
		search_vertex_program(vp);
		search_fragment_program(fp);
	}

	// True if the most recent pipeline lookup missed and triggered a compile.
	bool check_cache_missed() const
	{
		return m_cache_miss_flag;
	}
};
}
| 3,355
|
C++
|
.h
| 93
| 32.677419
| 121
| 0.757314
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,084
|
VKGSRender.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKGSRender.h
|
#pragma once
#include "upscalers/upscaling.h"
#include "vkutils/descriptors.h"
#include "vkutils/data_heap.h"
#include "vkutils/instance.hpp"
#include "vkutils/sync.h"
#include "vkutils/swapchain.hpp"
#include "VKGSRenderTypes.hpp"
#include "VKTextureCache.h"
#include "VKRenderTargets.h"
#include "VKFormats.h"
#include "VKOverlays.h"
#include "VKProgramBuffer.h"
#include "VKFramebuffer.h"
#include "VKShaderInterpreter.h"
#include "VKQueryPool.h"
#include "util/asm.hpp"
#include "Emu/RSX/GCM.h"
#include "Emu/RSX/GSRender.h"
#include "Emu/RSX/Host/RSXDMAWriter.h"
#include <thread>
#include <optional>
using namespace vk::vmm_allocation_pool_; // clang workaround.
using namespace vk::upscaling_flags_; // ditto
namespace vk
{
using host_data_t = rsx::host_gpu_context_t;
}
// Vulkan implementation of the RSX renderer backend.
// Owns the device/swapchain, the per-frame context ring, the streaming ring heaps,
// the texture/surface/program caches, and implements the ZCULL reports interface.
class VKGSRender : public GSRender, public ::rsx::reports::ZCULL_control
{
private:
	// Bitmask used by check_heap_status() to select which ring heaps to validate
	enum
	{
		VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE = 0x1,
		VK_HEAP_CHECK_VERTEX_STORAGE = 0x2,
		VK_HEAP_CHECK_VERTEX_ENV_STORAGE = 0x4,
		VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE = 0x8,
		VK_HEAP_CHECK_TEXTURE_ENV_STORAGE = 0x10,
		VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE = 0x20,
		VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE = 0x40,
		VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE = 0x80,
		VK_HEAP_CHECK_MAX_ENUM = VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE,
		VK_HEAP_CHECK_ALL = 0xFF,
	};

	// Flags stored in frame_context_t::flags
	enum frame_context_state : u32
	{
		dirty = 1
	};

	// State bits for the offloader flush handshake (m_queue_status)
	enum flush_queue_state : u32
	{
		ok = 0,
		flushing = 1,
		deadlock = 2
	};

private:
	// Active shader/pipeline state
	const VKFragmentProgram *m_fragment_prog = nullptr;
	const VKVertexProgram *m_vertex_prog = nullptr;
	vk::glsl::program *m_program = nullptr;
	vk::glsl::program *m_prev_program = nullptr;
	vk::pipeline_props m_pipeline_properties;

	vk::texture_cache m_texture_cache;
	vk::surface_cache m_rtts;

	std::unique_ptr<vk::buffer> null_buffer;
	std::unique_ptr<vk::buffer_view> null_buffer_view;

	std::unique_ptr<vk::upscaler> m_upscaler;
	output_scaling_mode m_output_scaling{output_scaling_mode::bilinear};

	// Conditional rendering predicate buffer and its sync tag
	std::unique_ptr<vk::buffer> m_cond_render_buffer;
	u64 m_cond_render_sync_tag = 0;

	// Sampler state; m_samplers_dirty forces re-evaluation of all texture bindings
	shared_mutex m_sampler_mutex;
	atomic_t<bool> m_samplers_dirty = { true };
	std::unique_ptr<vk::sampler> m_stencil_mirror_sampler;
	std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::fragment_textures_count> fs_sampler_state = {};
	std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::vertex_textures_count> vs_sampler_state = {};
	std::array<vk::sampler*, rsx::limits::fragment_textures_count> fs_sampler_handles{};
	std::array<vk::sampler*, rsx::limits::vertex_textures_count> vs_sampler_handles{};

	// Vertex attribute stream views
	std::unique_ptr<vk::buffer_view> m_persistent_attribute_storage;
	std::unique_ptr<vk::buffer_view> m_volatile_attribute_storage;
	std::unique_ptr<vk::buffer_view> m_vertex_layout_storage;

	VkDependencyInfoKHR m_async_compute_dependency_info {};
	VkMemoryBarrier2KHR m_async_compute_memory_barrier {};

public:
	//vk::fbo draw_fbo;
	std::unique_ptr<vk::vertex_cache> m_vertex_cache;
	std::unique_ptr<vk::shader_cache> m_shaders_cache;

private:
	std::unique_ptr<vk::program_cache> m_prog_buffer;

	std::unique_ptr<vk::swapchain_base> m_swapchain;
	vk::instance m_instance;
	vk::render_device *m_device;

	//Vulkan internals
	std::unique_ptr<vk::query_pool_manager> m_occlusion_query_manager;
	bool m_occlusion_query_active = false;
	rsx::reports::occlusion_query_info *m_active_query_info = nullptr;
	std::vector<vk::occlusion_data> m_occlusion_map;

	// Secondary (async) command buffer ring
	shared_mutex m_secondary_cb_guard;
	vk::command_pool m_secondary_command_buffer_pool;
	vk::command_buffer_chain<VK_MAX_ASYNC_CB_COUNT> m_secondary_cb_list;

	// Primary command buffer ring
	vk::command_pool m_command_buffer_pool;
	vk::command_buffer_chain<VK_MAX_ASYNC_CB_COUNT> m_primary_cb_list;
	vk::command_buffer_chunk* m_current_command_buffer = nullptr;

	std::unique_ptr<vk::buffer> m_host_object_data;

	vk::descriptor_pool m_descriptor_pool;
	VkDescriptorSetLayout m_descriptor_layouts = VK_NULL_HANDLE;
	VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE;

	vk::framebuffer_holder* m_draw_fbo = nullptr;

	sizeu m_swapchain_dims{};
	bool swapchain_unavailable = false;
	bool should_reinitialize_swapchain = false;

	u64 m_last_heap_sync_time = 0;
	u32 m_texbuffer_view_size = 0;

	// Streaming ring heaps for per-draw data
	vk::data_heap m_attrib_ring_info; // Vertex data
	vk::data_heap m_fragment_constants_ring_info; // Fragment program constants
	vk::data_heap m_transform_constants_ring_info; // Transform program constants
	vk::data_heap m_fragment_env_ring_info; // Fragment environment params
	vk::data_heap m_vertex_env_ring_info; // Vertex environment params
	vk::data_heap m_fragment_texture_params_ring_info; // Fragment texture params
	vk::data_heap m_vertex_layout_ring_info; // Vertex layout structure
	vk::data_heap m_index_buffer_ring_info; // Index data
	vk::data_heap m_texture_upload_buffer_ring_info; // Texture upload heap
	vk::data_heap m_raster_env_ring_info; // Raster control such as polygon and line stipple

	vk::data_heap m_fragment_instructions_buffer;
	vk::data_heap m_vertex_instructions_buffer;

	// Descriptor buffer bindings refreshed each draw
	VkDescriptorBufferInfo m_vertex_env_buffer_info {};
	VkDescriptorBufferInfo m_fragment_env_buffer_info {};
	VkDescriptorBufferInfo m_vertex_layout_stream_info {};
	VkDescriptorBufferInfo m_vertex_constants_buffer_info {};
	VkDescriptorBufferInfo m_fragment_constants_buffer_info {};
	VkDescriptorBufferInfo m_fragment_texture_params_buffer_info {};
	VkDescriptorBufferInfo m_raster_env_buffer_info {};
	VkDescriptorBufferInfo m_vertex_instructions_buffer_info {};
	VkDescriptorBufferInfo m_fragment_instructions_buffer_info {};

	// Frame context ring
	std::array<vk::frame_context_t, VK_MAX_ASYNC_FRAMES> frame_context_storage;
	//Temp frame context to use if the real frame queue is overburdened. Only used for storage
	vk::frame_context_t m_aux_frame_context;

	u32 m_current_queue_index = 0;
	vk::frame_context_t* m_current_frame = nullptr;
	std::deque<vk::frame_context_t*> m_queued_frames;

	VkViewport m_viewport {};
	VkRect2D m_scissor {};

	std::vector<u8> m_draw_buffers;

	shared_mutex m_flush_queue_mutex;
	vk::flush_request_task m_flush_requests;

	ullong m_last_cond_render_eval_hint = 0;

	// Offloader thread deadlock recovery
	rsx::atomic_bitmask_t<flush_queue_state> m_queue_status;
	utils::address_range m_offloader_fault_range;
	rsx::invalidation_cause m_offloader_fault_cause;

	vk::draw_call_t m_current_draw {};
	u64 m_current_renderpass_key = 0;
	VkRenderPass m_cached_renderpass = VK_NULL_HANDLE;
	std::vector<vk::image*> m_fbo_images;

	//Vertex layout
	rsx::vertex_input_layout m_vertex_layout;

	vk::shader_interpreter m_shader_interpreter;
	u32 m_interpreter_state;

#if defined(HAVE_X11) && defined(HAVE_VULKAN)
	Display *m_display_handle = nullptr;
#endif

public:
	u64 get_cycles() final;
	~VKGSRender() override;
	VKGSRender(utils::serial* ar) noexcept;
	VKGSRender() noexcept : VKGSRender(nullptr) {}

private:
	void prepare_rtts(rsx::framebuffer_creation_context context);

	void close_and_submit_command_buffer(
		vk::fence* fence = nullptr,
		VkSemaphore wait_semaphore = VK_NULL_HANDLE,
		VkSemaphore signal_semaphore = VK_NULL_HANDLE,
		VkPipelineStageFlags pipeline_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);

	void flush_command_queue(bool hard_sync = false, bool do_not_switch = false);
	void queue_swap_request();
	void frame_context_cleanup(vk::frame_context_t *ctx);
	void advance_queued_frames();
	void present(vk::frame_context_t *ctx);
	void reinitialize_swapchain();

	vk::viewable_image* get_present_source(vk::present_surface_info* info, const rsx::avconf& avconfig);

	// Render pass management
	void begin_render_pass();
	void close_render_pass();
	VkRenderPass get_render_pass();

	void update_draw_state();

	void check_heap_status(u32 flags = VK_HEAP_CHECK_ALL);
	void check_present_status();

	VkDescriptorSet allocate_descriptor_set();

	vk::vertex_upload_info upload_vertex_data();

	rsx::simple_array<u8> m_scratch_mem;

	// Program and environment upload
	bool load_program();
	void load_program_env();
	void update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_info);
	void upload_transform_constants(const rsx::io_buffer& buffer);

	void load_texture_env();
	bool bind_texture_env();
	bool bind_interpreter_texture_env();

public:
	void init_buffers(rsx::framebuffer_creation_context context, bool skip_reading = false);
	void set_viewport();
	void set_scissor(bool clip_viewport);
	void bind_viewport();

	void sync_hint(rsx::FIFO::interrupt_hint hint, rsx::reports::sync_hint_payload_t payload) override;
	bool release_GCM_label(u32 address, u32 data) override;

	// ZCULL occlusion query interface
	void begin_occlusion_query(rsx::reports::occlusion_query_info* query) override;
	void end_occlusion_query(rsx::reports::occlusion_query_info* query) override;
	bool check_occlusion_query_status(rsx::reports::occlusion_query_info* query) override;
	void get_occlusion_query_result(rsx::reports::occlusion_query_info* query) override;
	void discard_occlusion_query(rsx::reports::occlusion_query_info* query) override;

	// External callback in case we need to suddenly submit a commandlist unexpectedly, e.g in a violation handler
	void emergency_query_cleanup(vk::command_buffer* commands);

	// External callback to handle out of video memory problems
	bool on_vram_exhausted(rsx::problem_severity severity);

	// Handle pool creation failure due to fragmentation
	void on_descriptor_pool_fragmentation(bool is_fatal);

	// Conditional rendering
	void begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources) override;
	void end_conditional_rendering() override;

	// Host sync object
	std::pair<volatile vk::host_data_t*, VkBuffer> map_host_object_data() const;
	void on_guest_texture_read(const vk::command_buffer& cmd);

	// GRAPH backend
	void patch_transform_constants(rsx::context* ctx, u32 index, u32 count) override;

protected:
	void clear_surface(u32 mask) override;
	void begin() override;
	void end() override;
	void emit_geometry(u32 sub_index) override;

	void on_init_thread() override;
	void on_exit() override;
	void flip(const rsx::display_flip_info_t& info) override;

	void renderctl(u32 request_code, void* args) override;

	void do_local_task(rsx::FIFO::state state) override;
	bool scaled_image_from_memory(const rsx::blit_src_info& src, const rsx::blit_dst_info& dst, bool interpolate) override;
	void notify_tile_unbound(u32 tile) override;

	bool on_access_violation(u32 address, bool is_writing) override;
	void on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause) override;
	void on_semaphore_acquire_wait() override;
};
| 10,630
|
C++
|
.h
| 235
| 43.021277
| 125
| 0.764022
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,085
|
VKTextureCache.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKTextureCache.h
|
#pragma once
#include "VKAsyncScheduler.h"
#include "VKDMA.h"
#include "VKRenderTargets.h"
#include "VKResourceManager.h"
#include "VKRenderPass.h"
#include "vkutils/image_helpers.h"
#include "../Common/texture_cache.h"
#include "../Common/tiled_dma_copy.hpp"
#include "Emu/Cell/timers.hpp"
#include <memory>
#include <vector>
namespace vk
{
class cached_texture_section;
class texture_cache;
// Type bindings that specialize the generic rsx::texture_cache for the Vulkan backend.
struct texture_cache_traits
{
	using commandbuffer_type = vk::command_buffer;
	using section_storage_type = vk::cached_texture_section;
	using texture_cache_type = vk::texture_cache;
	using texture_cache_base_type = rsx::texture_cache<texture_cache_type, texture_cache_traits>;
	using image_resource_type = vk::image*;
	using image_view_type = vk::image_view*;
	using image_storage_type = vk::image;
	using texture_format = VkFormat;
	using viewable_image_type = vk::viewable_image*;
};
class cached_texture_section : public rsx::cached_texture_section<vk::cached_texture_section, vk::texture_cache_traits>
{
using baseclass = typename rsx::cached_texture_section<vk::cached_texture_section, vk::texture_cache_traits>;
friend baseclass;
std::unique_ptr<vk::viewable_image> managed_texture = nullptr;
//DMA relevant data
std::unique_ptr<vk::event> dma_fence;
vk::render_device* m_device = nullptr;
vk::viewable_image* vram_texture = nullptr;
public:
using baseclass::cached_texture_section;
// (Re)bind this cache section to an image. 'managed' means the section takes
// ownership of 'image'; otherwise the image is an external resource (e.g. an RTT).
// Any in-flight DMA sync state from a previous session is discarded.
void create(u16 w, u16 h, u16 depth, u16 mipmaps, vk::image* image, u32 rsx_pitch, bool managed, u32 gcm_format, bool pack_swap_bytes = false)
{
	if (vram_texture && !managed_texture && get_protection() == utils::protection::no)
	{
		// In-place image swap, still locked. Likely a color buffer that got rebound as depth buffer or vice-versa.
		vk::as_rtt(vram_texture)->on_swap_out();

		if (!managed)
		{
			// Incoming is also an external resource, reference it immediately
			vk::as_rtt(image)->on_swap_in(is_locked());
		}
	}

	auto new_texture = static_cast<vk::viewable_image*>(image);
	// A managed section may only ever be recreated with the same backing image
	ensure(!exists() || !is_managed() || vram_texture == new_texture);
	vram_texture = new_texture;

	ensure(rsx_pitch);

	width = w;
	height = h;
	this->depth = depth;
	this->mipmaps = mipmaps;
	this->rsx_pitch = rsx_pitch;

	this->gcm_format = gcm_format;
	this->pack_unpack_swap_bytes = pack_swap_bytes;

	if (managed)
	{
		managed_texture.reset(vram_texture);
	}

	if (auto rtt = dynamic_cast<vk::render_target*>(image))
	{
		swizzled = (rtt->raster_type != rsx::surface_raster_type::linear);
	}

	if (synchronized)
	{
		// Even if we are managing the same vram section, we cannot guarantee contents are static
		// The create method is only invoked when a new managed session is required
		release_dma_resources();
		synchronized = false;
		flushed = false;
		sync_timestamp = 0ull;
	}

	// Notify baseclass
	baseclass::on_section_resources_created();
}
// Hand the DMA sync event (if one exists) to the resource manager for deferred destruction.
void release_dma_resources()
{
	if (!dma_fence)
	{
		return;
	}

	vk::get_resource_manager()->dispose(dma_fence);
}
// Abandon an in-flight DMA readback without completing it.
void dma_abort() override
{
	// Called if a reset occurs, usually via reprotect path after a bad prediction.
	// Discard the sync event, the next sync, if any, will properly recreate this.
	ensure(synchronized);
	ensure(!flushed);
	ensure(dma_fence);
	vk::get_resource_manager()->dispose(dma_fence);
}
// Release this section's resources and notify the owning cache.
void destroy()
{
	if (!exists() && context != rsx::texture_upload_context::dma)
		return;

	m_tex_cache->on_section_destroyed(*this);
	vram_texture = nullptr;
	// The cache callback is expected to have taken/cleared the managed image by now
	ensure(!managed_texture);
	release_dma_resources();

	baseclass::on_section_resources_destroyed();
}
// True once the section has been bound to a vram image via create().
bool exists() const
{
	return (vram_texture != nullptr);
}
// A section counts as "managed" when it owns its image, or has no image bound at all.
bool is_managed() const
{
	return managed_texture || !exists();
}
// Image view with the given channel remap applied. Requires a bound image.
vk::image_view* get_view(const rsx::texture_channel_remap_t& remap)
{
	ensure(vram_texture != nullptr);
	return vram_texture->get_view(remap);
}
// Image view using the identity (default) channel remap. Requires a bound image.
vk::image_view* get_raw_view()
{
	ensure(vram_texture != nullptr);
	return vram_texture->get_view(rsx::default_remap_vector);
}
// The owned image, or nullptr when the section references an external resource.
vk::viewable_image* get_raw_texture()
{
	return managed_texture.get();
}
// Direct access to the owning smart pointer of the managed image.
std::unique_ptr<vk::viewable_image>& get_texture()
{
	return managed_texture;
}
// Reinterpret the bound image as a render target (see vk::as_rtt).
vk::render_target* get_render_target() const
{
	return vk::as_rtt(vram_texture);
}
// Vulkan format of the backing image; DMA-only sections report VK_FORMAT_R32_UINT.
VkFormat get_format() const
{
	if (context == rsx::texture_upload_context::dma)
	{
		return VK_FORMAT_R32_UINT;
	}

	ensure(vram_texture != nullptr);
	return vram_texture->format();
}
bool is_flushed() const
{
	//This memory section was flushable, but a flush has already removed protection
	return flushed;
}
void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch);
// Record commands that copy this section's GPU contents into its DMA buffer so the
// data can later be flushed to guest memory. 'miss' selects whether this counts as
// a speculative flush or a cache miss in the base-class statistics.
void copy_texture(vk::command_buffer& cmd, bool miss)
{
	ensure(exists());

	if (!miss) [[likely]]
	{
		ensure(!synchronized);
		baseclass::on_speculative_flush();
	}
	else
	{
		baseclass::on_miss();
	}

	if (m_device == nullptr)
	{
		m_device = &cmd.get_command_pool().get_owner();
	}

	vk::image* locked_resource = vram_texture;
	u32 transfer_width = width;
	u32 transfer_height = height;
	u32 transfer_x = 0, transfer_y = 0;

	if (context == rsx::texture_upload_context::framebuffer_storage)
	{
		// Surfaces may be internally scaled; copy at the raw (sample-scaled) resolution
		auto surface = vk::as_rtt(vram_texture);
		surface->memory_barrier(cmd, rsx::surface_access::transfer_read);
		locked_resource = surface->get_surface(rsx::surface_access::transfer_read);
		transfer_width *= surface->samples_x;
		transfer_height *= surface->samples_y;
	}

	vk::image* target = locked_resource;
	if (transfer_width != locked_resource->width() || transfer_height != locked_resource->height())
	{
		// Dimensions mismatch: rescale through a shared typeless scratch image first
		// TODO: Synchronize access to typeles textures
		target = vk::get_typeless_helper(vram_texture->format(), vram_texture->format_class(), transfer_width, transfer_height);
		target->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

		// Allow bilinear filtering on color textures where compatibility is likely
		const auto filter = (target->aspect() == VK_IMAGE_ASPECT_COLOR_BIT) ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;

		vk::copy_scaled_image(cmd, locked_resource, target,
			{ 0, 0, static_cast<s32>(locked_resource->width()), static_cast<s32>(locked_resource->height()) },
			{ 0, 0, static_cast<s32>(transfer_width), static_cast<s32>(transfer_height) },
			1, true, filter);

		target->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
	}

	const auto internal_bpp = vk::get_format_texel_width(vram_texture->format());
	const auto valid_range = get_confirmed_range();

	if (const auto section_range = get_section_range(); section_range != valid_range)
	{
		// Shrink the copy window to just the confirmed sub-rectangle of the section
		if (const auto offset = (valid_range.start - get_section_base()))
		{
			transfer_y = offset / rsx_pitch;
			transfer_x = (offset % rsx_pitch) / internal_bpp;

			ensure(transfer_width >= transfer_x);
			ensure(transfer_height >= transfer_y);
			transfer_width -= transfer_x;
			transfer_height -= transfer_y;
		}

		if (const auto tail = (section_range.end - valid_range.end))
		{
			// Trim rows past the end of the confirmed range
			const auto row_count = tail / rsx_pitch;

			ensure(transfer_height >= row_count);
			transfer_height -= row_count;
		}
	}

	areai src_area;
	src_area.x1 = static_cast<s32>(transfer_x);
	src_area.y1 = static_cast<s32>(transfer_y);
	src_area.x2 = s32(transfer_x + transfer_width);
	src_area.y2 = s32(transfer_y + transfer_height);
	dma_transfer(cmd, target, src_area, valid_range, rsx_pitch);
}
/**
* Flush
*/
// Completes a previously recorded readback: waits for the GPU copy, writes the
// data back to guest memory, then undoes any swizzling in place.
void imp_flush() override
{
AUDIT(synchronized);
// Synchronize, reset dma_fence after waiting
vk::wait_for_event(dma_fence.get(), GENERAL_WAIT_TIMEOUT);
// Calculate smallest range to flush - for framebuffers, the raster region is enough
const auto range = (context == rsx::texture_upload_context::framebuffer_storage) ? get_section_range() : get_confirmed_range();
auto flush_length = range.length();
const auto tiled_region = rsx::get_current_renderer()->get_tiled_memory_region(range);
if (tiled_region)
{
// Clamp the flush to what the tile can actually hold past our start offset
const auto available_tile_size = tiled_region.tile->size - (range.start - tiled_region.base_address);
const auto max_content_size = tiled_region.tile->pitch * utils::align(height, 64);
flush_length = std::min(max_content_size, available_tile_size);
}
vk::flush_dma(range.start, flush_length);
#if DEBUG_DMA_TILING
// Are we a tiled region?
if (const auto tiled_region = rsx::get_current_renderer()->get_tiled_memory_region(range))
{
// Debug aid: re-tile the linear data and write it back for comparison
auto real_data = vm::get_super_ptr<u8>(range.start);
auto out_data = std::vector<u8>(tiled_region.tile->size);
rsx::tile_texel_data<u32>(
out_data.data(),
real_data,
tiled_region.base_address,
range.start - tiled_region.base_address,
tiled_region.tile->size,
tiled_region.tile->bank,
tiled_region.tile->pitch,
width,
height
);
std::memcpy(real_data, out_data.data(), flush_length);
}
#endif
if (is_swizzled())
{
// This format is completely worthless to CPU processing algorithms where cache lines on die are linear.
// If this is happening, usually it means it was not a planned readback (e.g shared pages situation)
rsx_log.trace("[Performance warning] CPU readback of swizzled data");
// Read-modify-write to avoid corrupting already resident memory outside texture region
void* data = get_ptr(range.start);
std::vector<u8> tmp_data(rsx_pitch * height);
std::memcpy(tmp_data.data(), data, tmp_data.size());
switch (gcm_format)
{
case CELL_GCM_TEXTURE_A8R8G8B8:
case CELL_GCM_TEXTURE_DEPTH24_D8:
rsx::convert_linear_swizzle<u32, false>(tmp_data.data(), data, width, height, rsx_pitch);
break;
case CELL_GCM_TEXTURE_R5G6B5:
case CELL_GCM_TEXTURE_DEPTH16:
rsx::convert_linear_swizzle<u16, false>(tmp_data.data(), data, width, height, rsx_pitch);
break;
default:
rsx_log.error("Unexpected swizzled texture format 0x%x", gcm_format);
}
}
}
// No host mapping step is needed on this backend; flushing goes through imp_flush().
void* map_synchronized(u32, u32)
{
return nullptr;
}
// Nothing to clean up after a flush on this backend.
void finish_flush()
{}
/**
* Misc
*/
// Controls whether texel data is byte-swapped when unpacked for readback.
void set_unpack_swap_bytes(bool swap_bytes)
{
pack_unpack_swap_bytes = swap_bytes;
}
// Updates the guest-memory row pitch. Only legal while the section is unlocked,
// since the pitch determines the protected memory layout.
void set_rsx_pitch(u32 pitch)
{
ensure(!is_locked());
rsx_pitch = pitch;
}
void sync_surface_memory(const std::vector<cached_texture_section*>& surfaces)
{
auto rtt = vk::as_rtt(vram_texture);
rtt->sync_tag();
for (auto& surface : surfaces)
{
rtt->inherit_surface_contents(vk::as_rtt(surface->vram_texture));
}
}
// True if 'tex' has the exact same VkFormat as the backing image.
bool has_compatible_format(vk::image* tex) const
{
return vram_texture->info.format == tex->info.format;
}
// True if the backing image has a depth aspect (depth or depth-stencil format).
bool is_depth_texture() const
{
return !!(vram_texture->aspect() & VK_IMAGE_ASPECT_DEPTH_BIT);
}
};
// Vulkan implementation of the generic RSX texture cache (CRTP over the shared base).
class texture_cache : public rsx::texture_cache<vk::texture_cache, vk::texture_cache_traits>
{
private:
using baseclass = rsx::texture_cache<vk::texture_cache, vk::texture_cache_traits>;
friend baseclass;
// RAII helper: returns an image to the parent cache's reuse pool on destruction.
struct cached_image_reference_t
{
std::unique_ptr<vk::viewable_image> data;
texture_cache* parent;
cached_image_reference_t(texture_cache* parent, std::unique_ptr<vk::viewable_image>& previous);
~cached_image_reference_t();
};
// Pool entry: an idle image keyed by its creation parameters for fast reuse lookup.
struct cached_image_t
{
u64 key;
std::unique_ptr<vk::viewable_image> data;
cached_image_t() = default;
cached_image_t(u64 key_, std::unique_ptr<vk::viewable_image>& data_) :
key(key_), data(std::move(data_)) {}
};
public:
enum texture_create_flags : u32
{
initialize_image_contents = 1,
do_not_reuse = 2,
shareable = 4
};
void on_section_destroyed(cached_texture_section& tex) override;
private:
// Vulkan internals
vk::render_device* m_device;
vk::memory_type_mapping m_memory_types;
vk::gpu_formats_support m_formats_support;
VkQueue m_submit_queue;
vk::data_heap* m_texture_upload_heap;
// Stuff that has been dereferenced by the GPU goes into these
const u32 max_cached_image_pool_size = 256;
std::deque<cached_image_t> m_cached_images;
atomic_t<u64> m_cached_memory_size = { 0 };
shared_mutex m_cached_pool_lock;
// Blocks some operations when exiting
atomic_t<bool> m_cache_is_exiting = false;
void clear();
VkComponentMapping apply_component_mapping_flags(u32 gcm_format, rsx::component_order flags, const rsx::texture_channel_remap_t& remap_vector) const;
void copy_transfer_regions_impl(vk::command_buffer& cmd, vk::image* dst, const std::vector<copy_region_descriptor>& sections_to_transfer) const;
vk::image* get_template_from_collection_impl(const std::vector<copy_region_descriptor>& sections_to_transfer) const;
// Looks up the reuse pool for an idle image matching the given parameters.
std::unique_ptr<vk::viewable_image> find_cached_image(VkFormat format, u16 w, u16 h, u16 d, u16 mipmaps, VkImageType type, VkImageCreateFlags create_flags, VkImageUsageFlags usage, VkSharingMode sharing);
protected:
// Temporary-subresource and composite-image builders required by the base class.
vk::image_view* create_temporary_subresource_view_impl(vk::command_buffer& cmd, vk::image* source, VkImageType image_type, VkImageViewType view_type,
u32 gcm_format, u16 x, u16 y, u16 w, u16 h, u16 d, u8 mips, const rsx::texture_channel_remap_t& remap_vector, bool copy);
vk::image_view* create_temporary_subresource_view(vk::command_buffer& cmd, vk::image* source, u32 gcm_format,
u16 x, u16 y, u16 w, u16 h, const rsx::texture_channel_remap_t& remap_vector) override;
vk::image_view* create_temporary_subresource_view(vk::command_buffer& cmd, vk::image** source, u32 gcm_format,
u16 x, u16 y, u16 w, u16 h, const rsx::texture_channel_remap_t& remap_vector) override;
vk::image_view* generate_cubemap_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 size,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector) override;
vk::image_view* generate_3d_from_2d_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height, u16 depth,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector) override;
vk::image_view* generate_atlas_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector) override;
vk::image_view* generate_2d_mipmaps_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector) override;
void release_temporary_subresource(vk::image_view* view) override;
void update_image_contents(vk::command_buffer& cmd, vk::image_view* dst_view, vk::image* src, u16 width, u16 height) override;
// Section creation / upload hooks invoked by the base cache.
cached_texture_section* create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags) override;
cached_texture_section* create_nul_section(vk::command_buffer& cmd, const utils::address_range& rsx_range, const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile, bool memory_load) override;
cached_texture_section* upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) override;
void set_component_order(cached_texture_section& section, u32 gcm_format, rsx::component_order expected_flags) override;
void insert_texture_barrier(vk::command_buffer& cmd, vk::image* tex, bool strong_ordering) override;
bool render_target_format_is_compatible(vk::image* tex, u32 gcm_format) override;
void prepare_for_dma_transfers(vk::command_buffer& cmd) override;
void cleanup_after_dma_transfers(vk::command_buffer& cmd) override;
public:
using baseclass::texture_cache;
void initialize(vk::render_device& device, VkQueue submit_queue, vk::data_heap& upload_heap);
void destroy() override;
std::unique_ptr<vk::viewable_image> create_temporary_subresource_storage(
rsx::format_class format_class, VkFormat format,
u16 width, u16 height, u16 depth, u16 layers, u8 mips,
VkImageType image_type, VkFlags image_flags, VkFlags usage_flags);
void dispose_reusable_image(std::unique_ptr<vk::viewable_image>& tex);
bool is_depth_texture(u32 rsx_address, u32 rsx_size) override;
void on_frame_end() override;
vk::viewable_image* upload_image_simple(vk::command_buffer& cmd, VkFormat format, u32 address, u32 width, u32 height, u32 pitch);
bool blit(const rsx::blit_src_info& src, const rsx::blit_dst_info& dst, bool interpolate, vk::surface_cache& m_rtts, vk::command_buffer& cmd);
u32 get_unreleased_textures_count() const override;
bool handle_memory_pressure(rsx::problem_severity severity) override;
u64 get_temporary_memory_in_use() const;
bool is_overallocated() const;
};
}
| 17,444
|
C++
|
.h
| 416
| 37.920673
| 206
| 0.709573
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,086
|
nearest_pass.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/upscalers/nearest_pass.hpp
|
#pragma once
#include "upscaling.h"
namespace vk
{
// Point-sampled upscaler: a single vkCmdBlitImage with VK_FILTER_NEAREST
// straight onto the presentation surface. Has no intermediate output, so it
// can only operate in commit mode.
struct nearest_upscale_pass : public upscaler
{
	vk::viewable_image* scale_output(
		const vk::command_buffer& cmd,        // CB
		vk::viewable_image* src,              // Source input
		VkImage present_surface,              // Present target. May be VK_NULL_HANDLE for some passes
		VkImageLayout present_surface_layout, // Present surface layout, or VK_IMAGE_LAYOUT_UNDEFINED if no present target is provided
		const VkImageBlit& request,           // Scaling request information
		rsx::flags32_t mode                   // Mode
	) override
	{
		if (!(mode & UPSCALE_AND_COMMIT))
		{
			// Upscaling source only is unsupported
			return src;
		}

		ensure(present_surface);

		// Blit directly to the presentation target with point sampling
		src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
		vkCmdBlitImage(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_NEAREST);
		src->pop_layout(cmd);

		return nullptr;
	}
};
}
| 1,006
|
C++
|
.h
| 28
| 32.5
| 131
| 0.672485
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,087
|
upscaling.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/upscalers/upscaling.h
|
#pragma once
#include "util/types.hpp"
#include "../vkutils/commands.h"
#include "../vkutils/image.h"
namespace vk
{
// Request flags for upscaler passes. Wrapped in a namespace so the enumerators
// can be pulled into vk:: with a using-directive without polluting other scopes.
namespace upscaling_flags_
{
enum upscaling_flags
{
// NOTE: DEFAULT_VIEW and LEFT_VIEW deliberately share the same bit;
// the left eye doubles as the default (mono) view.
UPSCALE_DEFAULT_VIEW = (1 << 0),
UPSCALE_LEFT_VIEW = (1 << 0),
UPSCALE_RIGHT_VIEW = (1 << 1),
UPSCALE_AND_COMMIT = (1 << 2)
};
}
using namespace upscaling_flags_;
// Abstract interface for display upscaling passes (nearest, bilinear, FSR, ...).
// Implementations return the scaled image, or nullptr when the result was
// written directly to the present surface (UPSCALE_AND_COMMIT).
struct upscaler
{
virtual ~upscaler() {}
virtual vk::viewable_image* scale_output(
const vk::command_buffer& cmd,        // CB
vk::viewable_image* src,              // Source input
VkImage present_surface,              // Present target. May be VK_NULL_HANDLE for some passes
VkImageLayout present_surface_layout, // Present surface layout, or VK_IMAGE_LAYOUT_UNDEFINED if no present target is provided
const VkImageBlit& request,           // Scaling request information
rsx::flags32_t mode                   // Mode
) = 0;
};
}
| 972
|
C++
|
.h
| 30
| 28.366667
| 132
| 0.62043
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,088
|
fsr_pass.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/upscalers/fsr_pass.h
|
#pragma once
#include "../vkutils/sampler.h"
#include "../VKCompute.h"
#include "upscaling.h"
namespace vk
{
namespace FidelityFX
{
// Common base for the two AMD FidelityFX Super Resolution compute passes.
// Holds the sampler, bound input/output views, the pass dimensions and the
// push-constant block; subclasses fill the constants in configure().
class fsr_pass : public compute_task
{
protected:
std::unique_ptr<vk::sampler> m_sampler;
const vk::image_view* m_input_image = nullptr;
const vk::image_view* m_output_image = nullptr;
size2u m_input_size;
size2u m_output_size;
// Push-constant scratch (20 x u32); layout is defined by the FSR shader
u32 m_constants_buf[20];
std::vector<std::pair<VkDescriptorType, u8>> get_descriptor_layout() override;
void declare_inputs() override;
void bind_resources() override;
// Pass-specific constant setup, implemented by EASU/RCAS
virtual void configure(const vk::command_buffer& cmd) = 0;
public:
fsr_pass(const std::string& config_definitions, u32 push_constants_size_);
// Dispatches the pass scaling 'src' (input_size) into 'dst' (output_size)
void run(const vk::command_buffer& cmd, vk::viewable_image* src, vk::viewable_image* dst, const size2u& input_size, const size2u& output_size);
};
// FSR Edge-Adaptive Spatial Upsampling pass (the actual upscale step).
class easu_pass : public fsr_pass
{
void configure(const vk::command_buffer& cmd) override;
public:
easu_pass();
};
// FSR Robust Contrast-Adaptive Sharpening pass (runs after EASU).
class rcas_pass : public fsr_pass
{
void configure(const vk::command_buffer& cmd) override;
public:
rcas_pass();
};
}
// Upscaler implementation backed by the FidelityFX EASU+RCAS compute chain.
// Keeps per-eye output images plus an intermediate buffer between the passes.
class fsr_upscale_pass : public upscaler
{
std::unique_ptr<vk::viewable_image> m_output_left;
std::unique_ptr<vk::viewable_image> m_output_right;
std::unique_ptr<vk::viewable_image> m_intermediate_data;
void dispose_images();
// (Re)creates the output/intermediate images for the given target size and view mode
void initialize_image(u32 output_w, u32 output_h, rsx::flags32_t mode);
public:
vk::viewable_image* scale_output(
const vk::command_buffer& cmd,        // CB
vk::viewable_image* src,              // Source input
VkImage present_surface,              // Present target. May be VK_NULL_HANDLE for some passes
VkImageLayout present_surface_layout, // Present surface layout, or VK_IMAGE_LAYOUT_UNDEFINED if no present target is provided
const VkImageBlit& request,           // Scaling request information
rsx::flags32_t mode                   // Mode
) override;
};
}
| 2,050
|
C++
|
.h
| 56
| 32.125
| 147
| 0.676321
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,089
|
bilinear_pass.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/upscalers/bilinear_pass.hpp
|
#pragma once
#include "upscaling.h"
namespace vk
{
// Bilinear upscaler: a single vkCmdBlitImage with VK_FILTER_LINEAR straight
// onto the presentation surface. Has no intermediate output, so it can only
// operate in commit mode.
struct bilinear_upscale_pass : public upscaler
{
	vk::viewable_image* scale_output(
		const vk::command_buffer& cmd,        // CB
		vk::viewable_image* src,              // Source input
		VkImage present_surface,              // Present target. May be VK_NULL_HANDLE for some passes
		VkImageLayout present_surface_layout, // Present surface layout, or VK_IMAGE_LAYOUT_UNDEFINED if no present target is provided
		const VkImageBlit& request,           // Scaling request information
		rsx::flags32_t mode                   // Mode
	) override
	{
		if (!(mode & UPSCALE_AND_COMMIT))
		{
			// Upscaling source only is unsupported
			return src;
		}

		ensure(present_surface);

		// Blit directly to the presentation target with linear filtering
		src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
		vkCmdBlitImage(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_LINEAR);
		src->pop_layout(cmd);

		return nullptr;
	}
};
}
| 1,038
|
C++
|
.h
| 28
| 32.5
| 132
| 0.653693
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
6,090
|
swapchain.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/swapchain.hpp
|
#pragma once
#ifdef HAVE_X11
#include <X11/Xutil.h>
#endif
#include "../../display.h"
#include "../VulkanAPI.h"
#include "image.h"
#include <memory>
namespace vk
{
// Thin wrapper around a VkImage owned by a WSI (driver) swapchain.
struct swapchain_image_WSI
{
VkImage value = VK_NULL_HANDLE;
};
// Software-presented swapchain image: a device-local BGRA8 image paired with a
// host-visible DMA buffer, so frames can be read back and blitted by the OS.
class swapchain_image_RPCS3 : public image
{
std::unique_ptr<buffer> m_dma_buffer;
u32 m_width = 0;
u32 m_height = 0;
public:
swapchain_image_RPCS3(render_device& dev, const memory_type_mapping& memory_map, u32 width, u32 height)
:image(dev, memory_map.device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, width, height, 1, 1, 1,
VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, VMM_ALLOCATION_POOL_SWAPCHAIN)
{
m_width = width;
m_height = height;
current_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
// 4 bytes per pixel (B8G8R8A8) staging buffer for CPU readback
m_dma_buffer = std::make_unique<buffer>(dev, m_width * m_height * 4, memory_map.host_visible_coherent,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_BUFFER_USAGE_TRANSFER_DST_BIT, 0, VMM_ALLOCATION_POOL_SWAPCHAIN);
}
// Records a copy of the rendered image into the host-visible buffer
void do_dma_transfer(command_buffer& cmd)
{
VkBufferImageCopy copyRegion = {};
copyRegion.bufferOffset = 0;
copyRegion.bufferRowLength = m_width;
copyRegion.bufferImageHeight = m_height;
copyRegion.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
copyRegion.imageOffset = {};
copyRegion.imageExtent = { m_width, m_height, 1 };
change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImageToBuffer(cmd, value, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_dma_buffer->value, 1, ©Region);
change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
u32 get_required_memory_size() const
{
return m_width * m_height * 4;
}
// Maps the readback buffer; pair every call with free_pixels()
void* get_pixels()
{
return m_dma_buffer->map(0, VK_WHOLE_SIZE);
}
void free_pixels()
{
m_dma_buffer->unmap();
}
};
// Abstract swapchain interface shared by the WSI (driver) and native
// (software-presented) implementations. Owns the render device.
class swapchain_base
{
protected:
render_device dev;
display_handle_t window_handle{};
u32 m_width = 0;
u32 m_height = 0;
VkFormat m_surface_format = VK_FORMAT_B8G8R8A8_UNORM;
virtual void init_swapchain_images(render_device& dev, u32 count) = 0;
public:
swapchain_base(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
{
dev.create(gpu, graphics_queue, present_queue, transfer_queue);
m_surface_format = format;
}
virtual ~swapchain_base() = default;
virtual void create(display_handle_t& handle) = 0;
virtual void destroy(bool full = true) = 0;
virtual bool init() = 0;
virtual u32 get_swap_image_count() const = 0;
virtual VkImage get_image(u32 index) = 0;
virtual VkResult acquire_next_swapchain_image(VkSemaphore semaphore, u64 timeout, u32* result) = 0;
virtual void end_frame(command_buffer& cmd, u32 index) = 0;
virtual VkResult present(VkSemaphore semaphore, u32 index) = 0;
virtual VkImageLayout get_optimal_present_layout() = 0;
// True when the windowing system reliably reports resize/out-of-date events
virtual bool supports_automatic_wm_reports() const
{
return false;
}
// Convenience overload: record dimensions, then run the implementation's init()
bool init(u32 w, u32 h)
{
m_width = w;
m_height = h;
return init();
}
const vk::render_device& get_device()
{
return dev;
}
VkFormat get_surface_format()
{
return m_surface_format;
}
bool is_headless() const
{
return (dev.get_present_queue() == VK_NULL_HANDLE);
}
};
// Adds typed image storage on top of swapchain_base; T is the per-image
// bookkeeping type (a bare VkImage wrapper for WSI, an owned image + in-use
// flag for the native/software path).
template<typename T>
class abstract_swapchain_impl : public swapchain_base
{
protected:
std::vector<T> swapchain_images;
public:
abstract_swapchain_impl(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
: swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{}
~abstract_swapchain_impl() override = default;
u32 get_swap_image_count() const override
{
return ::size32(swapchain_images);
}
using swapchain_base::init;
};
using native_swapchain_base = abstract_swapchain_impl<std::pair<bool, std::unique_ptr<swapchain_image_RPCS3>>>;
using WSI_swapchain_base = abstract_swapchain_impl<swapchain_image_WSI>;
// Platform-specific native (software-presented) swapchains. Exactly one class
// is compiled per platform; the shared method definitions after the #endif
// belong to whichever class the preprocessor selected.
#ifdef _WIN32
// GDI-based presentation: frames are copied into a DIB section and BitBlt'd.
class swapchain_WIN32 : public native_swapchain_base
{
HDC hDstDC = NULL;
HDC hSrcDC = NULL;
HBITMAP hDIB = NULL;
LPVOID hPtr = NULL;
public:
swapchain_WIN32(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
: native_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{}
~swapchain_WIN32() {}
bool init() override
{
// Re-init path: drop old GDI objects but keep the window DC
if (hDIB || hSrcDC)
destroy(false);
RECT rect;
GetClientRect(window_handle, &rect);
m_width = rect.right - rect.left;
m_height = rect.bottom - rect.top;
if (m_width == 0 || m_height == 0)
{
rsx_log.error("Invalid window dimensions %d x %d", m_width, m_height);
return false;
}
BITMAPINFO bitmap = {};
bitmap.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bitmap.bmiHeader.biWidth = m_width;
// Negative height = top-down DIB, matching the Vulkan image orientation
bitmap.bmiHeader.biHeight = m_height * -1;
bitmap.bmiHeader.biPlanes = 1;
bitmap.bmiHeader.biBitCount = 32;
bitmap.bmiHeader.biCompression = BI_RGB;
hSrcDC = CreateCompatibleDC(hDstDC);
hDIB = CreateDIBSection(hSrcDC, &bitmap, DIB_RGB_COLORS, &hPtr, NULL, 0);
SelectObject(hSrcDC, hDIB);
init_swapchain_images(dev, 3);
return true;
}
void create(display_handle_t& handle) override
{
window_handle = handle;
hDstDC = GetDC(handle);
}
void destroy(bool full = true) override
{
DeleteObject(hDIB);
DeleteDC(hSrcDC);
hDIB = NULL;
hSrcDC = NULL;
swapchain_images.clear();
if (full)
{
ReleaseDC(window_handle, hDstDC);
hDstDC = NULL;
dev.destroy();
}
}
VkResult present(VkSemaphore /*semaphore*/, u32 image) override
{
auto& src = swapchain_images[image];
GdiFlush();
if (hSrcDC)
{
memcpy(hPtr, src.second->get_pixels(), src.second->get_required_memory_size());
BitBlt(hDstDC, 0, 0, m_width, m_height, hSrcDC, 0, 0, SRCCOPY);
src.second->free_pixels();
}
// Release the in-use flag so the image can be re-acquired
src.first = false;
return VK_SUCCESS;
}
#elif defined(__APPLE__)
// Stub: native macOS presentation is not implemented (MoltenVK WSI is used instead).
class swapchain_MacOS : public native_swapchain_base
{
void* nsView = nullptr;
public:
swapchain_MacOS(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
: native_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{}
~swapchain_MacOS() {}
bool init() override
{
//TODO: get from `nsView`
m_width = 0;
m_height = 0;
if (m_width == 0 || m_height == 0)
{
rsx_log.error("Invalid window dimensions %d x %d", m_width, m_height);
return false;
}
init_swapchain_images(dev, 3);
return true;
}
void create(display_handle_t& window_handle) override
{
nsView = window_handle;
}
void destroy(bool full = true) override
{
swapchain_images.clear();
if (full)
dev.destroy();
}
VkResult present(VkSemaphore /*semaphore*/, u32 /*index*/) override
{
fmt::throw_exception("Native macOS swapchain is not implemented yet!");
}
#elif defined(HAVE_X11)
// Xlib-based presentation: frames are wrapped in an XImage and XPutImage'd.
class swapchain_X11 : public native_swapchain_base
{
Display* display = nullptr;
Window window = 0;
XImage* pixmap = nullptr;
GC gc = nullptr;
int bit_depth = 24;
public:
swapchain_X11(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
: native_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{}
~swapchain_X11() override = default;
bool init() override
{
if (pixmap)
destroy(false);
Window root;
int x, y;
u32 w = 0, h = 0, border, depth;
if (XGetGeometry(display, window, &root, &x, &y, &w, &h, &border, &depth))
{
m_width = w;
m_height = h;
bit_depth = depth;
}
if (m_width == 0 || m_height == 0)
{
rsx_log.error("Invalid window dimensions %d x %d", m_width, m_height);
return false;
}
XVisualInfo visual{};
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
if (!XMatchVisualInfo(display, DefaultScreen(display), bit_depth, TrueColor, &visual))
#pragma GCC diagnostic pop
{
rsx_log.error("Could not find matching visual info!");
return false;
}
// data is left null; present() points it at the readback buffer per frame
pixmap = XCreateImage(display, visual.visual, visual.depth, ZPixmap, 0, nullptr, m_width, m_height, 32, 0);
init_swapchain_images(dev, 3);
return true;
}
void create(display_handle_t& window_handle) override
{
std::visit([&](auto&& p)
{
using T = std::decay_t<decltype(p)>;
if constexpr (std::is_same_v<T, std::pair<Display*, Window>>)
{
display = p.first;
window = p.second;
}
}, window_handle);
if (display == NULL)
{
rsx_log.fatal("Could not create virtual display on this window protocol (Wayland?)");
return;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
gc = DefaultGC(display, DefaultScreen(display));
#pragma GCC diagnostic pop
}
void destroy(bool full = true) override
{
// NOTE(review): dereferences pixmap unconditionally - looks like this would
// crash if destroy() is called before a successful init(); verify callers.
pixmap->data = nullptr;
XDestroyImage(pixmap);
pixmap = NULL;
swapchain_images.clear();
if (full)
dev.destroy();
}
VkResult present(VkSemaphore /*semaphore*/, u32 index) override
{
auto& src = swapchain_images[index];
if (pixmap)
{
pixmap->data = static_cast<char*>(src.second->get_pixels());
XPutImage(display, window, gc, pixmap, 0, 0, 0, 0, m_width, m_height);
XFlush(display);
src.second->free_pixels();
}
//Release reference
src.first = false;
return VK_SUCCESS;
}
#else
// Stub: native Wayland presentation is not implemented.
class swapchain_Wayland : public native_swapchain_base
{
public:
swapchain_Wayland(physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format = VK_FORMAT_B8G8R8A8_UNORM)
: native_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{}
~swapchain_Wayland() {}
bool init() override
{
fmt::throw_exception("Native Wayland swapchain is not implemented yet!");
}
void create(display_handle_t& window_handle) override
{
fmt::throw_exception("Native Wayland swapchain is not implemented yet!");
}
void destroy(bool full = true) override
{
fmt::throw_exception("Native Wayland swapchain is not implemented yet!");
}
VkResult present(VkSemaphore /*semaphore*/, u32 index) override
{
fmt::throw_exception("Native Wayland swapchain is not implemented yet!");
}
#endif
// ---- Shared implementation for all native swapchains ----
// Hands out the first image not currently marked in-use; no GPU sync needed
// since presentation is CPU-driven.
VkResult acquire_next_swapchain_image(VkSemaphore /*semaphore*/, u64 /*timeout*/, u32* result) override
{
u32 index = 0;
for (auto& p : swapchain_images)
{
if (!p.first)
{
p.first = true;
*result = index;
return VK_SUCCESS;
}
++index;
}
return VK_NOT_READY;
}
// Queues the image->buffer readback so present() can access the pixels
void end_frame(command_buffer& cmd, u32 index) override
{
swapchain_images[index].second->do_dma_transfer(cmd);
}
VkImage get_image(u32 index) override
{
return swapchain_images[index].second->value;
}
VkImageLayout get_optimal_present_layout() override
{
return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
}
protected:
void init_swapchain_images(render_device& dev, u32 preferred_count) override
{
swapchain_images.resize(preferred_count);
for (auto& img : swapchain_images)
{
img.second = std::make_unique<swapchain_image_RPCS3>(dev, dev.get_memory_mapping(), m_width, m_height);
img.first = false;
}
}
};
class swapchain_WSI : public WSI_swapchain_base
{
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkColorSpaceKHR m_color_space = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
VkSwapchainKHR m_vk_swapchain = nullptr;
PFN_vkCreateSwapchainKHR _vkCreateSwapchainKHR = nullptr;
PFN_vkDestroySwapchainKHR _vkDestroySwapchainKHR = nullptr;
PFN_vkGetSwapchainImagesKHR _vkGetSwapchainImagesKHR = nullptr;
PFN_vkAcquireNextImageKHR _vkAcquireNextImageKHR = nullptr;
PFN_vkQueuePresentKHR _vkQueuePresentKHR = nullptr;
bool m_wm_reports_flag = false;
protected:
// Queries the driver for the images it actually created (the preferred count
// is ignored for WSI swapchains) and wraps them for later indexing.
void init_swapchain_images(render_device& dev, u32 /*preferred_count*/ = 0) override
{
u32 nb_swap_images = 0;
_vkGetSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, nullptr);
if (!nb_swap_images) fmt::throw_exception("Driver returned 0 images for swapchain");
std::vector<VkImage> vk_images;
vk_images.resize(nb_swap_images);
_vkGetSwapchainImagesKHR(dev, m_vk_swapchain, &nb_swap_images, vk_images.data());
swapchain_images.resize(nb_swap_images);
for (u32 i = 0; i < nb_swap_images; ++i)
{
swapchain_images[i].value = vk_images[i];
}
}
public:
// Resolves the swapchain extension entry points for this device and decides,
// per driver vendor, whether window-manager events (resize/out-of-date) can be
// trusted to arrive through the present/acquire result codes.
swapchain_WSI(vk::physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format, VkSurfaceKHR surface, VkColorSpaceKHR color_space, bool force_wm_reporting_off)
: WSI_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{
_vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(dev, "vkCreateSwapchainKHR"));
_vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(vkGetDeviceProcAddr(dev, "vkDestroySwapchainKHR"));
_vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(vkGetDeviceProcAddr(dev, "vkGetSwapchainImagesKHR"));
_vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(vkGetDeviceProcAddr(dev, "vkAcquireNextImageKHR"));
_vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(vkGetDeviceProcAddr(dev, "vkQueuePresentKHR"));
m_surface = surface;
m_color_space = color_space;
if (!force_wm_reporting_off)
{
switch (gpu.get_driver_vendor())
{
case driver_vendor::AMD:
case driver_vendor::INTEL:
case driver_vendor::RADV:
case driver_vendor::MVK:
break;
case driver_vendor::ANV:
case driver_vendor::NVIDIA:
// Only these drivers are known to report WM events reliably here
m_wm_reports_flag = true;
break;
default:
break;
}
}
}
~swapchain_WSI() override = default;
// Surface creation is handled externally; nothing to do here.
void create(display_handle_t&) override
{}
// Tears down the driver swapchain and, implicitly, the owned device.
void destroy(bool = true) override
{
if (VkDevice pdev = dev)
{
if (m_vk_swapchain)
{
_vkDestroySwapchainKHR(pdev, m_vk_swapchain, nullptr);
}
dev.destroy();
}
}
// Queries the surface capabilities. On Windows, if the exclusive-fullscreen
// option is active and VK_KHR_get_surface_capabilities2 is supported, also
// queries whether exclusive fullscreen is available for the window's monitor.
// Returns { capabilities, exclusive_fullscreen_supported }.
std::pair<VkSurfaceCapabilitiesKHR, bool> init_surface_capabilities()
{
#ifdef _WIN32
if (g_cfg.video.vk.exclusive_fullscreen_mode != vk_exclusive_fs_mode::unspecified && dev.get_surface_capabilities_2_support())
{
HMONITOR hmonitor = MonitorFromWindow(window_handle, MONITOR_DEFAULTTOPRIMARY);
if (hmonitor)
{
// Chain the fullscreen-exclusive query structs onto the capabilities query
VkSurfaceCapabilities2KHR pSurfaceCapabilities = {};
pSurfaceCapabilities.sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR;
VkPhysicalDeviceSurfaceInfo2KHR pSurfaceInfo = {};
pSurfaceInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR;
pSurfaceInfo.surface = m_surface;
VkSurfaceCapabilitiesFullScreenExclusiveEXT full_screen_exclusive_capabilities = {};
VkSurfaceFullScreenExclusiveWin32InfoEXT full_screen_exclusive_win32_info = {};
full_screen_exclusive_capabilities.sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT;
pSurfaceCapabilities.pNext = &full_screen_exclusive_capabilities;
full_screen_exclusive_win32_info.sType = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT;
full_screen_exclusive_win32_info.hmonitor = hmonitor;
pSurfaceInfo.pNext = &full_screen_exclusive_win32_info;
auto getPhysicalDeviceSurfaceCapabilities2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR>(
vkGetInstanceProcAddr(dev.gpu(), "vkGetPhysicalDeviceSurfaceCapabilities2KHR")
);
ensure(getPhysicalDeviceSurfaceCapabilities2KHR);
CHECK_RESULT(getPhysicalDeviceSurfaceCapabilities2KHR(dev.gpu(), &pSurfaceInfo, &pSurfaceCapabilities));
return { pSurfaceCapabilities.surfaceCapabilities, !!full_screen_exclusive_capabilities.fullScreenExclusiveSupported };
}
else
{
rsx_log.warning("Swapchain: failed to get monitor for the window");
}
}
#endif
// Fallback / non-Windows path: plain capabilities query, no exclusive fullscreen
VkSurfaceCapabilitiesKHR surface_descriptors = {};
CHECK_RESULT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(dev.gpu(), m_surface, &surface_descriptors));
return { surface_descriptors, false };
}
using WSI_swapchain_base::init;
bool init() override
{
if (dev.get_present_queue() == VK_NULL_HANDLE)
{
rsx_log.error("Cannot create WSI swapchain without a present queue");
return false;
}
VkSwapchainKHR old_swapchain = m_vk_swapchain;
vk::physical_device& gpu = const_cast<vk::physical_device&>(dev.gpu());
auto [surface_descriptors, should_specify_exclusive_full_screen_mode] = init_surface_capabilities();
if (surface_descriptors.maxImageExtent.width < m_width ||
surface_descriptors.maxImageExtent.height < m_height)
{
rsx_log.error("Swapchain: Swapchain creation failed because dimensions cannot fit. Max = %d, %d, Requested = %d, %d",
surface_descriptors.maxImageExtent.width, surface_descriptors.maxImageExtent.height, m_width, m_height);
return false;
}
if (surface_descriptors.currentExtent.width != umax)
{
if (surface_descriptors.currentExtent.width == 0 || surface_descriptors.currentExtent.height == 0)
{
rsx_log.warning("Swapchain: Current surface extent is a null region. Is the window minimized?");
return false;
}
m_width = surface_descriptors.currentExtent.width;
m_height = surface_descriptors.currentExtent.height;
}
u32 nb_available_modes = 0;
CHECK_RESULT(vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, m_surface, &nb_available_modes, nullptr));
std::vector<VkPresentModeKHR> present_modes(nb_available_modes);
CHECK_RESULT(vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, m_surface, &nb_available_modes, present_modes.data()));
VkPresentModeKHR swapchain_present_mode = VK_PRESENT_MODE_FIFO_KHR;
std::vector<VkPresentModeKHR> preferred_modes;
if (!g_cfg.video.vk.force_fifo)
{
// List of preferred modes in decreasing desirability
// NOTE: Always picks "triple-buffered vsync" types if possible
if (!g_cfg.video.vsync)
{
preferred_modes = { VK_PRESENT_MODE_IMMEDIATE_KHR, VK_PRESENT_MODE_MAILBOX_KHR, VK_PRESENT_MODE_FIFO_RELAXED_KHR };
}
}
bool mode_found = false;
for (VkPresentModeKHR preferred_mode : preferred_modes)
{
//Search for this mode in supported modes
for (VkPresentModeKHR mode : present_modes)
{
if (mode == preferred_mode)
{
swapchain_present_mode = mode;
mode_found = true;
break;
}
}
if (mode_found)
break;
}
rsx_log.notice("Swapchain: present mode %d in use.", static_cast<int>(swapchain_present_mode));
u32 nb_swap_images = surface_descriptors.minImageCount + 1;
if (surface_descriptors.maxImageCount > 0)
{
//Try to negotiate for a triple buffer setup
//In cases where the front-buffer isnt available for present, its better to have a spare surface
nb_swap_images = std::max(surface_descriptors.minImageCount + 2u, 3u);
if (nb_swap_images > surface_descriptors.maxImageCount)
{
// Application must settle for fewer images than desired:
nb_swap_images = surface_descriptors.maxImageCount;
}
}
VkSurfaceTransformFlagBitsKHR pre_transform = surface_descriptors.currentTransform;
if (surface_descriptors.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)
pre_transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
VkSwapchainCreateInfoKHR swap_info = {};
swap_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swap_info.surface = m_surface;
swap_info.minImageCount = nb_swap_images;
swap_info.imageFormat = m_surface_format;
swap_info.imageColorSpace = m_color_space;
swap_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
swap_info.preTransform = pre_transform;
swap_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
swap_info.imageArrayLayers = 1;
swap_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
swap_info.presentMode = swapchain_present_mode;
swap_info.oldSwapchain = old_swapchain;
swap_info.clipped = true;
swap_info.imageExtent.width = std::max(m_width, surface_descriptors.minImageExtent.width);
swap_info.imageExtent.height = std::max(m_height, surface_descriptors.minImageExtent.height);
#ifdef _WIN32
VkSurfaceFullScreenExclusiveInfoEXT full_screen_exclusive_info = {};
if (should_specify_exclusive_full_screen_mode)
{
vk_exclusive_fs_mode fs_mode = g_cfg.video.vk.exclusive_fullscreen_mode;
ensure(fs_mode == vk_exclusive_fs_mode::enable || fs_mode == vk_exclusive_fs_mode::disable);
full_screen_exclusive_info.sType = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT;
full_screen_exclusive_info.fullScreenExclusive =
fs_mode == vk_exclusive_fs_mode::enable ? VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT : VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT;
swap_info.pNext = &full_screen_exclusive_info;
}
rsx_log.notice("Swapchain: requesting full screen exclusive mode %d.", static_cast<int>(full_screen_exclusive_info.fullScreenExclusive));
#endif
_vkCreateSwapchainKHR(dev, &swap_info, nullptr, &m_vk_swapchain);
if (old_swapchain)
{
if (!swapchain_images.empty())
{
swapchain_images.clear();
}
_vkDestroySwapchainKHR(dev, old_swapchain, nullptr);
}
init_swapchain_images(dev);
return true;
}
bool supports_automatic_wm_reports() const override
{
return m_wm_reports_flag;
}
VkResult acquire_next_swapchain_image(VkSemaphore semaphore, u64 timeout, u32* result) override
{
return vkAcquireNextImageKHR(dev, m_vk_swapchain, timeout, semaphore, VK_NULL_HANDLE, result);
}
void end_frame(command_buffer& /*cmd*/, u32 /*index*/) override
{
}
VkResult present(VkSemaphore semaphore, u32 image) override
{
VkPresentInfoKHR present = {};
present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present.pNext = nullptr;
present.swapchainCount = 1;
present.pSwapchains = &m_vk_swapchain;
present.pImageIndices = ℑ
if (semaphore != VK_NULL_HANDLE)
{
present.waitSemaphoreCount = 1;
present.pWaitSemaphores = &semaphore;
}
return _vkQueuePresentKHR(dev.get_present_queue(), &present);
}
VkImage get_image(u32 index) override
{
return swapchain_images[index].value;
}
VkImageLayout get_optimal_present_layout() override
{
return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
}
};
}
| 23,673
|
C++
|
.h
| 638
| 31.865204
| 198
| 0.697216
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,091
|
graphics_pipeline_state.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/graphics_pipeline_state.hpp
|
#pragma once
#include "../VulkanAPI.h"
namespace vk
{
class graphics_pipeline_state
{
public:
VkPipelineInputAssemblyStateCreateInfo ia;
VkPipelineDepthStencilStateCreateInfo ds;
VkPipelineColorBlendAttachmentState att_state[4];
VkPipelineColorBlendStateCreateInfo cs;
VkPipelineRasterizationStateCreateInfo rs;
VkPipelineMultisampleStateCreateInfo ms;
struct extra_parameters
{
VkSampleMask msaa_sample_mask;
}
temp_storage;
graphics_pipeline_state()
{
// NOTE: Vk** structs have padding bytes
memset(this, 0, sizeof(graphics_pipeline_state));
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
cs.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs.polygonMode = VK_POLYGON_MODE_FILL;
rs.cullMode = VK_CULL_MODE_NONE;
rs.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rs.lineWidth = 1.f;
ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
temp_storage.msaa_sample_mask = 0xFFFFFFFF;
}
graphics_pipeline_state(const graphics_pipeline_state& other)
{
// NOTE: Vk** structs have padding bytes
memcpy(this, &other, sizeof(graphics_pipeline_state));
if (other.cs.pAttachments == other.att_state)
{
// Rebase pointer
cs.pAttachments = att_state;
}
}
~graphics_pipeline_state() = default;
graphics_pipeline_state& operator = (const graphics_pipeline_state& other)
{
if (this != &other)
{
// NOTE: Vk** structs have padding bytes
memcpy(this, &other, sizeof(graphics_pipeline_state));
if (other.cs.pAttachments == other.att_state)
{
// Rebase pointer
cs.pAttachments = att_state;
}
}
return *this;
}
void set_primitive_type(VkPrimitiveTopology type)
{
ia.topology = type;
}
void enable_primitive_restart(VkBool32 enable = VK_TRUE)
{
ia.primitiveRestartEnable = enable;
}
void set_color_mask(int index, bool r, bool g, bool b, bool a)
{
VkColorComponentFlags mask = 0;
if (a) mask |= VK_COLOR_COMPONENT_A_BIT;
if (b) mask |= VK_COLOR_COMPONENT_B_BIT;
if (g) mask |= VK_COLOR_COMPONENT_G_BIT;
if (r) mask |= VK_COLOR_COMPONENT_R_BIT;
att_state[index].colorWriteMask = mask;
}
void set_depth_mask(bool enable)
{
ds.depthWriteEnable = enable ? VK_TRUE : VK_FALSE;
}
void set_stencil_mask(u32 mask)
{
ds.front.writeMask = mask;
ds.back.writeMask = mask;
}
void set_stencil_mask_separate(int face, u32 mask)
{
if (!face)
ds.front.writeMask = mask;
else
ds.back.writeMask = mask;
}
void enable_depth_test(VkCompareOp op)
{
ds.depthTestEnable = VK_TRUE;
ds.depthCompareOp = op;
}
void enable_depth_clamp(bool enable = true)
{
rs.depthClampEnable = enable ? VK_TRUE : VK_FALSE;
}
void enable_depth_bias(bool enable = true)
{
rs.depthBiasEnable = enable ? VK_TRUE : VK_FALSE;
}
void enable_depth_bounds_test(bool enable = true)
{
ds.depthBoundsTestEnable = enable? VK_TRUE : VK_FALSE;
}
void enable_blend(int mrt_index,
VkBlendFactor src_factor_rgb, VkBlendFactor src_factor_a,
VkBlendFactor dst_factor_rgb, VkBlendFactor dst_factor_a,
VkBlendOp equation_rgb, VkBlendOp equation_a)
{
att_state[mrt_index].srcColorBlendFactor = src_factor_rgb;
att_state[mrt_index].srcAlphaBlendFactor = src_factor_a;
att_state[mrt_index].dstColorBlendFactor = dst_factor_rgb;
att_state[mrt_index].dstAlphaBlendFactor = dst_factor_a;
att_state[mrt_index].colorBlendOp = equation_rgb;
att_state[mrt_index].alphaBlendOp = equation_a;
att_state[mrt_index].blendEnable = VK_TRUE;
}
void enable_stencil_test(VkStencilOp fail, VkStencilOp zfail, VkStencilOp pass,
VkCompareOp func, u32 func_mask, u32 ref)
{
ds.front.failOp = fail;
ds.front.passOp = pass;
ds.front.depthFailOp = zfail;
ds.front.compareOp = func;
ds.front.compareMask = func_mask;
ds.front.reference = ref;
ds.back = ds.front;
ds.stencilTestEnable = VK_TRUE;
}
void enable_stencil_test_separate(int face, VkStencilOp fail, VkStencilOp zfail, VkStencilOp pass,
VkCompareOp func, u32 func_mask, u32 ref)
{
auto& face_props = (face ? ds.back : ds.front);
face_props.failOp = fail;
face_props.passOp = pass;
face_props.depthFailOp = zfail;
face_props.compareOp = func;
face_props.compareMask = func_mask;
face_props.reference = ref;
ds.stencilTestEnable = VK_TRUE;
}
void enable_logic_op(VkLogicOp op)
{
cs.logicOpEnable = VK_TRUE;
cs.logicOp = op;
}
void enable_cull_face(VkCullModeFlags cull_mode)
{
rs.cullMode = cull_mode;
}
void set_front_face(VkFrontFace face)
{
rs.frontFace = face;
}
void set_attachment_count(u32 count)
{
cs.attachmentCount = count;
cs.pAttachments = att_state;
}
void set_multisample_state(u8 sample_count, u32 sample_mask, bool msaa_enabled, bool alpha_to_coverage, bool alpha_to_one)
{
temp_storage.msaa_sample_mask = sample_mask;
ms.rasterizationSamples = static_cast<VkSampleCountFlagBits>(sample_count);
ms.alphaToCoverageEnable = alpha_to_coverage;
ms.alphaToOneEnable = alpha_to_one;
if (!msaa_enabled)
{
// This register is likely glMinSampleShading but in reverse; probably sets max sample shading rate of 1
// I (kd-11) suspect its what the control panel setting affects when MSAA is set to disabled
}
}
void set_multisample_shading_rate(float shading_rate)
{
ms.sampleShadingEnable = VK_TRUE;
ms.minSampleShading = shading_rate;
}
};
}
| 5,742
|
C++
|
.h
| 183
| 27.639344
| 124
| 0.723651
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,092
|
sampler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/sampler.h
|
#pragma once
#include "device.h"
#include "shared.h"
namespace vk
{
struct border_color_t
{
u64 storage_key;
VkBorderColor value;
VkFormat format;
VkImageCreateFlags aspect;
color4f color_value;
border_color_t(const color4f& color, VkFormat fmt = VK_FORMAT_UNDEFINED, VkImageAspectFlags aspect = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT);
border_color_t(VkBorderColor color);
bool operator == (const border_color_t& that) const
{
if (this->value != that.value)
{
return false;
}
switch (this->value)
{
case VK_BORDER_COLOR_FLOAT_CUSTOM_EXT:
case VK_BORDER_COLOR_INT_CUSTOM_EXT:
return this->color_value == that.color_value;
default:
return true;
}
}
};
struct sampler
{
VkSampler value;
VkSamplerCreateInfo info = {};
sampler(const vk::render_device& dev, VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const border_color_t& border_color,
VkBool32 depth_compare = false, VkCompareOp depth_compare_mode = VK_COMPARE_OP_NEVER);
~sampler();
bool matches(VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const border_color_t& border_color,
VkBool32 depth_compare = false, VkCompareOp depth_compare_mode = VK_COMPARE_OP_NEVER);
sampler(const sampler&) = delete;
sampler(sampler&&) = delete;
private:
VkDevice m_device;
border_color_t m_border_color;
};
// Caching helpers
struct sampler_pool_key_t
{
u64 base_key;
u64 border_color_key;
};
struct cached_sampler_object_t : public vk::sampler, public rsx::ref_counted
{
sampler_pool_key_t key;
using vk::sampler::sampler;
};
class sampler_pool_t
{
std::unordered_map<u64, std::unique_ptr<cached_sampler_object_t>> m_generic_sampler_pool;
std::unordered_map<u64, std::unique_ptr<cached_sampler_object_t>> m_custom_color_sampler_pool;
public:
sampler_pool_key_t compute_storage_key(
VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const vk::border_color_t& border_color,
VkBool32 depth_compare, VkCompareOp depth_compare_mode);
void clear();
cached_sampler_object_t* find(const sampler_pool_key_t& key) const;
cached_sampler_object_t* emplace(const sampler_pool_key_t& key, std::unique_ptr<cached_sampler_object_t>& object);
std::vector<std::unique_ptr<cached_sampler_object_t>> collect(std::function<bool(const cached_sampler_object_t&)> predicate);
};
}
| 3,043
|
C++
|
.h
| 76
| 36.842105
| 158
| 0.761113
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,093
|
memory.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/memory.h
|
#pragma once
#include "../VulkanAPI.h"
#include "../../rsx_utils.h"
#include "shared.h"
#include "3rdparty/GPUOpen/include/vk_mem_alloc.h"
namespace vk
{
namespace vmm_allocation_pool_ // Workaround for clang < 13 not supporting enum imports
{
enum vmm_allocation_pool
{
VMM_ALLOCATION_POOL_UNDEFINED = 0,
VMM_ALLOCATION_POOL_SYSTEM,
VMM_ALLOCATION_POOL_SURFACE_CACHE,
VMM_ALLOCATION_POOL_TEXTURE_CACHE,
VMM_ALLOCATION_POOL_SWAPCHAIN,
VMM_ALLOCATION_POOL_SCRATCH,
VMM_ALLOCATION_POOL_SAMPLER,
};
}
using namespace vk::vmm_allocation_pool_;
class render_device;
class memory_type_info
{
std::vector<u32> type_ids;
std::vector<u64> type_sizes;
public:
memory_type_info() = default;
memory_type_info(u32 index, u64 size);
void push(u32 index, u64 size);
using iterator = u32*;
using const_iterator = const u32*;
const_iterator begin() const;
const_iterator end() const;
u32 first() const;
size_t count() const;
operator bool() const;
bool operator == (const memory_type_info& other) const;
memory_type_info get(const render_device& dev, u32 access_flags, u32 type_mask) const;
void rebalance();
};
class mem_allocator_base
{
public:
using mem_handle_t = void*;
mem_allocator_base(const vk::render_device& dev, VkPhysicalDevice /*pdev*/);
virtual ~mem_allocator_base() = default;
virtual void destroy() = 0;
virtual mem_handle_t alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) = 0;
virtual void free(mem_handle_t mem_handle) = 0;
virtual void* map(mem_handle_t mem_handle, u64 offset, u64 size) = 0;
virtual void unmap(mem_handle_t mem_handle) = 0;
virtual VkDeviceMemory get_vk_device_memory(mem_handle_t mem_handle) = 0;
virtual u64 get_vk_device_memory_offset(mem_handle_t mem_handle) = 0;
virtual f32 get_memory_usage() = 0;
virtual void set_safest_allocation_flags() {}
virtual void set_fastest_allocation_flags() {}
protected:
VkDevice m_device;
VkFlags m_allocation_flags;
};
// Memory Allocator - Vulkan Memory Allocator
// https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator
class mem_allocator_vma : public mem_allocator_base
{
public:
mem_allocator_vma(const vk::render_device& dev, VkPhysicalDevice pdev);
~mem_allocator_vma() override = default;
void destroy() override;
mem_handle_t alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) override;
void free(mem_handle_t mem_handle) override;
void* map(mem_handle_t mem_handle, u64 offset, u64 /*size*/) override;
void unmap(mem_handle_t mem_handle) override;
VkDeviceMemory get_vk_device_memory(mem_handle_t mem_handle) override;
u64 get_vk_device_memory_offset(mem_handle_t mem_handle) override;
f32 get_memory_usage() override;
void set_safest_allocation_flags() override;
void set_fastest_allocation_flags() override;
private:
VmaAllocator m_allocator;
std::array<VmaBudget, VK_MAX_MEMORY_HEAPS> stats;
};
// Memory Allocator - built-in Vulkan device memory allocate/free
class mem_allocator_vk : public mem_allocator_base
{
public:
mem_allocator_vk(const vk::render_device& dev, VkPhysicalDevice pdev) : mem_allocator_base(dev, pdev) {}
~mem_allocator_vk() override = default;
void destroy() override {}
mem_handle_t alloc(u64 block_sz, u64 /*alignment*/, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) override;
void free(mem_handle_t mem_handle) override;
void* map(mem_handle_t mem_handle, u64 offset, u64 size) override;
void unmap(mem_handle_t mem_handle) override;
VkDeviceMemory get_vk_device_memory(mem_handle_t mem_handle) override;
u64 get_vk_device_memory_offset(mem_handle_t /*mem_handle*/) override;
f32 get_memory_usage() override;
};
struct memory_block
{
memory_block(VkDevice dev, u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool nullable = false);
virtual ~memory_block();
virtual VkDeviceMemory get_vk_device_memory();
virtual u64 get_vk_device_memory_offset();
virtual void* map(u64 offset, u64 size);
virtual void unmap();
u64 size() const;
memory_block(const memory_block&) = delete;
memory_block(memory_block&&) = delete;
protected:
memory_block() = default;
private:
VkDevice m_device;
vk::mem_allocator_base* m_mem_allocator = nullptr;
mem_allocator_base::mem_handle_t m_mem_handle;
u64 m_size;
};
struct memory_block_host : public memory_block
{
memory_block_host(VkDevice dev, void* host_pointer, u64 size, const memory_type_info& memory_type);
~memory_block_host();
VkDeviceMemory get_vk_device_memory() override;
u64 get_vk_device_memory_offset() override;
void* map(u64 offset, u64 size) override;
void unmap() override;
memory_block_host(const memory_block_host&) = delete;
memory_block_host(memory_block_host&&) = delete;
memory_block_host() = delete;
private:
VkDevice m_device;
VkDeviceMemory m_mem_handle;
void* m_host_pointer;
};
// Tracking for memory usage. Constrained largely by amount of VRAM + shared video memory.
void vmm_notify_memory_allocated(void* handle, u32 memory_type, u64 memory_size, vmm_allocation_pool pool);
void vmm_notify_memory_freed(void* handle);
void vmm_reset();
void vmm_check_memory_usage();
u64 vmm_get_application_memory_usage(const memory_type_info& memory_type);
u64 vmm_get_application_pool_usage(vmm_allocation_pool pool);
bool vmm_handle_memory_pressure(rsx::problem_severity severity);
rsx::problem_severity vmm_determine_memory_load_severity();
// Tracking for host memory objects. Allocated count is more important than actual memory amount.
void vmm_notify_object_allocated(vmm_allocation_pool pool);
void vmm_notify_object_freed(vmm_allocation_pool pool);
mem_allocator_base* get_current_mem_allocator();
}
| 5,970
|
C++
|
.h
| 146
| 37.965753
| 146
| 0.74974
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,094
|
scratch.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/scratch.h
|
#pragma once
#include "image.h"
namespace vk
{
VkSampler null_sampler();
image_view* null_image_view(const command_buffer& cmd, VkImageViewType type);
image* get_typeless_helper(VkFormat format, rsx::format_class format_class, u32 requested_width, u32 requested_height);
buffer* get_scratch_buffer(const command_buffer& cmd, u64 min_required_size, bool zero_memory = false);
void clear_scratch_resources();
}
| 428
|
C++
|
.h
| 10
| 39.9
| 121
| 0.7657
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,095
|
sync.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/sync.h
|
#pragma once
#include "../VulkanAPI.h"
#include "buffer_object.h"
#include "device.h"
#include "util/atomic.hpp"
namespace vk
{
class command_buffer;
class gpu_label;
class image;
enum class sync_domain
{
host = 0,
gpu = 1
};
struct fence
{
atomic_t<bool> flushed = false;
VkFence handle = VK_NULL_HANDLE;
VkDevice owner = VK_NULL_HANDLE;
fence(VkDevice dev);
~fence();
fence(const fence&) = delete;
void reset();
void signal_flushed();
void wait_flush();
operator bool() const;
};
class event
{
enum class sync_backend
{
events_v1,
events_v2,
gpu_label
};
const vk::render_device* m_device = nullptr;
sync_domain m_domain = sync_domain::host;
sync_backend m_backend = sync_backend::events_v1;
// For events_v1 and events_v2
VkEvent m_vk_event = VK_NULL_HANDLE;
// For gpu_label
std::unique_ptr<gpu_label> m_label{};
void resolve_dependencies(const command_buffer& cmd, const VkDependencyInfoKHR& dependency);
public:
event(const render_device& dev, sync_domain domain);
~event();
event(const event&) = delete;
void signal(const command_buffer& cmd, const VkDependencyInfoKHR& dependency);
void host_signal() const;
void gpu_wait(const command_buffer& cmd, const VkDependencyInfoKHR& dependency) const;
VkResult status() const;
void reset() const;
};
class semaphore
{
VkSemaphore m_handle = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
semaphore() = default;
public:
semaphore(const render_device& dev);
~semaphore();
semaphore(const semaphore&) = delete;
operator VkSemaphore() const;
};
// Custom primitives
class gpu_label_pool
{
public:
gpu_label_pool(const vk::render_device& dev, u32 count);
virtual ~gpu_label_pool();
std::tuple<VkBuffer, u64, volatile u32*> allocate();
private:
void create_impl();
const vk::render_device* pdev = nullptr;
std::unique_ptr<buffer> m_buffer{};
volatile u32* m_mapped = nullptr;
u64 m_offset = 0;
u32 m_count = 0;
};
class gpu_label
{
protected:
enum label_constants : u32
{
set_ = 0xCAFEBABE,
reset_ = 0xDEADBEEF
};
VkBuffer m_buffer_handle = VK_NULL_HANDLE;
u64 m_buffer_offset = 0;
volatile u32* m_ptr = nullptr;
public:
gpu_label(gpu_label_pool& pool);
virtual ~gpu_label();
void signal(const vk::command_buffer& cmd, const VkDependencyInfoKHR& dependency);
void reset() { *m_ptr = label_constants::reset_; }
void set() { *m_ptr = label_constants::set_; }
bool signaled() const { return label_constants::set_ == *m_ptr; }
};
class gpu_debug_marker_pool : public gpu_label_pool
{
using gpu_label_pool::gpu_label_pool;
};
class gpu_debug_marker : public gpu_label
{
std::string m_message;
bool m_printed = false;
public:
gpu_debug_marker(gpu_debug_marker_pool& pool, std::string message);
~gpu_debug_marker();
gpu_debug_marker(const event&) = delete;
void dump();
void dump() const;
static void insert(
const vk::render_device& dev,
const vk::command_buffer& cmd,
std::string message,
VkPipelineStageFlags stages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkAccessFlags access = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT);
};
class debug_marker_scope
{
const vk::render_device* m_device;
const vk::command_buffer* m_cb;
std::string m_message;
u64 m_tag;
public:
debug_marker_scope(const vk::command_buffer& cmd, const std::string& text);
~debug_marker_scope();
};
VkResult wait_for_fence(fence* pFence, u64 timeout = 0ull);
VkResult wait_for_event(event* pEvent, u64 timeout = 0ull);
}
| 3,613
|
C++
|
.h
| 133
| 24.255639
| 94
| 0.709434
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,096
|
framebuffer_object.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/framebuffer_object.hpp
|
#pragma once
#include "../VulkanAPI.h"
#include "image.h"
#include <memory>
#include <vector>
namespace vk
{
struct framebuffer
{
VkFramebuffer value;
VkFramebufferCreateInfo info = {};
std::vector<std::unique_ptr<vk::image_view>> attachments;
u32 m_width = 0;
u32 m_height = 0;
public:
framebuffer(VkDevice dev, VkRenderPass pass, u32 width, u32 height, std::vector<std::unique_ptr<vk::image_view>>&& atts)
: attachments(std::move(atts))
, m_device(dev)
{
std::vector<VkImageView> image_view_array(attachments.size());
usz i = 0;
for (const auto& att : attachments)
{
image_view_array[i++] = att->value;
}
info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
info.width = width;
info.height = height;
info.attachmentCount = static_cast<u32>(image_view_array.size());
info.pAttachments = image_view_array.data();
info.renderPass = pass;
info.layers = 1;
m_width = width;
m_height = height;
CHECK_RESULT(vkCreateFramebuffer(dev, &info, nullptr, &value));
}
~framebuffer()
{
vkDestroyFramebuffer(m_device, value, nullptr);
}
u32 width()
{
return m_width;
}
u32 height()
{
return m_height;
}
u8 samples()
{
ensure(!attachments.empty());
return attachments[0]->image()->samples();
}
VkFormat format()
{
ensure(!attachments.empty());
return attachments[0]->image()->format();
}
VkFormat depth_format()
{
ensure(!attachments.empty());
return attachments.back()->image()->format();
}
bool matches(std::vector<vk::image*> fbo_images, u32 width, u32 height)
{
if (m_width != width || m_height != height)
return false;
if (fbo_images.size() != attachments.size())
return false;
for (uint n = 0; n < fbo_images.size(); ++n)
{
if (attachments[n]->info.image != fbo_images[n]->value ||
attachments[n]->info.format != fbo_images[n]->info.format)
return false;
}
return true;
}
framebuffer(const framebuffer&) = delete;
framebuffer(framebuffer&&) = delete;
private:
VkDevice m_device;
};
}
| 2,190
|
C++
|
.h
| 83
| 21.626506
| 123
| 0.640889
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,097
|
mem_allocator.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/mem_allocator.h
|
#pragma once
#include "../VulkanAPI.h"
#include "../../rsx_utils.h"
#include "shared.h"
#include "3rdparty/GPUOpen/include/vk_mem_alloc.h"
namespace vk
{
// Memory Allocator - base class
}
| 195
|
C++
|
.h
| 9
| 20
| 50
| 0.729282
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,098
|
query_pool.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/query_pool.hpp
|
#pragma once
#include "../VulkanAPI.h"
#include "../../rsx_utils.h"
namespace vk
{
class query_pool : public rsx::ref_counted
{
VkQueryPool m_query_pool;
VkDevice m_device;
public:
query_pool(VkDevice dev, VkQueryType type, u32 size)
: m_query_pool(VK_NULL_HANDLE)
, m_device(dev)
{
VkQueryPoolCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
info.queryType = type;
info.queryCount = size;
vkCreateQueryPool(dev, &info, nullptr, &m_query_pool);
// Take 'size' references on this object
ref_count.release(static_cast<s32>(size));
}
~query_pool()
{
vkDestroyQueryPool(m_device, m_query_pool, nullptr);
}
operator VkQueryPool()
{
return m_query_pool;
}
};
}
| 751
|
C++
|
.h
| 32
| 20.4375
| 62
| 0.69425
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
6,099
|
commands.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/commands.h
|
#pragma once
#include "../VulkanAPI.h"
#include "device.h"
#include "sync.h"
namespace vk
{
class command_pool
{
vk::render_device* owner = nullptr;
VkCommandPool pool = nullptr;
u32 queue_family = 0;
public:
command_pool() = default;
~command_pool() = default;
void create(vk::render_device& dev, u32 queue_family);
void destroy();
vk::render_device& get_owner() const;
u32 get_queue_family() const;
operator VkCommandPool() const;
};
struct queue_submit_t
{
VkQueue queue = VK_NULL_HANDLE;
fence* pfence = nullptr;
VkCommandBuffer commands = VK_NULL_HANDLE;
std::array<VkSemaphore, 4> wait_semaphores;
std::array<VkSemaphore, 4> signal_semaphores;
std::array<VkPipelineStageFlags, 4> wait_stages;
u32 wait_semaphores_count = 0;
u32 signal_semaphores_count = 0;
queue_submit_t() = default;
queue_submit_t(VkQueue queue_, vk::fence* fence_)
: queue(queue_), pfence(fence_) {}
queue_submit_t(const queue_submit_t& other)
{
std::memcpy(this, &other, sizeof(queue_submit_t));
}
inline queue_submit_t& wait_on(VkSemaphore semaphore, VkPipelineStageFlags stage)
{
ensure(wait_semaphores_count < 4);
wait_semaphores[wait_semaphores_count] = semaphore;
wait_stages[wait_semaphores_count++] = stage;
return *this;
}
inline queue_submit_t& queue_signal(VkSemaphore semaphore)
{
ensure(signal_semaphores_count < 4);
signal_semaphores[signal_semaphores_count++] = semaphore;
return *this;
}
};
class command_buffer
{
protected:
bool is_open = false;
bool is_pending = false;
fence* m_submit_fence = nullptr;
command_pool* pool = nullptr;
VkCommandBuffer commands = nullptr;
public:
enum access_type_hint
{
flush_only, // Only to be submitted/opened/closed via command flush
all // Auxiliary, can be submitted/opened/closed at any time
}
access_hint = flush_only;
enum command_buffer_data_flag : u32
{
cb_has_occlusion_task = 0x01,
cb_has_blit_transfer = 0x02,
cb_has_dma_transfer = 0x04,
cb_has_open_query = 0x08,
cb_load_occluson_task = 0x10,
cb_has_conditional_render = 0x20,
cb_reload_dynamic_state = 0x40
};
u32 flags = 0;
public:
command_buffer() = default;
~command_buffer() = default;
void create(command_pool& cmd_pool);
void destroy();
void begin();
void end();
void submit(queue_submit_t& submit_info, VkBool32 flush = VK_FALSE);
// Properties
command_pool& get_command_pool() const
{
return *pool;
}
u32 get_queue_family() const
{
return pool->get_queue_family();
}
void clear_flags()
{
flags = 0;
}
void set_flag(command_buffer_data_flag flag)
{
flags |= flag;
}
operator VkCommandBuffer() const
{
return commands;
}
bool is_recording() const
{
return is_open;
}
};
}
| 2,846
|
C++
|
.h
| 112
| 22.223214
| 83
| 0.694834
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,100
|
pipeline_binding_table.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/pipeline_binding_table.h
|
#pragma once
#include "util/types.hpp"
namespace vk
{
struct pipeline_binding_table
{
u8 vertex_params_bind_slot = 0;
u8 vertex_constant_buffers_bind_slot = 1;
u8 fragment_constant_buffers_bind_slot = 2;
u8 fragment_state_bind_slot = 3;
u8 fragment_texture_params_bind_slot = 4;
u8 vertex_buffers_first_bind_slot = 5;
u8 conditional_render_predicate_slot = 8;
u8 rasterizer_env_bind_slot = 9;
u8 textures_first_bind_slot = 10;
u8 vertex_textures_first_bind_slot = 10; // Invalid, has to be initialized properly
u8 total_descriptor_bindings = vertex_textures_first_bind_slot; // Invalid, has to be initialized properly
};
}
| 749
|
C++
|
.h
| 19
| 37
| 118
| 0.646978
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,101
|
data_heap.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/data_heap.h
|
#pragma once
#include "../../Common/ring_buffer_helper.h"
#include "../VulkanAPI.h"
#include "buffer_object.h"
#include "commands.h"
#include <memory>
#include <vector>
namespace vk
{
class data_heap : public ::data_heap
{
private:
usz initial_size = 0;
bool mapped = false;
void* _ptr = nullptr;
bool notify_on_grow = false;
std::unique_ptr<buffer> shadow;
std::vector<VkBufferCopy> dirty_ranges;
protected:
bool grow(usz size) override;
public:
std::unique_ptr<buffer> heap;
// NOTE: Some drivers (RADV) use heavyweight OS map/unmap routines that are insanely slow
// Avoid mapping/unmapping to keep these drivers from stalling
// NOTE2: HOST_CACHED flag does not keep the mapped ptr around in the driver either
void create(VkBufferUsageFlags usage, usz size, const char* name, usz guard = 0x10000, VkBool32 notify = VK_FALSE);
void destroy();
void* map(usz offset, usz size);
void unmap(bool force = false);
void sync(const vk::command_buffer& cmd);
// Properties
bool is_dirty() const;
bool is_critical() const override;
};
extern data_heap* get_upload_heap();
}
| 1,174
|
C++
|
.h
| 36
| 28.638889
| 118
| 0.708633
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,102
|
device.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/device.h
|
#pragma once
#include "../VulkanAPI.h"
#include "chip_class.h"
#include "pipeline_binding_table.h"
#include "memory.h"
#include <string>
#include <vector>
#include <unordered_map>
#define DESCRIPTOR_MAX_DRAW_CALLS 32768
namespace vk
{
struct gpu_formats_support
{
bool d24_unorm_s8 : 1;
bool d32_sfloat_s8 : 1;
bool bgra8_linear : 1;
bool argb8_linear : 1;
};
struct gpu_shader_types_support
{
bool allow_float64 : 1;
bool allow_float16 : 1;
bool allow_int8 : 1;
};
struct memory_type_mapping
{
memory_type_info host_visible_coherent;
memory_type_info device_local;
memory_type_info device_bar;
u64 device_local_total_bytes;
u64 host_visible_total_bytes;
u64 device_bar_total_bytes;
PFN_vkGetMemoryHostPointerPropertiesEXT _vkGetMemoryHostPointerPropertiesEXT;
};
struct descriptor_indexing_features
{
bool supported = false;
u64 update_after_bind_mask = 0;
descriptor_indexing_features(bool supported = false)
: supported(supported) {}
operator bool() { return supported; }
};
class physical_device
{
VkInstance parent = VK_NULL_HANDLE;
VkPhysicalDevice dev = VK_NULL_HANDLE;
VkPhysicalDeviceProperties props;
VkPhysicalDeviceFeatures features;
VkPhysicalDeviceMemoryProperties memory_properties;
std::vector<VkQueueFamilyProperties> queue_props;
mutable std::unordered_map<VkFormat, VkFormatProperties> format_properties;
gpu_shader_types_support shader_types_support{};
VkPhysicalDeviceDriverPropertiesKHR driver_properties{};
u32 descriptor_max_draw_calls = DESCRIPTOR_MAX_DRAW_CALLS;
descriptor_indexing_features descriptor_indexing_support{};
struct
{
bool barycentric_coords = false;
bool conditional_rendering = false;
bool custom_border_color = false;
bool debug_utils = false;
bool external_memory_host = false;
bool framebuffer_loops = false;
bool sampler_mirror_clamped = false;
bool shader_stencil_export = false;
bool surface_capabilities_2 = false;
bool synchronization_2 = false;
bool unrestricted_depth_range = false;
} optional_features_support;
friend class render_device;
private:
void get_physical_device_features(bool allow_extensions);
void get_physical_device_properties(bool allow_extensions);
public:
physical_device() = default;
~physical_device() = default;
void create(VkInstance context, VkPhysicalDevice pdev, bool allow_extensions);
std::string get_name() const;
driver_vendor get_driver_vendor() const;
std::string get_driver_version() const;
chip_class get_chip_class() const;
u32 get_queue_count() const;
// Device properties. These structs can be large so use with care.
const VkQueueFamilyProperties& get_queue_properties(u32 queue);
const VkPhysicalDeviceMemoryProperties& get_memory_properties() const;
const VkPhysicalDeviceLimits& get_limits() const;
operator VkPhysicalDevice() const;
operator VkInstance() const;
};
class render_device
{
physical_device* pgpu = nullptr;
memory_type_mapping memory_map{};
gpu_formats_support m_formats_support{};
pipeline_binding_table m_pipeline_binding_table{};
std::unique_ptr<mem_allocator_base> m_allocator;
VkDevice dev = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
VkQueue m_present_queue = VK_NULL_HANDLE;
VkQueue m_transfer_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family = 0;
u32 m_present_queue_family = 0;
u32 m_transfer_queue_family = 0;
void dump_debug_info(
const std::vector<const char*>& requested_extensions,
const VkPhysicalDeviceFeatures& requested_features) const;
public:
// Exported device endpoints
PFN_vkCmdBeginConditionalRenderingEXT _vkCmdBeginConditionalRenderingEXT = nullptr;
PFN_vkCmdEndConditionalRenderingEXT _vkCmdEndConditionalRenderingEXT = nullptr;
PFN_vkSetDebugUtilsObjectNameEXT _vkSetDebugUtilsObjectNameEXT = nullptr;
PFN_vkQueueInsertDebugUtilsLabelEXT _vkQueueInsertDebugUtilsLabelEXT = nullptr;
PFN_vkCmdInsertDebugUtilsLabelEXT _vkCmdInsertDebugUtilsLabelEXT = nullptr;
PFN_vkCmdSetEvent2KHR _vkCmdSetEvent2KHR = nullptr;
PFN_vkCmdWaitEvents2KHR _vkCmdWaitEvents2KHR = nullptr;
PFN_vkCmdPipelineBarrier2KHR _vkCmdPipelineBarrier2KHR = nullptr;
public:
render_device() = default;
~render_device() = default;
void create(vk::physical_device& pdev, u32 graphics_queue_idx, u32 present_queue_idx, u32 transfer_queue_idx);
void destroy();
const VkFormatProperties get_format_properties(VkFormat format) const;
bool get_compatible_memory_type(u32 typeBits, u32 desired_mask, u32* type_index) const;
void rebalance_memory_type_usage();
const physical_device& gpu() const { return *pgpu; }
const memory_type_mapping& get_memory_mapping() const { return memory_map; }
const gpu_formats_support& get_formats_support() const { return m_formats_support; }
const pipeline_binding_table& get_pipeline_binding_table() const { return m_pipeline_binding_table; }
const gpu_shader_types_support& get_shader_types_support() const { return pgpu->shader_types_support; }
bool get_shader_stencil_export_support() const { return pgpu->optional_features_support.shader_stencil_export; }
bool get_depth_bounds_support() const { return pgpu->features.depthBounds != VK_FALSE; }
bool get_alpha_to_one_support() const { return pgpu->features.alphaToOne != VK_FALSE; }
bool get_anisotropic_filtering_support() const { return pgpu->features.samplerAnisotropy != VK_FALSE; }
bool get_wide_lines_support() const { return pgpu->features.wideLines != VK_FALSE; }
bool get_conditional_render_support() const { return pgpu->optional_features_support.conditional_rendering; }
bool get_unrestricted_depth_range_support() const { return pgpu->optional_features_support.unrestricted_depth_range; }
bool get_external_memory_host_support() const { return pgpu->optional_features_support.external_memory_host; }
bool get_surface_capabilities_2_support() const { return pgpu->optional_features_support.surface_capabilities_2; }
bool get_debug_utils_support() const { return g_cfg.video.renderdoc_compatiblity && pgpu->optional_features_support.debug_utils; }
bool get_descriptor_indexing_support() const { return pgpu->descriptor_indexing_support; }
bool get_framebuffer_loops_support() const { return pgpu->optional_features_support.framebuffer_loops; }
bool get_barycoords_support() const { return pgpu->optional_features_support.barycentric_coords; }
bool get_custom_border_color_support() const { return pgpu->optional_features_support.custom_border_color; }
bool get_synchronization2_support() const { return pgpu->optional_features_support.synchronization_2; }
u64 get_descriptor_update_after_bind_support() const { return pgpu->descriptor_indexing_support.update_after_bind_mask; }
u32 get_descriptor_max_draw_calls() const { return pgpu->descriptor_max_draw_calls; }
VkQueue get_present_queue() const { return m_present_queue; }
VkQueue get_graphics_queue() const { return m_graphics_queue; }
VkQueue get_transfer_queue() const { return m_transfer_queue; }
u32 get_graphics_queue_family() const { return m_graphics_queue_family; }
u32 get_present_queue_family() const { return m_graphics_queue_family; }
u32 get_transfer_queue_family() const { return m_transfer_queue_family; }
mem_allocator_base* get_allocator() const { return m_allocator.get(); }
operator VkDevice() const { return dev; }
};
memory_type_mapping get_memory_mapping(const physical_device& dev);
gpu_formats_support get_optimal_tiling_supported_formats(const physical_device& dev);
pipeline_binding_table get_pipeline_binding_table(const physical_device& dev);
extern const render_device* g_render_device;
}
| 7,706
|
C++
|
.h
| 160
| 45.13125
| 132
| 0.776815
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,103
|
instance.hpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/instance.hpp
|
#pragma once
#include "../VulkanAPI.h"
#include "swapchain.hpp"
#include <algorithm>
#include <vector>
#ifdef __APPLE__
#include <MoltenVK/mvk_vulkan.h>
#include <MoltenVK/mvk_private_api.h>
#endif
namespace vk
{
class supported_extensions
{
private:
std::vector<VkExtensionProperties> m_vk_exts;
public:
enum enumeration_class
{
instance = 0,
device = 1
};
supported_extensions(enumeration_class _class, const char* layer_name = nullptr, VkPhysicalDevice pdev = VK_NULL_HANDLE)
{
u32 count;
if (_class == enumeration_class::instance)
{
if (vkEnumerateInstanceExtensionProperties(layer_name, &count, nullptr) != VK_SUCCESS)
return;
}
else
{
ensure(pdev);
if (vkEnumerateDeviceExtensionProperties(pdev, layer_name, &count, nullptr) != VK_SUCCESS)
return;
}
m_vk_exts.resize(count);
if (_class == enumeration_class::instance)
{
vkEnumerateInstanceExtensionProperties(layer_name, &count, m_vk_exts.data());
}
else
{
vkEnumerateDeviceExtensionProperties(pdev, layer_name, &count, m_vk_exts.data());
}
}
bool is_supported(std::string_view ext)
{
return std::any_of(m_vk_exts.cbegin(), m_vk_exts.cend(), [&](const VkExtensionProperties& p) { return p.extensionName == ext; });
}
};
class instance
{
private:
std::vector<physical_device> gpus;
VkInstance m_instance = VK_NULL_HANDLE;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
PFN_vkDestroyDebugReportCallbackEXT _vkDestroyDebugReportCallback = nullptr;
PFN_vkCreateDebugReportCallbackEXT _vkCreateDebugReportCallback = nullptr;
VkDebugReportCallbackEXT m_debugger = nullptr;
bool extensions_loaded = false;
public:
instance() = default;
~instance()
{
if (m_instance)
{
destroy();
}
}
void destroy()
{
if (!m_instance) return;
if (m_debugger)
{
_vkDestroyDebugReportCallback(m_instance, m_debugger, nullptr);
m_debugger = nullptr;
}
if (m_surface)
{
vkDestroySurfaceKHR(m_instance, m_surface, nullptr);
m_surface = VK_NULL_HANDLE;
}
vkDestroyInstance(m_instance, nullptr);
m_instance = VK_NULL_HANDLE;
}
void enable_debugging()
{
if (!g_cfg.video.debug_output) return;
PFN_vkDebugReportCallbackEXT callback = vk::dbgFunc;
_vkCreateDebugReportCallback = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkCreateDebugReportCallbackEXT"));
_vkDestroyDebugReportCallback = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(vkGetInstanceProcAddr(m_instance, "vkDestroyDebugReportCallbackEXT"));
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo = {};
dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
dbgCreateInfo.pfnCallback = callback;
dbgCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
CHECK_RESULT(_vkCreateDebugReportCallback(m_instance, &dbgCreateInfo, NULL, &m_debugger));
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wold-style-cast"
#endif
bool create(const char* app_name, bool fast = false)
{
// Initialize a vulkan instance
VkApplicationInfo app = {};
app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app.pApplicationName = app_name;
app.applicationVersion = 0;
app.pEngineName = app_name;
app.engineVersion = 0;
app.apiVersion = VK_API_VERSION_1_0;
// Set up instance information
std::vector<const char*> extensions;
std::vector<const char*> layers;
const void* next_info = nullptr;
#ifdef __APPLE__
// Declare MVK variables here to ensure the lifetime within the entire scope
const VkBool32 setting_true = VK_TRUE;
const int32_t setting_fast_math = g_cfg.video.disable_msl_fast_math.get() ? MVK_CONFIG_FAST_MATH_NEVER : MVK_CONFIG_FAST_MATH_ON_DEMAND;
std::vector<VkLayerSettingEXT> mvk_settings;
VkLayerSettingsCreateInfoEXT mvk_layer_settings_create_info{};
#endif
if (!fast)
{
extensions_loaded = true;
supported_extensions support(supported_extensions::instance);
extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
if (support.is_supported(VK_EXT_DEBUG_REPORT_EXTENSION_NAME))
{
extensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
}
if (support.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
{
extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
#ifdef __APPLE__
if (support.is_supported(VK_EXT_LAYER_SETTINGS_EXTENSION_NAME))
{
extensions.push_back(VK_EXT_LAYER_SETTINGS_EXTENSION_NAME);
layers.push_back(kMVKMoltenVKDriverLayerName);
mvk_settings.push_back(VkLayerSettingEXT{ kMVKMoltenVKDriverLayerName, "MVK_CONFIG_RESUME_LOST_DEVICE", VK_LAYER_SETTING_TYPE_BOOL32_EXT, 1, &setting_true });
mvk_settings.push_back(VkLayerSettingEXT{ kMVKMoltenVKDriverLayerName, "MVK_CONFIG_FAST_MATH_ENABLED", VK_LAYER_SETTING_TYPE_INT32_EXT, 1, &setting_fast_math });
mvk_layer_settings_create_info.sType = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT;
mvk_layer_settings_create_info.pNext = next_info;
mvk_layer_settings_create_info.settingCount = static_cast<uint32_t>(mvk_settings.size());
mvk_layer_settings_create_info.pSettings = mvk_settings.data();
next_info = &mvk_layer_settings_create_info;
}
#endif
if (support.is_supported(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME))
{
extensions.push_back(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME);
}
if (support.is_supported(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME))
{
extensions.push_back(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME);
}
if (g_cfg.video.renderdoc_compatiblity && support.is_supported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME))
{
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
#ifdef _WIN32
extensions.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#elif defined(__APPLE__)
extensions.push_back(VK_MVK_MACOS_SURFACE_EXTENSION_NAME);
#else
bool found_surface_ext = false;
#ifdef HAVE_X11
if (support.is_supported(VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
{
extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
found_surface_ext = true;
}
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
if (support.is_supported(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
{
extensions.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
found_surface_ext = true;
}
#endif //(WAYLAND)
if (!found_surface_ext)
{
rsx_log.error("Could not find a supported Vulkan surface extension");
return 0;
}
#endif //(WIN32, __APPLE__)
if (g_cfg.video.debug_output)
layers.push_back("VK_LAYER_KHRONOS_validation");
}
VkInstanceCreateInfo instance_info = {};
instance_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instance_info.pApplicationInfo = &app;
instance_info.enabledLayerCount = static_cast<u32>(layers.size());
instance_info.ppEnabledLayerNames = layers.data();
instance_info.enabledExtensionCount = fast ? 0 : static_cast<u32>(extensions.size());
instance_info.ppEnabledExtensionNames = fast ? nullptr : extensions.data();
instance_info.pNext = next_info;
if (VkResult result = vkCreateInstance(&instance_info, nullptr, &m_instance); result != VK_SUCCESS)
{
if (result == VK_ERROR_LAYER_NOT_PRESENT)
{
rsx_log.fatal("Could not initialize layer VK_LAYER_KHRONOS_validation");
}
return false;
}
return true;
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
void bind()
{
// Register some global states
if (m_debugger)
{
_vkDestroyDebugReportCallback(m_instance, m_debugger, nullptr);
m_debugger = nullptr;
}
enable_debugging();
}
std::vector<physical_device>& enumerate_devices()
{
u32 num_gpus;
// This may fail on unsupported drivers, so just assume no devices
if (vkEnumeratePhysicalDevices(m_instance, &num_gpus, nullptr) != VK_SUCCESS)
return gpus;
if (gpus.size() != num_gpus)
{
std::vector<VkPhysicalDevice> pdevs(num_gpus);
gpus.resize(num_gpus);
CHECK_RESULT(vkEnumeratePhysicalDevices(m_instance, &num_gpus, pdevs.data()));
for (u32 i = 0; i < num_gpus; ++i)
gpus[i].create(m_instance, pdevs[i], extensions_loaded);
}
return gpus;
}
swapchain_base* create_swapchain(display_handle_t window_handle, vk::physical_device& dev)
{
bool force_wm_reporting_off = false;
#ifdef _WIN32
using swapchain_NATIVE = swapchain_WIN32;
HINSTANCE hInstance = NULL;
VkWin32SurfaceCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
createInfo.hinstance = hInstance;
createInfo.hwnd = window_handle;
CHECK_RESULT(vkCreateWin32SurfaceKHR(m_instance, &createInfo, NULL, &m_surface));
#elif defined(__APPLE__)
using swapchain_NATIVE = swapchain_MacOS;
VkMacOSSurfaceCreateInfoMVK createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK;
createInfo.pView = window_handle;
CHECK_RESULT(vkCreateMacOSSurfaceMVK(m_instance, &createInfo, NULL, &m_surface));
#else
#ifdef HAVE_X11
using swapchain_NATIVE = swapchain_X11;
#else
using swapchain_NATIVE = swapchain_Wayland;
#endif
std::visit([&](auto&& p)
{
using T = std::decay_t<decltype(p)>;
#ifdef HAVE_X11
if constexpr (std::is_same_v<T, std::pair<Display*, Window>>)
{
VkXlibSurfaceCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
createInfo.dpy = p.first;
createInfo.window = p.second;
CHECK_RESULT(vkCreateXlibSurfaceKHR(this->m_instance, &createInfo, nullptr, &m_surface));
}
else
#endif
#ifdef HAVE_WAYLAND
if constexpr (std::is_same_v<T, std::pair<wl_display*, wl_surface*>>)
{
VkWaylandSurfaceCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
createInfo.display = p.first;
createInfo.surface = p.second;
CHECK_RESULT(vkCreateWaylandSurfaceKHR(this->m_instance, &createInfo, nullptr, &m_surface));
force_wm_reporting_off = true;
}
else
#endif
{
static_assert(std::conditional_t<true, std::false_type, T>::value, "Unhandled window_handle type in std::variant");
}
}, window_handle);
#endif
u32 device_queues = dev.get_queue_count();
std::vector<VkBool32> supports_present(device_queues, VK_FALSE);
bool present_possible = true;
for (u32 index = 0; index < device_queues; index++)
{
vkGetPhysicalDeviceSurfaceSupportKHR(dev, index, m_surface, &supports_present[index]);
}
u32 graphics_queue_idx = -1;
u32 present_queue_idx = -1;
u32 transfer_queue_idx = -1;
auto test_queue_family = [&](u32 index, u32 desired_flags)
{
if (const auto flags = dev.get_queue_properties(index).queueFlags;
(flags & desired_flags) == desired_flags)
{
return true;
}
return false;
};
for (u32 i = 0; i < device_queues; ++i)
{
// 1. Test for a present queue possibly one that also supports present
if (present_queue_idx == umax && supports_present[i])
{
present_queue_idx = i;
if (test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
{
graphics_queue_idx = i;
}
}
// 2. Check for graphics support
else if (graphics_queue_idx == umax && test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
{
graphics_queue_idx = i;
if (supports_present[i])
{
present_queue_idx = i;
}
}
// 3. Check if transfer + compute is available
else if (transfer_queue_idx == umax && test_queue_family(i, VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))
{
transfer_queue_idx = i;
}
}
if (graphics_queue_idx == umax)
{
rsx_log.fatal("Failed to find a suitable graphics queue");
return nullptr;
}
if (graphics_queue_idx != present_queue_idx)
{
// Separate graphics and present, use headless fallback
present_possible = false;
}
if (!present_possible)
{
//Native(sw) swapchain
rsx_log.error("It is not possible for the currently selected GPU to present to the window (Likely caused by NVIDIA driver running the current display)");
rsx_log.warning("Falling back to software present support (native windowing API)");
auto swapchain = new swapchain_NATIVE(dev, -1, graphics_queue_idx, transfer_queue_idx);
swapchain->create(window_handle);
return swapchain;
}
// Get the list of VkFormat's that are supported:
u32 formatCount;
CHECK_RESULT(vkGetPhysicalDeviceSurfaceFormatsKHR(dev, m_surface, &formatCount, nullptr));
std::vector<VkSurfaceFormatKHR> surfFormats(formatCount);
CHECK_RESULT(vkGetPhysicalDeviceSurfaceFormatsKHR(dev, m_surface, &formatCount, surfFormats.data()));
VkFormat format;
VkColorSpaceKHR color_space;
if (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED)
{
format = VK_FORMAT_B8G8R8A8_UNORM;
}
else
{
if (!formatCount) fmt::throw_exception("Format count is zero!");
format = surfFormats[0].format;
//Prefer BGRA8_UNORM to avoid sRGB compression (RADV)
for (auto& surface_format : surfFormats)
{
if (surface_format.format == VK_FORMAT_B8G8R8A8_UNORM)
{
format = VK_FORMAT_B8G8R8A8_UNORM;
break;
}
}
}
color_space = surfFormats[0].colorSpace;
return new swapchain_WSI(dev, present_queue_idx, graphics_queue_idx, transfer_queue_idx, format, m_surface, color_space, force_wm_reporting_off);
}
};
}
| 13,706
|
C++
|
.h
| 385
| 31.303896
| 166
| 0.714016
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
6,104
|
image.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/image.h
|
#pragma once
#include "../VulkanAPI.h"
#include "../../Common/TextureUtils.h"
#include "commands.h"
#include "device.h"
#include "memory.h"
#include <stack>
//using enum rsx::format_class;
using namespace ::rsx::format_class_;
#define VK_DISABLE_COMPONENT_SWIZZLE 0
namespace vk
{
enum : u32// special remap_encoding enums
{
VK_REMAP_IDENTITY = 0xCAFEBABE, // Special view encoding to return an identity image view
VK_REMAP_VIEW_MULTISAMPLED = 0xDEADBEEF, // Special encoding for multisampled images; returns a multisampled image view
VK_IMAGE_CREATE_ALLOW_NULL_RPCS3 = 0x80000000, // Special flag that allows null images to be created if there is no memory
VK_IMAGE_CREATE_SHAREABLE_RPCS3 = 0x40000000, // Special flag to create a shareable image
VK_IMAGE_CREATE_SPECIAL_FLAGS_RPCS3 = (VK_IMAGE_CREATE_ALLOW_NULL_RPCS3 | VK_IMAGE_CREATE_SHAREABLE_RPCS3)
};
class image
{
std::stack<VkImageLayout> m_layout_stack;
VkImageAspectFlags m_storage_aspect = 0;
rsx::format_class m_format_class = RSX_FORMAT_CLASS_UNDEFINED;
void validate(const vk::render_device& dev, const VkImageCreateInfo& info) const;
protected:
image() = default;
void create_impl(const vk::render_device& dev, u32 access_flags, const memory_type_info& memory_type, vmm_allocation_pool allocation_pool);
public:
VkImage value = VK_NULL_HANDLE;
VkComponentMapping native_component_map = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
VkImageLayout current_layout = VK_IMAGE_LAYOUT_UNDEFINED;
u32 current_queue_family = VK_QUEUE_FAMILY_IGNORED;
VkImageCreateInfo info = {};
std::shared_ptr<vk::memory_block> memory;
image(const vk::render_device& dev,
const memory_type_info& memory_type,
u32 access_flags,
VkImageType image_type,
VkFormat format,
u32 width, u32 height, u32 depth,
u32 mipmaps, u32 layers,
VkSampleCountFlagBits samples,
VkImageLayout initial_layout,
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags image_flags,
vmm_allocation_pool allocation_pool,
rsx::format_class format_class = RSX_FORMAT_CLASS_UNDEFINED);
virtual ~image();
image(const image&) = delete;
image(image&&) = delete;
// Properties
u32 width() const;
u32 height() const;
u32 depth() const;
u32 mipmaps() const;
u32 layers() const;
u8 samples() const;
VkFormat format() const;
VkImageType type() const;
VkSharingMode sharing_mode() const;
VkImageAspectFlags aspect() const;
rsx::format_class format_class() const;
// Pipeline management
void push_layout(const command_buffer& cmd, VkImageLayout layout);
void push_barrier(const command_buffer& cmd, VkImageLayout layout);
void pop_layout(const command_buffer& cmd);
void change_layout(const command_buffer& cmd, VkImageLayout new_layout);
// Queue transfer
void queue_acquire(const command_buffer& cmd, VkImageLayout new_layout);
void queue_release(const command_buffer& src_queue_cmd, u32 dst_queue_family, VkImageLayout new_layout);
// Debug utils
void set_debug_name(const std::string& name);
protected:
VkDevice m_device;
};
struct image_view
{
VkImageView value = VK_NULL_HANDLE;
VkImageViewCreateInfo info = {};
image_view(VkDevice dev, VkImage image, VkImageViewType view_type, VkFormat format, VkComponentMapping mapping, VkImageSubresourceRange range);
image_view(VkDevice dev, VkImageViewCreateInfo create_info);
image_view(VkDevice dev, vk::image* resource,
VkImageViewType view_type = VK_IMAGE_VIEW_TYPE_MAX_ENUM,
const VkComponentMapping& mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A },
const VkImageSubresourceRange& range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
~image_view();
u32 encoded_component_map() const;
vk::image* image() const;
image_view(const image_view&) = delete;
image_view(image_view&&) = delete;
private:
VkDevice m_device;
vk::image* m_resource = nullptr;
void create_impl();
};
class viewable_image : public image
{
protected:
std::unordered_map<u64, std::unique_ptr<vk::image_view>> views;
viewable_image* clone();
public:
using image::image;
virtual image_view* get_view(const rsx::texture_channel_remap_t& remap,
VkImageAspectFlags mask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT);
void set_native_component_layout(VkComponentMapping new_layout);
};
}
| 4,638
|
C++
|
.h
| 110
| 37.827273
| 146
| 0.732527
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,105
|
garbage_collector.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/garbage_collector.h
|
#include <util/types.hpp>
#include <functional>
namespace vk
{
class disposable_t
{
void* ptr;
std::function<void(void*)> deleter;
disposable_t(void* ptr_, std::function<void(void*)> deleter_) :
ptr(ptr_), deleter(deleter_) {}
public:
disposable_t() = delete;
disposable_t(const disposable_t&) = delete;
disposable_t(disposable_t&& other) :
ptr(std::exchange(other.ptr, nullptr)),
deleter(other.deleter)
{}
~disposable_t()
{
if (ptr)
{
deleter(ptr);
ptr = nullptr;
}
}
template <typename T>
static disposable_t make(T* raw)
{
return disposable_t(raw, [](void* raw)
{
delete static_cast<T*>(raw);
});
}
};
struct garbage_collector
{
virtual void dispose(vk::disposable_t& object) = 0;
virtual void add_exit_callback(std::function<void()> callback) = 0;
template<typename T>
void dispose(std::unique_ptr<T>& object)
{
auto ptr = vk::disposable_t::make(object.release());
dispose(ptr);
}
};
garbage_collector* get_gc();
}
| 1,081
|
C++
|
.h
| 47
| 18.553191
| 70
| 0.633399
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,106
|
chip_class.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/chip_class.h
|
#pragma once
#include "util/types.hpp"
#include <unordered_map>
namespace vk
{
// Chip classes grouped by vendor in order of release
enum class chip_class
{
unknown,
// AMD
AMD_gcn_generic,
AMD_polaris,
AMD_vega,
AMD_navi1x,
AMD_navi2x,
AMD_navi3x,
_AMD_ENUM_MAX_, // Do not insert AMD enums beyond this point
// NVIDIA
NV_generic,
NV_kepler,
NV_maxwell,
NV_pascal,
NV_volta,
NV_turing,
NV_ampere,
NV_lovelace,
_NV_ENUM_MAX_, // Do not insert NV enums beyond this point
// APPLE
MVK_apple,
// INTEL
INTEL_generic,
INTEL_alchemist,
_INTEL_ENUM_MAX_, // Do not insert INTEL enums beyond this point
};
enum class driver_vendor
{
unknown,
AMD,
NVIDIA,
RADV,
INTEL,
ANV,
MVK,
DOZEN,
LAVAPIPE,
NVK,
V3DV
};
driver_vendor get_driver_vendor();
struct chip_family_table
{
chip_class default_ = chip_class::unknown;
std::unordered_map<u32, chip_class> lut;
void add(u32 first, u32 last, chip_class family);
void add(u32 id, chip_class family);
chip_class find(u32 device_id) const;
};
chip_class get_chip_family();
chip_class get_chip_family(u32 vendor_id, u32 device_id);
static inline bool is_NVIDIA(chip_class chip) { return chip >= chip_class::NV_generic && chip < chip_class::_NV_ENUM_MAX_; }
static inline bool is_AMD(chip_class chip) { return chip >= chip_class::AMD_gcn_generic && chip < chip_class::_AMD_ENUM_MAX_; }
static inline bool is_INTEL(chip_class chip) { return chip >= chip_class::INTEL_generic && chip < chip_class::_INTEL_ENUM_MAX_; }
static inline bool is_NVIDIA(driver_vendor vendor) { return vendor == driver_vendor::NVIDIA || vendor == driver_vendor::NVK; }
static inline bool is_AMD(driver_vendor vendor) { return vendor == driver_vendor::AMD || vendor == driver_vendor::RADV; }
static inline bool is_INTEL(driver_vendor vendor) { return vendor == driver_vendor::INTEL || vendor == driver_vendor::ANV; }
}
| 1,937
|
C++
|
.h
| 66
| 26.606061
| 130
| 0.709747
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,107
|
descriptors.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/descriptors.h
|
#pragma once
#include "../VulkanAPI.h"
#include "Utilities/mutex.h"
#include "commands.h"
#include "device.h"
#include "Emu/RSX/Common/simple_array.hpp"
namespace vk
{
struct gc_callback_t
{
std::function<void()> m_callback;
gc_callback_t(std::function<void()> callback)
: m_callback(callback)
{}
~gc_callback_t()
{
if (m_callback)
{
m_callback();
}
}
};
class descriptor_pool
{
public:
descriptor_pool() = default;
~descriptor_pool() = default;
void create(const vk::render_device& dev, const rsx::simple_array<VkDescriptorPoolSize>& pool_sizes, u32 max_sets = 1024);
void destroy();
VkDescriptorSet allocate(VkDescriptorSetLayout layout, VkBool32 use_cache = VK_TRUE);
operator VkDescriptorPool() { return m_current_pool_handle; }
FORCE_INLINE bool valid() const { return !m_device_subpools.empty(); }
FORCE_INLINE u32 max_sets() const { return m_create_info.maxSets; }
private:
FORCE_INLINE bool can_allocate(u32 required_count, u32 already_used_count = 0) const { return (required_count + already_used_count) <= m_create_info.maxSets; };
void reset(u32 subpool_id, VkDescriptorPoolResetFlags flags);
void next_subpool();
struct logical_subpool_t
{
VkDescriptorPool handle;
VkBool32 busy;
};
const vk::render_device* m_owner = nullptr;
VkDescriptorPoolCreateInfo m_create_info = {};
rsx::simple_array<VkDescriptorPoolSize> m_create_info_pool_sizes;
rsx::simple_array<logical_subpool_t> m_device_subpools;
VkDescriptorPool m_current_pool_handle = VK_NULL_HANDLE;
u32 m_current_subpool_index = umax;
u32 m_current_subpool_offset = 0;
shared_mutex m_subpool_lock;
static constexpr size_t max_cache_size = 64;
VkDescriptorSetLayout m_cached_layout = VK_NULL_HANDLE;
rsx::simple_array<VkDescriptorSet> m_descriptor_set_cache;
rsx::simple_array<VkDescriptorSetLayout> m_allocation_request_cache;
};
// Wrapper around a raw VkDescriptorSet that queues descriptor write/copy
// requests (push) and applies them in bulk on flush()/bind().
class descriptor_set
{
	// Sizing constants for the internal write-request pools.
	static constexpr size_t max_cache_size = 16384;
	static constexpr size_t max_overflow_size = 64;
	static constexpr size_t m_pool_size = max_cache_size + max_overflow_size;

	// Adopt a new raw handle and reset bookkeeping state.
	void init(VkDescriptorSet new_set);

public:
	descriptor_set(VkDescriptorSet set);
	descriptor_set() = default;
	~descriptor_set();
	descriptor_set(const descriptor_set&) = delete;

	void swap(descriptor_set& other);
	descriptor_set& operator = (VkDescriptorSet set);

	VkDescriptorSet* ptr();
	VkDescriptorSet value() const;

	// Queue a descriptor write for the given binding slot; the write is not
	// submitted to the driver until flush()/bind().
	void push(const VkBufferView& buffer_view, VkDescriptorType type, u32 binding);
	void push(const VkDescriptorBufferInfo& buffer_info, VkDescriptorType type, u32 binding);
	void push(const VkDescriptorImageInfo& image_info, VkDescriptorType type, u32 binding);
	void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding);
	// Queue descriptor copies, optionally filtered by descriptor type mask.
	void push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax);

	// Bind the set to the command buffer (pending writes are applied first).
	void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);

	// Submit all queued writes/copies to the driver.
	void flush();

private:
	VkDescriptorSet m_handle = VK_NULL_HANDLE;
	u64 m_update_after_bind_mask = 0; // bindings flagged for update-after-bind semantics
	u64 m_push_type_mask = 0;         // bitmask of descriptor types queued so far
	bool m_in_use = false;

	// Backing storage keeping the pushed info structs alive until flush.
	rsx::simple_array<VkBufferView> m_buffer_view_pool;
	rsx::simple_array<VkDescriptorBufferInfo> m_buffer_info_pool;
	rsx::simple_array<VkDescriptorImageInfo> m_image_info_pool;

#ifdef __clang__
	// Clang (pre 16.x) does not support LWG 2089, std::construct_at for POD types
	struct WriteDescriptorSetT : public VkWriteDescriptorSet
	{
		WriteDescriptorSetT(
			VkStructureType sType,
			const void* pNext,
			VkDescriptorSet dstSet,
			uint32_t dstBinding,
			uint32_t dstArrayElement,
			uint32_t descriptorCount,
			VkDescriptorType descriptorType,
			const VkDescriptorImageInfo* pImageInfo,
			const VkDescriptorBufferInfo* pBufferInfo,
			const VkBufferView* pTexelBufferView)
		{
			// Comma-operator chain; equivalent to a sequence of assignments.
			this->sType = sType,
			this->pNext = pNext,
			this->dstSet = dstSet,
			this->dstBinding = dstBinding,
			this->dstArrayElement = dstArrayElement,
			this->descriptorCount = descriptorCount,
			this->descriptorType = descriptorType,
			this->pImageInfo = pImageInfo,
			this->pBufferInfo = pBufferInfo,
			this->pTexelBufferView = pTexelBufferView;
		}
	};
#else
	using WriteDescriptorSetT = VkWriteDescriptorSet;
#endif

	rsx::simple_array<WriteDescriptorSetT> m_pending_writes;
	rsx::simple_array<VkCopyDescriptorSet> m_pending_copies;
};
// Free helpers for global descriptor management: process-wide setup/teardown
// of the descriptor subsystem and set-layout creation.
namespace descriptors
{
	void init();
	void flush();

	VkDescriptorSetLayout create_layout(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings);
}
}
| 4,760
|
C++
|
.h
| 126
| 34.452381
| 162
| 0.728714
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,108
|
shared.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/shared.h
|
#pragma once
#include "../VulkanAPI.h"
#include <string>
namespace vk
{
	// Fatal-error check helpers for Vulkan calls: evaluate `expr` once and hand
	// any non-success VkResult to vk::die_with_error().
	// Wrapped in do { } while (0) so each macro expands to exactly one
	// statement; the original bare { } block broke unbraced
	// "if (...) CHECK_RESULT(x); else ..." call sites (dangling-else hazard).
#define CHECK_RESULT(expr) do { VkResult _res = (expr); if (_res != VK_SUCCESS) vk::die_with_error(_res); } while (0)
#define CHECK_RESULT_EX(expr, msg) do { VkResult _res = (expr); if (_res != VK_SUCCESS) vk::die_with_error(_res, msg); } while (0)

	// Reports the failing VkResult (with optional message and the caller's
	// source location) and aborts; see implementation for exact behavior.
	void die_with_error(VkResult error_code, std::string message = {}, std::source_location src_loc = std::source_location::current());

	// VK_EXT_debug_report callback: forwards validation-layer messages to the log.
	VKAPI_ATTR VkBool32 VKAPI_CALL dbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType,
											u64 srcObject, usz location, s32 msgCode,
											const char *pLayerPrefix, const char *pMsg, void *pUserData);

	// Alternative debug-report callback intended for breaking into a debugger.
	VkBool32 BreakCallback(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType,
							u64 srcObject, usz location, s32 msgCode,
							const char* pLayerPrefix, const char* pMsg,
							void* pUserData);
}
| 901
|
C++
|
.h
| 16
| 49.4375
| 132
| 0.680682
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,109
|
buffer_object.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/buffer_object.h
|
#pragma once
#include "../VulkanAPI.h"
#include "device.h"
#include "memory.h"
namespace vk
{
	// Non-copyable, non-movable RAII wrapper around VkBufferView; the view is
	// created in the constructor and destroyed in the destructor.
	struct buffer_view
	{
		VkBufferView value;
		VkBufferViewCreateInfo info = {};

		buffer_view(VkDevice dev, VkBuffer buffer, VkFormat format, VkDeviceSize offset, VkDeviceSize size);
		~buffer_view();

		buffer_view(const buffer_view&) = delete;
		buffer_view(buffer_view&&) = delete;

		// Checks whether [address, address + size) falls inside the viewed
		// range; on success 'offset' receives the relative offset into the
		// view (see implementation for exact out-parameter semantics).
		bool in_range(u32 address, u32 size, u32& offset) const;

	private:
		VkDevice m_device; // device the view was created on, needed for destruction
	};

	// Non-copyable, non-movable RAII wrapper around VkBuffer plus its backing
	// memory block.
	struct buffer
	{
		VkBuffer value;
		VkBufferCreateInfo info = {};
		std::unique_ptr<vk::memory_block> memory; // owned device memory backing this buffer

		// Device-local/host-visible allocation variant.
		buffer(const vk::render_device& dev, u64 size, const memory_type_info& memory_type, u32 access_flags, VkBufferUsageFlags usage, VkBufferCreateFlags flags, vmm_allocation_pool allocation_pool);
		// External-host-memory variant wrapping an existing host pointer.
		buffer(const vk::render_device& dev, VkBufferUsageFlags usage, void* host_pointer, u64 size);
		~buffer();

		// Map/unmap a sub-range of the backing memory for CPU access.
		void* map(u64 offset, u64 size);
		void unmap();

		u32 size() const;

		buffer(const buffer&) = delete;
		buffer(buffer&&) = delete;

	private:
		VkDevice m_device; // device the buffer was created on, needed for destruction
	};
}
| 1,076
|
C++
|
.h
| 35
| 28.057143
| 194
| 0.736893
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,110
|
barriers.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/barriers.h
|
#pragma once
#include "../VulkanAPI.h"
namespace vk
{
	class image;
	class command_buffer;

	//Texture barrier applies to a texture to ensure writes to it are finished before any reads are attempted to avoid RAW hazards
	void insert_texture_barrier(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, VkImageSubresourceRange range, bool preserve_renderpass = false);
	void insert_texture_barrier(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, bool preserve_renderpass = false);

	// Pipeline barrier covering a buffer sub-range (vkCmdPipelineBarrier with a
	// VkBufferMemoryBarrier).
	void insert_buffer_memory_barrier(const vk::command_buffer& cmd, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize length,
		VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask,
		bool preserve_renderpass = false);

	// Pipeline barrier transitioning an image subresource range between layouts.
	void insert_image_memory_barrier(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout,
		VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src_mask, VkAccessFlags dst_mask,
		const VkImageSubresourceRange& range, bool preserve_renderpass = false);

	// Full memory barrier; defaults synchronize all stages and all memory access.
	void insert_global_memory_barrier(const vk::command_buffer& cmd,
		VkPipelineStageFlags src_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
		VkPipelineStageFlags dst_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
		VkAccessFlags src_access = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
		VkAccessFlags dst_access = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
		bool preserve_renderpass = false);
}
| 1,607
|
C++
|
.h
| 22
| 69.318182
| 197
| 0.797203
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,111
|
image_helpers.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/image_helpers.h
|
#pragma once
#include "../VulkanAPI.h"
// Forward declaration only; the full definition lives in the RSX core headers.
namespace rsx
{
	struct texture_channel_remap_t;
}
namespace vk
{
	class image;
	class command_buffer;

	// Identity RGBA component mapping shared by image-view creation code.
	extern VkComponentMapping default_component_map;

	// Derives the aspect bits (color/depth/stencil) implied by a VkFormat.
	VkImageAspectFlags get_aspect_flags(VkFormat format);

	// Combines a base swizzle with the RSX channel remap vector into a final
	// VkComponentMapping for an image view.
	VkComponentMapping apply_swizzle_remap(const std::array<VkComponentSwizzle, 4>& base_remap, const rsx::texture_channel_remap_t& remap_vector);

	// Layout transition helpers. The full overload allows queue-family
	// ownership transfer and explicit access-mask filtering; the shorter
	// overloads derive sensible defaults from the image object.
	void change_image_layout(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range,
		u32 src_queue_family = VK_QUEUE_FAMILY_IGNORED, u32 dst_queue_family = VK_QUEUE_FAMILY_IGNORED,
		u32 src_access_mask_bits = 0xFFFFFFFF, u32 dst_access_mask_bits = 0xFFFFFFFF);
	void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range);
	void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout);
}
| 1,011
|
C++
|
.h
| 19
| 49.894737
| 166
| 0.783673
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,112
|
RSXVertexProgram.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/RSXVertexProgram.h
|
#pragma once
#include "program_util.h"
#include <vector>
#include <bitset>
#include <set>
// Register-bank selector for vertex program source operands (see SRC::reg_type).
enum vp_reg_type
{
	RSX_VP_REGISTER_TYPE_TEMP = 1,     // temporary (R) register
	RSX_VP_REGISTER_TYPE_INPUT = 2,    // vertex attribute input
	RSX_VP_REGISTER_TYPE_CONSTANT = 3, // transform constant
};
// Scalar-pipe (SCA) opcodes of the RSX vertex program ISA.
// Values index rsx_vp_sca_op_names; the two must stay in sync.
enum sca_opcode
{
	RSX_SCA_OPCODE_NOP = 0x00, // No-Operation
	RSX_SCA_OPCODE_MOV = 0x01, // Move (copy)
	RSX_SCA_OPCODE_RCP = 0x02, // Reciprocal
	RSX_SCA_OPCODE_RCC = 0x03, // Reciprocal clamped
	RSX_SCA_OPCODE_RSQ = 0x04, // Reciprocal square root
	RSX_SCA_OPCODE_EXP = 0x05, // Exponential base 2 (low-precision)
	RSX_SCA_OPCODE_LOG = 0x06, // Logarithm base 2 (low-precision)
	RSX_SCA_OPCODE_LIT = 0x07, // Lighting calculation
	RSX_SCA_OPCODE_BRA = 0x08, // Branch
	RSX_SCA_OPCODE_BRI = 0x09, // Branch by CC register
	RSX_SCA_OPCODE_CAL = 0x0a, // Subroutine call
	RSX_SCA_OPCODE_CLI = 0x0b, // Subroutine call by CC register
	RSX_SCA_OPCODE_RET = 0x0c, // Return from subroutine
	RSX_SCA_OPCODE_LG2 = 0x0d, // Logarithm base 2
	RSX_SCA_OPCODE_EX2 = 0x0e, // Exponential base 2
	RSX_SCA_OPCODE_SIN = 0x0f, // Sine function
	RSX_SCA_OPCODE_COS = 0x10, // Cosine function
	RSX_SCA_OPCODE_BRB = 0x11, // Branch by Boolean constant
	RSX_SCA_OPCODE_CLB = 0x12, // Subroutine call by Boolean constant
	RSX_SCA_OPCODE_PSH = 0x13, // Push onto stack
	RSX_SCA_OPCODE_POP = 0x14, // Pop from stack
};
// Vector-pipe (VEC) opcodes of the RSX vertex program ISA.
// Values index rsx_vp_vec_op_names; note the gap at 0x17/0x18 which the name
// table fills with "NULL" placeholders.
enum vec_opcode
{
	RSX_VEC_OPCODE_NOP = 0x00, // No-Operation
	RSX_VEC_OPCODE_MOV = 0x01, // Move
	RSX_VEC_OPCODE_MUL = 0x02, // Multiply
	RSX_VEC_OPCODE_ADD = 0x03, // Addition
	RSX_VEC_OPCODE_MAD = 0x04, // Multiply-Add
	RSX_VEC_OPCODE_DP3 = 0x05, // 3-component Dot Product
	RSX_VEC_OPCODE_DPH = 0x06, // Homogeneous Dot Product
	RSX_VEC_OPCODE_DP4 = 0x07, // 4-component Dot Product
	RSX_VEC_OPCODE_DST = 0x08, // Calculate distance vector
	RSX_VEC_OPCODE_MIN = 0x09, // Minimum
	RSX_VEC_OPCODE_MAX = 0x0a, // Maximum
	RSX_VEC_OPCODE_SLT = 0x0b, // Set-If-LessThan
	RSX_VEC_OPCODE_SGE = 0x0c, // Set-If-GreaterEqual
	RSX_VEC_OPCODE_ARL = 0x0d, // Load to address register (round down)
	RSX_VEC_OPCODE_FRC = 0x0e, // Extract fractional part (fraction)
	RSX_VEC_OPCODE_FLR = 0x0f, // Round down (floor)
	RSX_VEC_OPCODE_SEQ = 0x10, // Set-If-Equal
	RSX_VEC_OPCODE_SFL = 0x11, // Set-If-False
	RSX_VEC_OPCODE_SGT = 0x12, // Set-If-GreaterThan
	RSX_VEC_OPCODE_SLE = 0x13, // Set-If-LessEqual
	RSX_VEC_OPCODE_SNE = 0x14, // Set-If-NotEqual
	RSX_VEC_OPCODE_STR = 0x15, // Set-If-True
	RSX_VEC_OPCODE_SSG = 0x16, // Convert positive values to 1 and negative values to -1
	RSX_VEC_OPCODE_TXL = 0x19, // Texture fetch
};
// Vertex program instruction word 0: destination write masks, condition-code
// control and source modifier flags.
union D0
{
	u32 HEX;

	struct
	{
		u32 addr_swz : 2;
		u32 mask_w : 2;
		u32 mask_z : 2;
		u32 mask_y : 2;
		u32 mask_x : 2;
		u32 cond : 3;
		u32 cond_test_enable : 1;
		u32 cond_update_enable_0 : 1;
		u32 dst_tmp : 6;
		u32 src0_abs : 1; // absolute-value modifier for source 0
		u32 src1_abs : 1;
		u32 src2_abs : 1;
		u32 addr_reg_sel_1 : 1;
		u32 cond_reg_sel_1 : 1;
		u32 staturate : 1; // [sic] saturate flag - misspelling preserved, external code references this name
		u32 index_input : 1;
		u32 : 1;
		u32 cond_update_enable_1 : 1;
		u32 vec_result : 1;
		u32 : 1;
	};

	struct
	{
		u32 : 23;
		u32 iaddrh2 : 1; // presumably an extra high bit of the branch target (with D2.iaddrh / D3.iaddrl) - confirm against decoder
		u32 : 8;
	};
};
// Vertex program instruction word 1: pipe opcodes and source selectors.
union D1
{
	u32 HEX;

	struct
	{
		u32 src0h : 8;      // high bits of source 0 (low bits live in D2.src0l)
		u32 input_src : 4;  // vertex attribute slot for input-bank sources
		u32 const_src : 10; // transform constant index for constant-bank sources
		u32 vec_opcode : 5; // see enum vec_opcode
		u32 sca_opcode : 5; // see enum sca_opcode
	};
};
// Vertex program instruction word 2: remaining source operand bits, with
// overlapping branch-address and texture-unit views.
union D2
{
	u32 HEX;

	struct
	{
		u32 src2h : 6; // high bits of source 2 (low bits live in D3.src2l)
		u32 src1 : 17; // complete source 1 descriptor
		u32 src0l : 9; // low bits of source 0 (high bits live in D1.src0h)
	};

	struct
	{
		u32 iaddrh : 6; // high bits of the branch target address (low bits in D3.iaddrl)
		u32 : 26;
	};

	struct
	{
		u32 : 8;
		u32 tex_num : 2; // Actual field may be 4 bits wide, but we only have 4 TIUs
		u32 : 22;
	};
};
// Vertex program instruction word 3: destination selection, per-pipe write
// masks, and the tails of source 2 / branch addressing.
union D3
{
	u32 HEX;

	struct
	{
		u32 end : 1;         // presumably marks the final instruction of the program - confirm against decoder
		u32 index_const : 1; // constant access is indexed via the address register
		u32 dst : 5;
		u32 sca_dst_tmp : 6;
		u32 vec_writemask_w : 1;
		u32 vec_writemask_z : 1;
		u32 vec_writemask_y : 1;
		u32 vec_writemask_x : 1;
		u32 sca_writemask_w : 1;
		u32 sca_writemask_z : 1;
		u32 sca_writemask_y : 1;
		u32 sca_writemask_x : 1;
		u32 src2l : 11;      // low bits of source 2 (high bits live in D2.src2h)
	};

	struct
	{
		u32 : 23;
		u32 branch_index : 5; //Index into transform_program_branch_bits [x]
		u32 brb_cond_true : 1; //If set, branch is taken if (b[x]) else if (!b[x])
		u32 iaddrl : 3; // low bits of the branch target address (high bits in D2.iaddrh)
	};
};
// Decoded vertex program source operand. The inner anonymous union exposes
// the raw bit groups exactly as they are split across D1/D2/D3 for each of
// the three operands; the outer struct view decodes the common fields.
union SRC
{
	union
	{
		u32 HEX;

		struct
		{
			u32 src0l : 9;
			u32 src0h : 8;
		};

		struct
		{
			u32 src1 : 17;
		};

		struct
		{
			u32 src2l : 11;
			u32 src2h : 6;
		};
	};

	struct
	{
		u32 reg_type : 2; // register bank, see enum vp_reg_type
		u32 tmp_src : 6;  // temp register index (meaningful when reg_type is TEMP)
		u32 swz_w : 2;    // per-component source swizzle selectors
		u32 swz_z : 2;
		u32 swz_y : 2;
		u32 swz_x : 2;
		u32 neg : 1;      // negate the operand
	};
};
// Disassembly mnemonics indexed by sca_opcode; order must match the enum.
// NOTE(review): static const std::string in a header duplicates the table
// (and its dynamic initialization) per translation unit; constexpr
// std::string_view would be preferable if consumers can be updated.
static const std::string rsx_vp_sca_op_names[] =
{
	"NOP", "MOV", "RCP", "RCC", "RSQ", "EXP", "LOG",
	"LIT", "BRA", "BRI", "CAL", "CLI", "RET", "LG2",
	"EX2", "SIN", "COS", "BRB", "CLB", "PSH", "POP"
};
// Disassembly mnemonics indexed by vec_opcode; order must match the enum.
// The two "NULL" entries pad the unassigned opcodes 0x17/0x18 so TXL (0x19)
// lands at the right index.
static const std::string rsx_vp_vec_op_names[] =
{
	"NOP", "MOV", "MUL", "ADD", "MAD", "DP3", "DPH", "DP4",
	"DST", "MIN", "MAX", "SLT", "SGE", "ARL", "FRC", "FLR",
	"SEQ", "SFL", "SGT", "SLE", "SNE", "STR", "SSG", "NULL", "NULL", "TXL"
};
// Snapshot of an RSX vertex program plus the state the decompilers need.
struct RSXVertexProgram
{
	std::vector<u32> data; // raw microcode words
	rsx::vertex_program_texture_state texture_state;
	u32 ctrl;
	u32 output_mask;
	u32 base_address;
	u32 entry;
	std::bitset<rsx::max_vertex_program_instructions> instruction_mask; // instructions that belong to this program
	std::set<u32> jump_table; // branch target addresses discovered during analysis

	// Decodes the packed texture dimension for texture unit 'id'
	// (2 bits per unit inside texture_state.texture_dimensions).
	rsx::texture_dimension_extended get_texture_dimension(u8 id) const
	{
		return rsx::texture_dimension_extended{static_cast<u8>((texture_state.texture_dimensions >> (id * 2)) & 0x3)};
	}
};
| 5,570
|
C++
|
.h
| 213
| 23.835681
| 112
| 0.612601
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,113
|
CgBinaryProgram.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/CgBinaryProgram.h
|
#pragma once
#include "Emu/Memory/vm.h"
#include "Emu/RSX/GL/GLVertexProgram.h"
#include "Emu/RSX/GL/GLFragmentProgram.h"
#include "Emu/RSX/Program/ProgramStateCache.h"
#include "Utilities/File.h"
using CGprofile = u32;
using CGbool = s32;
using CGresource = u32;
using CGenum = u32;
using CGtype = u32;
using CGbitfield = u32;
using CGbitfield16 = u16;
using CGint = s32;
using CGuint = u32;
using CgBinaryOffset = CGuint;
using CgBinarySize = CgBinaryOffset;
using CgBinaryEmbeddedConstantOffset = CgBinaryOffset;
using CgBinaryFloatOffset = CgBinaryOffset;
using CgBinaryStringOffset = CgBinaryOffset;
using CgBinaryParameterOffset = CgBinaryOffset;
using CgBinaryParameter = struct CgBinaryParameter;
using CgBinaryEmbeddedConstant = struct CgBinaryEmbeddedConstant;
using CgBinaryVertexProgram = struct CgBinaryVertexProgram;
using CgBinaryFragmentProgram = struct CgBinaryFragmentProgram;
using CgBinaryProgram = struct CgBinaryProgram;
// fragment programs have their constants embedded in the microcode
struct CgBinaryEmbeddedConstant
{
	be_t<u32> ucodeCount;     // occurrences
	be_t<u32> ucodeOffset[1]; // offsets that need to be patched follow (variable-length trailing array)
};
// describe a binary program parameter (CgParameter is opaque)
// All *Offset fields index from the start of the CgBinaryProgram blob
// (see CgBinaryDisasm::GetCgParamName and friends).
struct CgBinaryParameter
{
	CGtype type;                                  // cgGetParameterType()
	CGresource res;                               // cgGetParameterResource()
	CGenum var;                                   // cgGetParameterVariability()
	CGint resIndex;                               // cgGetParameterResourceIndex()
	CgBinaryStringOffset name;                    // cgGetParameterName()
	CgBinaryFloatOffset defaultValue;             // default constant value
	CgBinaryEmbeddedConstantOffset embeddedConst; // embedded constant information
	CgBinaryStringOffset semantic;                // cgGetParameterSemantic()
	CGenum direction;                             // cgGetParameterDirection()
	CGint paramno;                                // 0..n: cgGetParameterIndex() -1: globals
	CGbool isReferenced;                          // cgIsParameterReferenced()
	CGbool isShared;                              // cgIsParameterShared()
};
// attributes needed for vshaders
// Domain-specific header referenced by CgBinaryProgram::program when the
// profile identifies a vertex program.
struct CgBinaryVertexProgram
{
	CgBinarySize instructionCount;  // #instructions
	CgBinarySize instructionSlot;   // load address (indexed reads!)
	CgBinarySize registerCount;     // R registers count
	CGbitfield attributeInputMask;  // attributes vs reads from
	CGbitfield attributeOutputMask; // attributes vs writes (uses SET_VERTEX_ATTRIB_OUTPUT_MASK bits)
	CGbitfield userClipMask;        // user clip plane enables (for SET_USER_CLIP_PLANE_CONTROL)
};
// Partial-load texture classification packed two bits per texture unit in
// CgBinaryFragmentProgram::partialTexType.
typedef enum
{
	CgBinaryPTTNone = 0,
	CgBinaryPTT2x16 = 1,
	CgBinaryPTT1x32 = 2
} CgBinaryPartialTexType;
// attributes needed for pshaders
// Domain-specific header referenced by CgBinaryProgram::program when the
// profile identifies a fragment program.
struct CgBinaryFragmentProgram
{
	CgBinarySize instructionCount;   // #instructions
	CGbitfield attributeInputMask;   // attributes fp reads (uses SET_VERTEX_ATTRIB_OUTPUT_MASK bits)
	CGbitfield partialTexType;       // texid 0..15 use two bits each marking whether the texture format requires partial load: see CgBinaryPartialTexType
	CGbitfield16 texCoordsInputMask; // tex coords used by frag prog. (tex<n> is bit n)
	CGbitfield16 texCoords2D;        // tex coords that are 2d (tex<n> is bit n)
	CGbitfield16 texCoordsCentroid;  // tex coords that are centroid (tex<n> is bit n)
	u8 registerCount;                // R registers count
	u8 outputFromH0;                 // final color from R0 or H0
	u8 depthReplace;                 // fp generated z depth value
	u8 pixelKill;                    // fp uses kill operations
};
// Top-level header of a Cg binary shader blob. All offsets are relative to
// the start of this struct; the file may be stored big-endian (see
// CgBinaryDisasm::ConvertToLE).
struct CgBinaryProgram
{
	// vertex/pixel shader identification (BE/LE as well)
	CGprofile profile;

	// binary revision (used to verify binary and driver structs match)
	CgBinarySize binaryFormatRevision;

	// total size of this struct including profile and totalSize field
	CgBinarySize totalSize;

	// parameter usually queried using cgGet[First/Next]LeafParameter
	CgBinarySize parameterCount;
	CgBinaryParameterOffset parameterArray;

	// depending on profile points to a CgBinaryVertexProgram or CgBinaryFragmentProgram struct
	CgBinaryOffset program;

	// raw ucode data
	CgBinarySize ucodeSize;
	CgBinaryOffset ucode;

	// variable length data follows
	u8 data[1];
};
// Loads a Cg binary shader file and produces both an ARB-style disassembly
// (m_arb_shader) and a GLSL translation (m_glsl_shader) via the GL
// decompiler threads. Owns the raw file buffer for its whole lifetime.
class CgBinaryDisasm
{
	// Raw decoded instruction words shared by the FP and VP paths.
	OPDEST dst;
	SRC0 src0;
	SRC1 src1;
	SRC2 src2;

	D0 d0;
	D1 d1;
	D2 d2;
	D3 d3;
	SRC src[3];

	std::string m_path; // used for FP decompiler thread, delete this later

	u8* m_buffer = nullptr; // owned copy of the file contents (freed in dtor)
	usz m_buffer_size = 0;
	std::string m_arb_shader;
	std::string m_glsl_shader;
	std::string m_dst_reg_name;

	// FP members
	u32 m_offset = 0;
	u32 m_opcode = 0;
	u32 m_step = 0;
	u32 m_size = 0;
	std::vector<u32> m_end_offsets;
	std::vector<u32> m_else_offsets;
	std::vector<u32> m_loop_end_offsets;

	// VP members
	u32 m_sca_opcode;
	u32 m_vec_opcode;
	static const usz m_max_instr_count = 512;
	usz m_instr_count;
	std::vector<u32> m_data;

public:
	std::string GetArbShader() const { return m_arb_shader; }
	std::string GetGlslShader() const { return m_glsl_shader; }

	// FP functions
	std::string GetMask() const;
	void AddCodeAsm(const std::string& code);
	std::string AddRegDisAsm(u32 index, int fp16) const;
	std::string AddConstDisAsm();
	std::string AddTexDisAsm() const;
	std::string FormatDisAsm(const std::string& code);
	std::string GetCondDisAsm() const;
	template<typename T> std::string GetSrcDisAsm(T src);

	// VP functions
	std::string GetMaskDisasm(bool is_sca) const;
	std::string GetVecMaskDisasm() const;
	std::string GetScaMaskDisasm() const;
	std::string GetDSTDisasm(bool is_sca = false) const;
	std::string GetSRCDisasm(u32 n) const;
	static std::string GetTexDisasm();
	std::string GetCondDisasm() const;
	std::string AddAddrMaskDisasm() const;
	std::string AddAddrRegDisasm() const;
	u32 GetAddrDisasm() const;
	std::string FormatDisasm(const std::string& code) const;
	void AddScaCodeDisasm(const std::string& code = "");
	void AddVecCodeDisasm(const std::string& code = "");
	void AddCodeCondDisasm(const std::string& dst, const std::string& src);
	void AddCodeDisasm(const std::string& code);
	void SetDSTDisasm(bool is_sca, const std::string& value);
	void SetDSTVecDisasm(const std::string& code);
	void SetDSTScaDisasm(const std::string& code);

	// Reads the whole file into m_buffer; on failure the object stays empty
	// (m_buffer == nullptr) and the destructor remains safe.
	CgBinaryDisasm(const std::string& path)
		: m_path(path)
	{
		fs::file f(path);
		if (!f) return;

		m_buffer_size = f.size();
		m_buffer = new u8[m_buffer_size];
		f.read(m_buffer, m_buffer_size);
		fmt::append(m_arb_shader, "Loading... [%s]\n", path.c_str());
	}

	// Non-copyable: m_buffer is a raw owning pointer and a shallow copy
	// would double-delete it.
	CgBinaryDisasm(const CgBinaryDisasm&) = delete;
	CgBinaryDisasm& operator=(const CgBinaryDisasm&) = delete;

	~CgBinaryDisasm()
	{
		delete[] m_buffer;
	}

	// Maps a CGtype constant to a readable type name for the disassembly header.
	static std::string GetCgParamType(u32 type)
	{
		switch (type)
		{
		case 1045: return "float";
		case 1046:
		case 1047:
		case 1048: return fmt::format("float%d", type - 1044);
		case 1064: return "float4x4";
		case 1066: return "sampler2D";
		case 1069: return "samplerCUBE";
		case 1091: return "float1";

		default: return fmt::format("!UnkCgType(%d)", type);
		}
	}

	// Reads a NUL-terminated string stored at 'offset' inside the blob.
	std::string GetCgParamName(u32 offset) const
	{
		return std::string(reinterpret_cast<char*>(&m_buffer[offset]));
	}

	std::string GetCgParamRes(u32 /*offset*/) const
	{
		// rsx_log.warning("GetCgParamRes offset 0x%x", offset);
		// TODO
		return "";
	}

	std::string GetCgParamSemantic(u32 offset) const
	{
		return std::string(reinterpret_cast<char*>(&m_buffer[offset]));
	}

	// Formats the embedded-constant patch offsets between 'offset' and
	// 'end_offset'. Returns "" when more than 4 offsets are found.
	std::string GetCgParamValue(u32 offset, u32 end_offset) const
	{
		std::string offsets = "offsets:";
		u32 num = 0;

		offset += 6;
		while (offset < end_offset)
		{
			fmt::append(offsets, " %d,", m_buffer[offset] << 8 | m_buffer[offset + 1]);
			offset += 4;
			num++;
		}

		if (num > 4)
			return "";

		// Drops the trailing ',' (or the ':' when no offsets were found).
		offsets.pop_back();
		return fmt::format("num %d ", num) + offsets;
	}

	// Reinterprets the bytes at 'offset' inside the blob as a T reference.
	template<typename T>
	T& GetCgRef(const u32 offset)
	{
		return reinterpret_cast<T&>(m_buffer[offset]);
	}

	// Converts a big-endian blob (console-produced) to little-endian in place.
	void ConvertToLE(CgBinaryProgram& prog)
	{
		// BE payload, requires that data be swapped
		const auto be_profile = prog.profile;
		auto swap_be32 = [&](u32 start_offset, size_t size_bytes)
		{
			auto start = reinterpret_cast<u32*>(m_buffer + start_offset);
			auto end = reinterpret_cast<u32*>(m_buffer + start_offset + size_bytes);
			for (auto data = start; data < end; ++data)
			{
				*data = std::bit_cast<be_t<u32>>(*data);
			}
		};

		// 1. Swap the header
		swap_be32(0, sizeof(CgBinaryProgram));

		// 2. Swap parameters
		swap_be32(prog.parameterArray, sizeof(CgBinaryParameter) * prog.parameterCount);

		// 3. Swap the ucode
		swap_be32(prog.ucode, m_buffer_size - prog.ucode);

		// 4. Swap the domain header
		if (be_profile == 7004u)
		{
			// Need to swap each field individually
			auto& fprog = GetCgRef<CgBinaryFragmentProgram>(prog.program);
			fprog.instructionCount = std::bit_cast<be_t<u32>>(fprog.instructionCount);
			fprog.attributeInputMask = std::bit_cast<be_t<u32>>(fprog.attributeInputMask);
			fprog.partialTexType = std::bit_cast<be_t<u32>>(fprog.partialTexType);
			fprog.texCoordsInputMask = std::bit_cast<be_t<u16>>(fprog.texCoordsInputMask);
			fprog.texCoords2D = std::bit_cast<be_t<u16>>(fprog.texCoords2D);
			fprog.texCoordsCentroid = std::bit_cast<be_t<u16>>(fprog.texCoordsCentroid);
		}
		else
		{
			// Swap entire header block as all fields are u32
			swap_be32(prog.program, sizeof(CgBinaryVertexProgram));
		}
	}

	// Parses the blob header, emits the parameter listing into m_arb_shader,
	// then runs the appropriate (FP/VP) disassembler and GLSL decompiler.
	void BuildShaderBody()
	{
		ParamArray param_array;
		auto& prog = GetCgRef<CgBinaryProgram>(0);

		if (const u32 be_profile = std::bit_cast<be_t<u32>>(prog.profile);
			be_profile == 7003u || be_profile == 7004u)
		{
			ConvertToLE(prog);
			ensure(be_profile == prog.profile);
		}

		if (prog.profile == 7004u)
		{
			auto& fprog = GetCgRef<CgBinaryFragmentProgram>(prog.program);
			m_arb_shader += "\n";
			fmt::append(m_arb_shader, "# binaryFormatRevision 0x%x\n", prog.binaryFormatRevision);
			fmt::append(m_arb_shader, "# profile sce_fp_rsx\n");
			fmt::append(m_arb_shader, "# parameterCount %d\n", prog.parameterCount);
			fmt::append(m_arb_shader, "# instructionCount %d\n", fprog.instructionCount);
			fmt::append(m_arb_shader, "# attributeInputMask 0x%x\n", fprog.attributeInputMask);
			fmt::append(m_arb_shader, "# registerCount %d\n\n", fprog.registerCount);

			CgBinaryParameterOffset offset = prog.parameterArray;
			for (u32 i = 0; i < prog.parameterCount; i++)
			{
				auto& fparam = GetCgRef<CgBinaryParameter>(offset);

				std::string param_type = GetCgParamType(fparam.type) + " ";
				std::string param_name = GetCgParamName(fparam.name) + " ";
				std::string param_res = GetCgParamRes(fparam.res) + " ";
				std::string param_semantic = GetCgParamSemantic(fparam.semantic) + " ";
				std::string param_const = GetCgParamValue(fparam.embeddedConst, fparam.name);

				fmt::append(m_arb_shader, "#%d%s%s%s%s\n", i, param_type, param_name, param_semantic, param_const);

				offset += u32{sizeof(CgBinaryParameter)};
			}

			m_arb_shader += "\n";
			m_offset = prog.ucode;
			TaskFP();

			u32 unused;
			std::vector<u32> be_data;

			// Swap bytes. FP decompiler expects input in BE
			for (u32* ptr = reinterpret_cast<u32*>(m_buffer + m_offset),
				*end = reinterpret_cast<u32*>(m_buffer + m_buffer_size);
				ptr < end; ++ptr)
			{
				be_data.push_back(std::bit_cast<be_t<u32>>(*ptr));
			}

			// Renamed from 'prog' to stop shadowing the CgBinaryProgram
			// reference declared at the top of this function.
			RSXFragmentProgram frag_prog;
			auto metadata = program_hash_util::fragment_program_utils::analyse_fragment_program(be_data.data());
			frag_prog.ctrl = (fprog.outputFromH0 ? 0 : 0x40) | (fprog.depthReplace ? 0xe : 0);
			frag_prog.offset = metadata.program_start_offset;
			frag_prog.ucode_length = metadata.program_ucode_length;
			frag_prog.total_length = metadata.program_ucode_length + metadata.program_start_offset;
			frag_prog.data = reinterpret_cast<u8*>(be_data.data()) + metadata.program_start_offset;
			for (u32 i = 0; i < 16; ++i) frag_prog.texture_state.set_dimension(rsx::texture_dimension_extended::texture_dimension_2d, i);
			GLFragmentDecompilerThread(m_glsl_shader, param_array, frag_prog, unused).Task();
		}
		else
		{
			const auto& vprog = GetCgRef<CgBinaryVertexProgram>(prog.program);
			m_arb_shader += "\n";
			fmt::append(m_arb_shader, "# binaryFormatRevision 0x%x\n", prog.binaryFormatRevision);
			fmt::append(m_arb_shader, "# profile sce_vp_rsx\n");
			fmt::append(m_arb_shader, "# parameterCount %d\n", prog.parameterCount);
			fmt::append(m_arb_shader, "# instructionCount %d\n", vprog.instructionCount);
			fmt::append(m_arb_shader, "# registerCount %d\n", vprog.registerCount);
			fmt::append(m_arb_shader, "# attributeInputMask 0x%x\n", vprog.attributeInputMask);
			fmt::append(m_arb_shader, "# attributeOutputMask 0x%x\n\n", vprog.attributeOutputMask);

			CgBinaryParameterOffset offset = prog.parameterArray;
			for (u32 i = 0; i < prog.parameterCount; i++)
			{
				auto& vparam = GetCgRef<CgBinaryParameter>(offset);

				std::string param_type = GetCgParamType(vparam.type) + " ";
				std::string param_name = GetCgParamName(vparam.name) + " ";
				std::string param_res = GetCgParamRes(vparam.res) + " ";
				std::string param_semantic = GetCgParamSemantic(vparam.semantic) + " ";
				std::string param_const = GetCgParamValue(vparam.embeddedConst, vparam.name);

				fmt::append(m_arb_shader, "#%d%s%s%s%s\n", i, param_type, param_name, param_semantic, param_const);

				offset += u32{sizeof(CgBinaryParameter)};
			}

			m_arb_shader += "\n";
			m_offset = prog.ucode;
			ensure((m_buffer_size - m_offset) % sizeof(u32) == 0);

			u32* vdata = reinterpret_cast<u32*>(&m_buffer[m_offset]);
			m_data.resize(prog.ucodeSize / sizeof(u32));
			std::memcpy(m_data.data(), vdata, prog.ucodeSize);
			TaskVP();

			// Renamed from 'prog' to stop shadowing the CgBinaryProgram
			// reference declared at the top of this function.
			RSXVertexProgram vert_prog;
			program_hash_util::vertex_program_utils::analyse_vertex_program(vdata, 0, vert_prog);
			for (u32 i = 0; i < 4; ++i) vert_prog.texture_state.set_dimension(rsx::texture_dimension_extended::texture_dimension_2d, i);
			GLVertexDecompilerThread(vert_prog, m_glsl_shader, param_array).Task();
		}
	}

	// Swaps the 16-bit halves of a microcode word.
	static u32 GetData(const u32 d) { return d << 16 | d >> 16; }
	void TaskFP();
	void TaskVP();
};
| 14,110
|
C++
|
.h
| 354
| 37.036723
| 156
| 0.707868
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,114
|
VertexProgramDecompiler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/VertexProgramDecompiler.h
|
#pragma once
#include "RSXVertexProgram.h"
#include <vector>
#include <stack>
#include "ShaderParam.h"
/**
* This class is used to translate RSX Vertex program to GLSL/HLSL code
* Backend with text based shader can subclass this class and implement :
* - virtual std::string getFloatTypeName(usz elementCount) = 0;
* - virtual std::string getFunction(enum class FUNCTION) = 0;
* - virtual std::string compareFunction(enum class COMPARE, const std::string &, const std::string &) = 0;
* - virtual void insertHeader(std::stringstream &OS) = 0;
* - virtual void insertInputs(std::stringstream &OS) = 0;
* - virtual void insertOutputs(std::stringstream &OS) = 0;
* - virtual void insertConstants(std::stringstream &OS) = 0;
* - virtual void insertMainStart(std::stringstream &OS) = 0;
* - virtual void insertMainEnd(std::stringstream &OS) = 0;
*/
struct VertexProgramDecompiler
{
	// Decoded words of the instruction currently being translated.
	D0 d0;
	D1 d1;
	D2 d2;
	D3 d3;
	SRC src[3];

	// Condition-code comparison bits (combine to form the 3-bit 'cond' field).
	enum
	{
		lt = 0x1,
		eq = 0x2,
		gt = 0x4,
	};

	struct FuncInfo
	{
		u32 offset;
		std::string name;
	};

	// Per-instruction-slot generated source lines plus scope bookkeeping used
	// to emit structured control flow (branches/loops) in the output.
	struct Instruction
	{
		std::vector<std::string> body;
		int open_scopes;
		int close_scopes;
		int put_close_scopes;
		int do_count;

		void reset()
		{
			body.clear();
			put_close_scopes = open_scopes = close_scopes = do_count = 0;
		}
	};

	Instruction m_instructions[rsx::max_vertex_program_instructions];
	Instruction* m_cur_instr; // slot currently receiving generated code

	usz m_instr_count;
	std::vector<std::string> m_body;

	std::stack<u32> m_call_stack; // tracks CAL/RET nesting during decompilation

	const RSXVertexProgram& m_prog;
	ParamArray m_parr;

	std::set<u16> m_constant_ids; // transform constants referenced by the program

	static std::string NotZeroPositive(const std::string& code);
	std::string GetMask(bool is_sca) const;
	std::string GetVecMask();
	std::string GetScaMask();
	std::string GetDST(bool is_sca = false);
	std::string GetSRC(u32 n);
	std::string GetTex();
	std::string GetRawCond();
	std::string GetCond();
	std::string GetOptionalBranchCond() const;	//Conditional branch expression modified externally at runtime
	std::string AddAddrReg();
	std::string AddCondReg();
	u32 GetAddr() const;
	std::string Format(const std::string& code);

	void AddCodeCond(const std::string& lhs, const std::string& rhs);
	void AddCode(const std::string& code);
	void SetDST(bool is_sca, std::string value);
	void SetDSTVec(const std::string& code);
	void SetDSTSca(const std::string& code);
	std::string BuildCode();

protected:
	/** returns the type name of float vectors.
	*/
	virtual std::string getFloatTypeName(usz elementCount) = 0;

	/** returns the type name of int vectors.
	*/
	virtual std::string getIntTypeName(usz elementCount) = 0;

	/** returns string calling function where arguments are passed via
	* $0 $1 $2 substring.
	*/
	virtual std::string getFunction(FUNCTION) = 0;

	/** returns string calling comparison function on 2 args passed as strings.
	*/
	virtual std::string compareFunction(COMPARE, const std::string &, const std::string &, bool scalar = false) = 0;

	/** Insert header of shader file (eg #version, "system constants"...)
	*/
	virtual void insertHeader(std::stringstream &OS) = 0;

	/** Insert vertex declaration.
	*/
	virtual void insertInputs(std::stringstream &OS, const std::vector<ParamType> &inputs) = 0;

	/** insert global declaration of vertex shader outputs.
	*/
	virtual void insertConstants(std::stringstream &OS, const std::vector<ParamType> &constants) = 0;

	/** insert declaration of shader constants.
	*/
	virtual void insertOutputs(std::stringstream &OS, const std::vector<ParamType> &outputs) = 0;

	/** insert beginning of main (signature, temporary declaration...)
	*/
	virtual void insertMainStart(std::stringstream &OS) = 0;

	/** insert end of main function (return value, output copy...)
	*/
	virtual void insertMainEnd(std::stringstream &OS) = 0;

public:
	// Facts about the program discovered during decompilation; backends use
	// these to enable extra codegen paths.
	struct
	{
		bool has_lit_op = false;
		bool has_indexed_constants = false;
	}
	properties;

	VertexProgramDecompiler(const RSXVertexProgram& prog);
	std::string Decompile();
};
| 3,918
|
C++
|
.h
| 119
| 30.764706
| 113
| 0.736494
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,115
|
FragmentProgramDecompiler.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/FragmentProgramDecompiler.h
|
#pragma once
#include "ShaderParam.h"
#include "RSXFragmentProgram.h"
#include <sstream>
// Helper for GPR occupancy tracking.
// Each full-precision register rN aliases two half-precision registers
// h(2N) and h(2N+1): hK's four 16-bit components pack pairwise into the
// 32-bit lanes of rN (hK.xy -> one lane, hK.zw -> the next).
// last_write_half[c] records, per 32-bit lane c of rN, whether the most
// recent write came through a half register.
struct temp_register
{
	bool aliased_r0 = false;
	bool aliased_h0 = false; // h(2N), overlaps rN.xy
	bool aliased_h1 = false; // h(2N+1), overlaps rN.zw
	bool last_write_half[4] = { false, false, false, false };
	u32 real_index = -1;

	u32 h0_writes = 0u; // Number of writes to the first 64-bits of the register
	u32 h1_writes = 0u; // Number of writes to the last 64-bits of the register

	// Record a write to this register. 'index' is the raw register number
	// (half-register numbering when half_register is set); x/y/z/w are the
	// written components.
	void tag(u32 index, bool half_register, bool x, bool y, bool z, bool w)
	{
		if (half_register)
		{
			if (index & 1)
			{
				// Odd half register: its x/y pack into lane 2, z/w into lane 3.
				if (x) last_write_half[2] = true;
				if (y) last_write_half[2] = true;
				if (z) last_write_half[3] = true;
				if (w) last_write_half[3] = true;
				aliased_h1 = true;
				h1_writes++;
			}
			else
			{
				// Even half register: its x/y pack into lane 0, z/w into lane 1.
				if (x) last_write_half[0] = true;
				if (y) last_write_half[0] = true;
				if (z) last_write_half[1] = true;
				if (w) last_write_half[1] = true;
				aliased_h0 = true;
				h0_writes++;
			}
		}
		else
		{
			// Full-precision write: each component maps 1:1 to a lane and
			// overrides any previous half-register write to that lane.
			if (x) last_write_half[0] = false;
			if (y) last_write_half[1] = false;
			if (z) last_write_half[2] = false;
			if (w) last_write_half[3] = false;
			aliased_r0 = true;
			h0_writes++;
			h1_writes++;
		}

		if (real_index == umax)
		{
			// First tag establishes the canonical full-register index.
			if (half_register)
				real_index = index >> 1;
			else
				real_index = index;
		}
	}

	bool requires_gather(u8 channel) const
	{
		//Data fetched from the single precision register requires merging of the two half registers
		ensure(channel < 4);
		if (aliased_h0 && channel < 2)
		{
			return last_write_half[channel];
		}

		if (aliased_h1 && channel > 1)
		{
			return last_write_half[channel];
		}

		return false;
	}

	bool requires_split(u32 /*index*/) const
	{
		//Data fetched from any of the two half registers requires sync with the full register
		if (!(last_write_half[0] || last_write_half[1]) && aliased_r0)
		{
			//r0 has been written to
			//TODO: Check for specific elements in real32 register
			return true;
		}

		return false;
	}

	// Builds the expression merging half registers back into the full
	// register for a read through rN.
	std::string gather_r() const
	{
		std::string h0 = "h" + std::to_string(real_index << 1);
		std::string h1 = "h" + std::to_string(real_index << 1 | 1);
		std::string reg = "r" + std::to_string(real_index);
		std::string ret = "//Invalid gather";

		if (aliased_h0 && aliased_h1)
			ret = "(gather(" + h0 + ", " + h1 + "))";
		else if (aliased_h0)
			ret = "(gather(" + h0 + "), " + reg + ".zw)";
		else if (aliased_h1)
			ret = "(" + reg + ".xy, gather(" + h1 + "))";

		return ret;
	}
};
/**
* This class is used to translate RSX Fragment program to GLSL/HLSL code
* Backend with text based shader can subclass this class and implement :
* - virtual std::string getFloatTypeName(usz elementCount) = 0;
* - virtual std::string getHalfTypeName(usz elementCount) = 0;
* - virtual std::string getFunction(enum class FUNCTION) = 0;
* - virtual std::string saturate(const std::string &code) = 0;
* - virtual std::string compareFunction(enum class COMPARE, const std::string &, const std::string &) = 0;
* - virtual void insertHeader(std::stringstream &OS) = 0;
* - virtual void insertInputs(std::stringstream &OS) = 0;
* - virtual void insertOutputs(std::stringstream &OS) = 0;
* - virtual void insertConstants(std::stringstream &OS) = 0;
* - virtual void insertMainStart(std::stringstream &OS) = 0;
* - virtual void insertMainEnd(std::stringstream &OS) = 0;
*/
class FragmentProgramDecompiler
{
enum OPFLAGS
{
no_src_mask = 1,
src_cast_f32 = 2,
skip_type_cast = 4,
texture_ref = 8,
op_extern = src_cast_f32 | skip_type_cast,
};
OPDEST dst;
SRC0 src0;
SRC1 src1;
SRC2 src2;
u32 opflags;
std::string main;
u32& m_size;
u32 m_const_index = 0;
u32 m_offset;
u32 m_location = 0;
u32 m_loop_count;
int m_code_level;
std::vector<u32> m_end_offsets;
std::vector<u32> m_else_offsets;
bool m_is_valid_ucode = true;
std::array<temp_register, 64> temp_registers;
std::string GetMask() const;
void SetDst(std::string code, u32 flags = 0);
void AddCode(const std::string& code);
std::string AddReg(u32 index, bool fp16);
bool HasReg(u32 index, bool fp16);
std::string AddCond();
std::string AddConst();
std::string AddTex();
void AddFlowOp(const std::string& code);
std::string Format(const std::string& code, bool ignore_redirects = false);
// Support the transform-2d temp result for use with TEXBEM
std::string AddX2d();
// Prevents operations from overflowing the desired range (tested with fp_dynamic3 autotest sample, DS2 for src1.input_prec_mod)
std::string ClampValue(const std::string& code, u32 precision);
/**
* Returns true if the dst set is not a vector (i.e only a single component)
*/
bool DstExpectsSca() const;
void AddCodeCond(const std::string& lhs, const std::string& rhs);
std::string GetRawCond();
std::string GetCond();
template<typename T> std::string GetSRC(T src);
std::string BuildCode();
static u32 GetData(const u32 d) { return d << 16 | d >> 16; }
/**
* Emits code if opcode is an SCT/SCB one and returns true,
* otherwise do nothing and return false.
* NOTE: What does SCT means ???
*/
bool handle_sct_scb(u32 opcode);
/**
* Emits code if opcode is an TEX SRB one and returns true,
* otherwise do nothing and return false.
* NOTE: What does TEX SRB means ???
*/
bool handle_tex_srb(u32 opcode);
protected:
const RSXFragmentProgram &m_prog;
u32 m_ctrl = 0;
/** returns the type name of float vectors.
*/
virtual std::string getFloatTypeName(usz elementCount) = 0;
/** returns the type name of half vectors.
*/
virtual std::string getHalfTypeName(usz elementCount) = 0;
/** returns string calling function where arguments are passed via
* $0 $1 $2 substring.
*/
virtual std::string getFunction(FUNCTION) = 0;
/** returns string calling comparison function on 2 args passed as strings.
*/
virtual std::string compareFunction(COMPARE, const std::string &, const std::string &) = 0;
/** Insert header of shader file (eg #version, "system constants"...)
*/
virtual void insertHeader(std::stringstream &OS) = 0;
/** Insert global declaration of fragments inputs.
*/
virtual void insertInputs(std::stringstream &OS) = 0;
/** insert global declaration of fragments outputs.
*/
virtual void insertOutputs(std::stringstream &OS) = 0;
/** insert declaration of shader constants.
*/
virtual void insertConstants(std::stringstream &OS) = 0;
/** insert helper function definitions.
*/
virtual void insertGlobalFunctions(std::stringstream &OS) = 0;
/** insert beginning of main (signature, temporary declaration...)
*/
virtual void insertMainStart(std::stringstream &OS) = 0;
/** insert end of main function (return value, output copy...)
*/
virtual void insertMainEnd(std::stringstream &OS) = 0;
public:
enum : u16
{
in_wpos = (1 << 0),
in_diff_color = (1 << 1),
in_spec_color = (1 << 2),
in_fogc = (1 << 3),
in_tc0 = (1 << 4),
in_tc1 = (1 << 5),
in_tc2 = (1 << 6),
in_tc3 = (1 << 7),
in_tc4 = (1 << 8),
in_tc5 = (1 << 9),
in_tc6 = (1 << 10),
in_tc7 = (1 << 11),
in_tc8 = (1 << 12),
in_tc9 = (1 << 13),
in_ssa = (1 << 14)
};
struct
{
u16 in_register_mask = 0;
u16 common_access_sampler_mask = 0;
u16 shadow_sampler_mask = 0;
u16 redirected_sampler_mask = 0;
u16 multisampled_sampler_mask = 0;
bool has_lit_op = false;
bool has_gather_op = false;
bool has_no_output = false;
bool has_discard_op = false;
bool has_tex_op = false;
bool has_divsq = false;
bool has_clamp = false;
bool has_w_access = false;
bool has_exp_tex_op = false;
bool has_pkg = false;
bool has_upg = false;
bool has_dynamic_register_load = false;
bool has_tex1D = false;
bool has_tex2D = false;
bool has_tex3D = false;
bool has_texShadowProj = false;
}
properties;
struct
{
bool has_native_half_support = false;
bool emulate_depth_compare = false;
bool has_low_precision_rounding = false;
}
device_props;
ParamArray m_parr;
FragmentProgramDecompiler(const RSXFragmentProgram &prog, u32& size);
FragmentProgramDecompiler(const FragmentProgramDecompiler&) = delete;
FragmentProgramDecompiler(FragmentProgramDecompiler&&) = delete;
std::string Decompile();
};
| 8,196
|
C++
|
.h
| 266
| 28.033835
| 129
| 0.682141
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,116
|
RSXOverlay.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/RSXOverlay.h
|
#pragma once
#include <util/types.hpp>
namespace rsx
{
namespace overlays
{
// This is overlay common code meant only for render backends
enum class texture_sampling_mode : s32
{
none = 0,
font2D = 1,
font3D = 2,
texture2D = 3
};
class fragment_options
{
u32 value = 0;
enum e_offsets: s32
{
fragment_clip_bit = 0,
pulse_glow_bit = 1,
sampling_mode_bit = 2
};
public:
fragment_options& texture_mode(texture_sampling_mode mode)
{
value |= static_cast<s32>(mode) << e_offsets::sampling_mode_bit;
return *this;
}
fragment_options& pulse_glow(bool enable = true)
{
if (enable)
{
value |= (1 << e_offsets::pulse_glow_bit);
}
return *this;
}
fragment_options& clip_fragments(bool enable = true)
{
if (enable)
{
value |= (1 << e_offsets::fragment_clip_bit);
}
return *this;
}
u32 get() const
{
return value;
}
};
class vertex_options
{
u32 value = 0;
public:
vertex_options& disable_vertex_snap(bool enable)
{
value = enable ? 1 : 0;
return *this;
}
u32 get() const
{
return value;
}
};
}
}
| 1,258
|
C++
|
.h
| 66
| 13.909091
| 69
| 0.578632
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,117
|
program_util.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/program_util.h
|
#pragma once
#include "util/types.hpp"
#include "../gcm_enums.h"
namespace rsx
{
enum program_limits
{
max_vertex_program_instructions = 544
};
#pragma pack(push, 1)
// NOTE: This structure must be packed to match GPU layout (std140).
struct fragment_program_texture_config
{
struct TIU_slot
{
float scale[3];
float bias[3];
float clamp_min[2];
float clamp_max[2];
u32 remap;
u32 control;
}
slots_[16]; // QT headers will collide with any variable named 'slots' because reasons
TIU_slot& operator[](u32 index) { return slots_[index]; }
void write_to(void* dst, u16 mask) const;
void load_from(const void* src, u16 mask);
static void masked_transfer(void* dst, const void* src, u16 mask);
};
#pragma pack(pop)
struct fragment_program_texture_state
{
u32 texture_dimensions = 0;
u16 redirected_textures = 0;
u16 shadow_textures = 0;
u16 multisampled_textures = 0;
void clear(u32 index);
void import(const fragment_program_texture_state& other, u16 mask);
void set_dimension(texture_dimension_extended type, u32 index);
bool operator == (const fragment_program_texture_state& other) const;
};
struct vertex_program_texture_state
{
u32 texture_dimensions = 0;
u16 multisampled_textures = 0;
void clear(u32 index);
void import(const vertex_program_texture_state& other, u16 mask);
void set_dimension(texture_dimension_extended type, u32 index);
bool operator == (const vertex_program_texture_state& other) const;
};
struct VertexProgramBase
{
u32 id = 0;
std::vector<u16> constant_ids;
bool has_indexed_constants = false;
// Translates an incoming range of constants against our mapping.
// If there is no linear mapping available, return -1, otherwise returns the translated index of the first slot
// TODO: Move this somewhere else during refactor
int TranslateConstantsRange(int first_index, int count) const;
};
}
| 1,991
|
C++
|
.h
| 60
| 29.216667
| 114
| 0.718406
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,118
|
SPIRVCommon.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Program/SPIRVCommon.h
|
#pragma once
namespace glsl
{
enum program_domain : unsigned char;
enum glsl_rules : unsigned char;
}
namespace spirv
{
bool compile_glsl_to_spv(std::vector<u32>& spv, std::string& shader, ::glsl::program_domain domain, ::glsl::glsl_rules rules);
void initialize_compiler_context();
void finalize_compiler_context();
}
| 342
|
C++
|
.h
| 12
| 25.583333
| 128
| 0.734568
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.