Dataset schema (column: type, observed range or distinct values):

id: int64 (0 to 755k)
file_name: string (length 3 to 109)
file_path: string (length 13 to 185)
content: string (length 31 to 9.38M)
size: int64 (31 to 9.38M)
language: string (1 distinct value)
extension: string (11 distinct values)
total_lines: int64 (1 to 340k)
avg_line_length: float64 (2.18 to 149k)
max_line_length: int64 (7 to 2.22M)
alphanum_fraction: float64 (0 to 1)
repo_name: string (length 6 to 65)
repo_stars: int64 (100 to 47.3k)
repo_forks: int64 (0 to 12k)
repo_open_issues: int64 (0 to 3.4k)
repo_license: string (9 distinct values)
repo_extraction_date: string (92 distinct values)
exact_duplicates_redpajama: bool (2 distinct values)
near_duplicates_redpajama: bool (2 distinct values)
exact_duplicates_githubcode: bool (2 distinct values)
exact_duplicates_stackv2: bool (1 distinct value)
exact_duplicates_stackv1: bool (2 distinct values)
near_duplicates_githubcode: bool (2 distinct values)
near_duplicates_stackv1: bool (2 distinct values)
near_duplicates_stackv2: bool (1 distinct value)
id: 5,406
file_name: overlay_perf_metrics.cpp
file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_perf_metrics.cpp
content:
#include "stdafx.h"
#include "overlay_manager.h"
#include "overlay_perf_metrics.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/PPUThread.h"
#include <algorithm>
#include <utility>
#include <charconv>
#include <numeric> // std::accumulate (used in graph::record_datapoint)
#include "util/cpu_stats.hpp"
namespace rsx
{
namespace overlays
{
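// Converts an HTML-style hex color code ("RRGGBB" or "RRGGBBAA", optional leading '#')
// to a normalized color4f, with the alpha channel additionally scaled by the opacity argument.
// Worked example: convert_color_code("#FF8000", 0.5f) parses hexval = 0xFF8000 and
// yields color4f(1.0f, 128 / 255.f, 0.0f, 0.5f), alpha defaulting to 255 before scaling.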
inline color4f convert_color_code(std::string hex_color, f32 opacity = 1.0f)
{
if (hex_color.length() > 0 && hex_color[0] == '#')
{
hex_color.erase(0, 1);
}
unsigned hexval = 0;
const auto len = hex_color.length();
if (len != 6 && len != 8)
{
rsx_log.error("Incompatible color code: '%s' has wrong length: %d", hex_color, len);
return color4f(0.0f, 0.0f, 0.0f, 0.0f);
}
else
{
// auto&& [ptr, ec] = std::from_chars(hex_color.c_str(), hex_color.c_str() + len, &hexval, 16);
// if (ptr != hex_color.c_str() + len || ec)
// {
// rsx_log.error("Overlays: tried to convert incompatible color code: '%s'", hex_color);
// return color4f(0.0f, 0.0f, 0.0f, 0.0f);
// }
for (u32 i = 0; i < len; i++)
{
hexval <<= 4;
switch (char c = hex_color[i])
{
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
hexval |= (c - '0');
break;
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
hexval |= (c - 'a' + 10);
break;
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
hexval |= (c - 'A' + 10);
break;
default:
{
rsx_log.error("Overlays: invalid characters in color code: '%s'", hex_color);
return color4f(0.0f, 0.0f, 0.0f, 0.0f);
}
}
}
}
const int r = (len == 8 ? (hexval >> 24) : (hexval >> 16)) & 0xff;
const int g = (len == 8 ? (hexval >> 16) : (hexval >> 8)) & 0xff;
const int b = (len == 8 ? (hexval >> 8) : (hexval >> 0)) & 0xff;
const int a = len == 8 ? ((hexval >> 0) & 0xff) : 255;
return color4f(r / 255.f, g / 255.f, b / 255.f, a / 255.f * opacity);
}
void perf_metrics_overlay::reset_transform(label& elm) const
{
// left, top, right, bottom
const areau padding { m_padding, m_padding - std::min<u32>(4, m_padding), m_padding, m_padding };
const positionu margin { m_margin_x, m_margin_y };
positionu pos;
u16 graph_width = 0;
u16 graph_height = 0;
if (m_framerate_graph_enabled)
{
graph_width = std::max(graph_width, m_fps_graph.w);
graph_height += m_fps_graph.get_height();
}
if (m_frametime_graph_enabled)
{
graph_width = std::max(graph_width, m_frametime_graph.w);
graph_height += m_frametime_graph.get_height();
}
if (graph_height > 0 && m_body.h > 0)
{
graph_height += m_padding;
}
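// Anchor the overlay in the chosen quadrant of the virtual screen; the right/bottom
// quadrants subtract the body/graph extent so the margin applies to the outer edge.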
switch (m_quadrant)
{
case screen_quadrant::top_left:
pos.x = margin.x;
pos.y = margin.y;
break;
case screen_quadrant::top_right:
pos.x = virtual_width - std::max(m_body.w, graph_width) - margin.x;
pos.y = margin.y;
break;
case screen_quadrant::bottom_left:
pos.x = margin.x;
pos.y = virtual_height - m_body.h - graph_height - margin.y;
break;
case screen_quadrant::bottom_right:
pos.x = virtual_width - std::max(m_body.w, graph_width) - margin.x;
pos.y = virtual_height - m_body.h - graph_height - margin.y;
break;
}
if (m_center_x)
{
pos.x = (virtual_width - std::max(m_body.w, graph_width)) / 2;
}
if (m_center_y)
{
pos.y = (virtual_height - m_body.h - graph_height) / 2;
}
elm.set_pos(pos.x, pos.y);
elm.set_padding(padding.x1, padding.x2, padding.y1, padding.y2);
}
void perf_metrics_overlay::reset_transforms()
{
const u16 fps_graph_h = 60;
const u16 frametime_graph_h = 45;
if (m_framerate_graph_enabled)
{
m_fps_graph.set_size(m_fps_graph.w, fps_graph_h);
}
if (m_frametime_graph_enabled)
{
m_frametime_graph.set_size(m_frametime_graph.w, frametime_graph_h);
}
// Set body/titles transform
if (m_force_repaint)
{
reset_body();
reset_titles();
}
else
{
reset_transform(m_body);
reset_transform(m_titles);
}
if (m_framerate_graph_enabled || m_frametime_graph_enabled)
{
// Position the graphs within the body
const u16 graphs_width = m_body.w;
const u16 body_left = m_body.x;
s16 y_offset = m_body.y;
if (m_body.h > 0)
{
y_offset += static_cast<s16>(m_body.h + m_padding);
}
if (m_framerate_graph_enabled)
{
if (m_force_repaint)
{
m_fps_graph.set_font_size(static_cast<u16>(m_font_size * 0.8));
}
m_fps_graph.update();
m_fps_graph.set_pos(body_left, y_offset);
m_fps_graph.set_size(graphs_width, fps_graph_h);
y_offset += m_fps_graph.get_height();
}
if (m_frametime_graph_enabled)
{
if (m_force_repaint)
{
m_frametime_graph.set_font_size(static_cast<u16>(m_font_size * 0.8));
}
m_frametime_graph.update();
m_frametime_graph.set_pos(body_left, y_offset);
m_frametime_graph.set_size(graphs_width, frametime_graph_h);
}
}
m_force_repaint = false;
}
void perf_metrics_overlay::reset_body()
{
m_body.set_font(m_font.c_str(), m_font_size);
m_body.fore_color = convert_color_code(m_color_body, m_opacity);
m_body.back_color = convert_color_code(m_background_body, m_opacity);
reset_transform(m_body);
}
void perf_metrics_overlay::reset_titles()
{
m_titles.set_font(m_font.c_str(), m_font_size);
m_titles.fore_color = convert_color_code(m_color_title, m_opacity);
m_titles.back_color = convert_color_code(m_background_title, m_opacity);
reset_transform(m_titles);
switch (m_detail)
{
case detail_level::none: [[fallthrough]];
case detail_level::minimal: [[fallthrough]];
case detail_level::low: m_titles.set_text(""); break;
case detail_level::medium: m_titles.set_text(fmt::format("\n\n%s", title1_medium)); break;
case detail_level::high: m_titles.set_text(fmt::format("\n\n%s\n\n\n\n\n\n%s", title1_high, title2)); break;
}
m_titles.auto_resize();
m_titles.refresh();
}
void perf_metrics_overlay::init()
{
m_padding = m_font_size / 2;
m_fps_graph.set_one_percent_sort_high(false);
m_frametime_graph.set_one_percent_sort_high(true);
reset_transforms();
force_next_update();
if (!m_is_initialised)
{
m_update_timer.Start();
m_frametime_timer.Start();
}
update(get_system_time());
// The text might have changed during the update. Recalculate positions.
reset_transforms();
m_is_initialised = true;
visible = true;
}
void perf_metrics_overlay::set_framerate_graph_enabled(bool enabled)
{
if (m_framerate_graph_enabled == enabled)
return;
m_framerate_graph_enabled = enabled;
if (enabled)
{
m_fps_graph.set_title("Framerate: 00.0");
m_fps_graph.set_font_size(static_cast<u16>(m_font_size * 0.8));
m_fps_graph.set_color(convert_color_code(m_color_body, m_opacity));
m_fps_graph.set_guide_interval(10);
}
m_force_repaint = true;
}
void perf_metrics_overlay::set_frametime_graph_enabled(bool enabled)
{
if (m_frametime_graph_enabled == enabled)
return;
m_frametime_graph_enabled = enabled;
if (enabled)
{
m_frametime_graph.set_title("Frametime: 0.0");
m_frametime_graph.set_font_size(static_cast<u16>(m_font_size * 0.8));
m_frametime_graph.set_color(convert_color_code(m_color_body, m_opacity));
m_frametime_graph.set_guide_interval(8);
}
m_force_repaint = true;
}
void perf_metrics_overlay::set_framerate_datapoint_count(u32 datapoint_count)
{
if (m_fps_graph.get_datapoint_count() == datapoint_count)
return;
m_fps_graph.set_count(datapoint_count);
m_force_repaint = true;
}
void perf_metrics_overlay::set_frametime_datapoint_count(u32 datapoint_count)
{
if (m_frametime_graph.get_datapoint_count() == datapoint_count)
return;
m_frametime_graph.set_count(datapoint_count);
m_force_repaint = true;
}
void perf_metrics_overlay::set_graph_detail_levels(perf_graph_detail_level framerate_level, perf_graph_detail_level frametime_level)
{
m_fps_graph.set_labels_visible(
framerate_level == perf_graph_detail_level::show_all || framerate_level == perf_graph_detail_level::show_min_max,
framerate_level == perf_graph_detail_level::show_all || framerate_level == perf_graph_detail_level::show_one_percent_avg);
m_frametime_graph.set_labels_visible(
frametime_level == perf_graph_detail_level::show_all || frametime_level == perf_graph_detail_level::show_min_max,
frametime_level == perf_graph_detail_level::show_all || frametime_level == perf_graph_detail_level::show_one_percent_avg);
m_force_repaint = true;
}
void perf_metrics_overlay::set_detail_level(detail_level level)
{
if (m_detail == level)
return;
m_detail = level;
m_force_repaint = true;
}
void perf_metrics_overlay::set_position(screen_quadrant quadrant)
{
if (m_quadrant == quadrant)
return;
m_quadrant = quadrant;
m_force_repaint = true;
}
// In ms
void perf_metrics_overlay::set_update_interval(u32 update_interval)
{
m_update_interval = update_interval;
}
void perf_metrics_overlay::set_font(std::string font)
{
if (m_font == font)
return;
m_font = std::move(font);
m_force_repaint = true;
}
void perf_metrics_overlay::set_font_size(u16 font_size)
{
if (m_font_size == font_size)
return;
m_font_size = font_size;
m_padding = m_font_size / 2;
m_force_repaint = true;
}
void perf_metrics_overlay::set_margins(u32 margin_x, u32 margin_y, bool center_x, bool center_y)
{
if (m_margin_x == margin_x && m_margin_y == margin_y && m_center_x == center_x && m_center_y == center_y)
return;
m_margin_x = margin_x;
m_margin_y = margin_y;
m_center_x = center_x;
m_center_y = center_y;
m_force_repaint = true;
}
void perf_metrics_overlay::set_opacity(f32 opacity)
{
if (m_opacity == opacity)
return;
m_opacity = opacity;
m_force_repaint = true;
}
void perf_metrics_overlay::set_body_colors(std::string color, std::string background)
{
if (m_color_body == color && m_background_body == background)
return;
m_color_body = std::move(color);
m_background_body = std::move(background);
m_force_repaint = true;
}
void perf_metrics_overlay::set_title_colors(std::string color, std::string background)
{
if (m_color_title == color && m_background_title == background)
return;
m_color_title = std::move(color);
m_background_title = std::move(background);
m_force_repaint = true;
}
void perf_metrics_overlay::force_next_update()
{
m_force_update = true;
}
void perf_metrics_overlay::update(u64 /*timestamp_us*/)
{
const auto elapsed_update = m_update_timer.GetElapsedTimeInMilliSec();
const bool do_update = m_force_update || elapsed_update >= m_update_interval;
if (m_is_initialised)
{
if (m_frametime_graph_enabled && !m_force_update)
{
const float elapsed_frame = static_cast<float>(m_frametime_timer.GetElapsedTimeInMilliSec());
m_frametime_timer.Start();
m_frametime_graph.record_datapoint(elapsed_frame, do_update);
m_frametime_graph.set_title(fmt::format("Frametime: %4.1f", elapsed_frame).c_str());
}
if (m_force_repaint)
{
reset_transforms();
}
}
if (!m_force_update)
{
++m_frames;
}
if (do_update)
{
// 1. Fetch/calculate metrics we'll need
if (!m_is_initialised || !m_force_update)
{
m_update_timer.Start();
auto& rsx_thread = g_fxo->get<rsx::thread>();
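// The cases below intentionally fall through, so a higher detail level also
// gathers every metric required by the levels beneath it in a single pass.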
switch (m_detail)
{
case detail_level::high:
{
m_frametime = std::max(0.f, static_cast<float>(elapsed_update / m_frames));
m_rsx_load = rsx_thread.get_load();
m_total_threads = utils::cpu_stats::get_current_thread_count();
[[fallthrough]];
}
case detail_level::medium:
{
m_ppus = idm::select<named_thread<ppu_thread>>([this](u32, named_thread<ppu_thread>& ppu)
{
m_ppu_cycles += thread_ctrl::get_cycles(ppu);
});
m_spus = idm::select<named_thread<spu_thread>>([this](u32, named_thread<spu_thread>& spu)
{
m_spu_cycles += thread_ctrl::get_cycles(spu);
});
m_rsx_cycles += rsx_thread.get_cycles();
m_total_cycles = std::max<u64>(1, m_ppu_cycles + m_spu_cycles + m_rsx_cycles);
m_cpu_usage = static_cast<f32>(m_cpu_stats.get_usage());
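// Apportion the process-wide CPU usage across PPU/SPU/RSX by their share of
// emulated cycles. Example: 40% total usage with a 50/30/20 cycle split
// displays as PPU 20%, SPU 12%, RSX 8%.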
m_ppu_usage = std::clamp(m_cpu_usage * m_ppu_cycles / m_total_cycles, 0.f, 100.f);
m_spu_usage = std::clamp(m_cpu_usage * m_spu_cycles / m_total_cycles, 0.f, 100.f);
m_rsx_usage = std::clamp(m_cpu_usage * m_rsx_cycles / m_total_cycles, 0.f, 100.f);
[[fallthrough]];
}
case detail_level::low:
{
if (m_detail == detail_level::low) // otherwise already acquired in medium
m_cpu_usage = static_cast<f32>(m_cpu_stats.get_usage());
[[fallthrough]];
}
case detail_level::minimal:
{
[[fallthrough]];
}
case detail_level::none:
{
m_fps = std::max(0.f, static_cast<f32>(m_frames / (elapsed_update / 1000)));
if (m_is_initialised && m_framerate_graph_enabled)
{
m_fps_graph.record_datapoint(m_fps, true);
m_fps_graph.set_title(fmt::format("Framerate: %04.1f", m_fps).c_str());
}
break;
}
}
}
// 2. Format output string
std::string perf_text;
switch (m_detail)
{
case detail_level::none:
{
break;
}
case detail_level::minimal:
{
fmt::append(perf_text, "FPS : %05.2f", m_fps);
break;
}
case detail_level::low:
{
fmt::append(perf_text, "FPS : %05.2f\n"
"CPU : %04.1f %%",
m_fps, m_cpu_usage);
break;
}
case detail_level::medium:
{
fmt::append(perf_text, "FPS : %05.2f\n\n"
"%s\n"
" PPU : %04.1f %%\n"
" SPU : %04.1f %%\n"
" RSX : %04.1f %%\n"
" Total : %04.1f %%",
m_fps, std::string(title1_medium.size(), ' '), m_ppu_usage, m_spu_usage, m_rsx_usage, m_cpu_usage);
break;
}
case detail_level::high:
{
fmt::append(perf_text, "FPS : %05.2f (%03.1fms)\n\n"
"%s\n"
" PPU : %04.1f %% (%2u)\n"
" SPU : %04.1f %% (%2u)\n"
" RSX : %04.1f %% ( 1)\n"
" Total : %04.1f %% (%2u)\n\n"
"%s\n"
" RSX : %02u %%",
m_fps, m_frametime, std::string(title1_high.size(), ' '), m_ppu_usage, m_ppus, m_spu_usage, m_spus, m_rsx_usage, m_cpu_usage, m_total_threads, std::string(title2.size(), ' '), m_rsx_load);
break;
}
}
m_body.set_text(perf_text);
if (perf_text.empty())
{
if (m_body.w > 0 || m_body.h > 0)
{
m_body.set_size(0, 0);
reset_transforms();
}
}
else if (m_body.auto_resize())
{
reset_transforms();
}
m_body.refresh();
if (!m_force_update)
{
m_frames = 0;
}
else
{
// Only force once
m_force_update = false;
}
if (m_framerate_graph_enabled)
{
m_fps_graph.update();
}
if (m_frametime_graph_enabled)
{
m_frametime_graph.update();
}
}
}
compiled_resource perf_metrics_overlay::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource compiled_resources = m_body.get_compiled();
compiled_resources.add(m_titles.get_compiled());
if (m_framerate_graph_enabled)
{
compiled_resources.add(m_fps_graph.get_compiled());
}
if (m_frametime_graph_enabled)
{
compiled_resources.add(m_frametime_graph.get_compiled());
}
return compiled_resources;
}
graph::graph()
{
m_label.set_font("e046323ms.ttf", 8);
m_label.alignment = text_align::center;
m_label.fore_color = { 1.f, 1.f, 1.f, 1.f };
m_label.back_color = { 0.f, 0.f, 0.f, .7f };
back_color = { 0.f, 0.f, 0.f, 0.5f };
}
void graph::set_pos(s16 _x, s16 _y)
{
m_label.set_pos(_x, _y);
overlay_element::set_pos(_x, _y + m_label.h);
}
void graph::set_size(u16 _w, u16 _h)
{
m_label.set_size(_w, m_label.h);
overlay_element::set_size(_w, _h);
}
void graph::set_title(const char* title)
{
m_title = title;
}
void graph::set_font(const char* font_name, u16 font_size)
{
m_label.set_font(font_name, font_size);
}
void graph::set_font_size(u16 font_size)
{
const auto font_name = m_label.get_font()->get_name().data();
m_label.set_font(font_name, font_size);
}
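// Resize the datapoint history: an empty buffer is seeded with -1 sentinels
// (skipped by the stats pass), shrinking keeps the newest tail, and growing
// pads the front with sentinels so recent history stays at the end.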
void graph::set_count(u32 datapoint_count)
{
m_datapoint_count = datapoint_count;
if (m_datapoints.empty())
{
m_datapoints.resize(m_datapoint_count, -1.0f);
}
else if (m_datapoint_count < m_datapoints.size())
{
std::copy(m_datapoints.begin() + m_datapoints.size() - m_datapoint_count, m_datapoints.end(), m_datapoints.begin());
m_datapoints.resize(m_datapoint_count);
}
else
{
m_datapoints.insert(m_datapoints.begin(), m_datapoint_count - m_datapoints.size(), -1.0f);
}
}
void graph::set_color(color4f color)
{
m_color = color;
}
void graph::set_guide_interval(f32 guide_interval)
{
m_guide_interval = guide_interval;
}
void graph::set_labels_visible(bool show_min_max, bool show_1p_avg)
{
m_show_min_max = show_min_max;
m_show_1p_avg = show_1p_avg;
}
void graph::set_one_percent_sort_high(bool sort_1p_high)
{
m_1p_sort_high = sort_1p_high;
}
u16 graph::get_height() const
{
return h + m_label.h + m_label.padding_top + m_label.padding_bottom;
}
u32 graph::get_datapoint_count() const
{
return m_datapoint_count;
}
void graph::record_datapoint(f32 datapoint, bool update_metrics)
{
ensure(datapoint >= 0.0f);
// std::deque is only faster for large sizes, so just use a std::vector and resize once in a while
// Record datapoint
m_datapoints.push_back(datapoint);
// Cull vector when it gets large
if (m_datapoints.size() > m_datapoint_count * 16ull)
{
std::copy(m_datapoints.begin() + m_datapoints.size() - m_datapoint_count, m_datapoints.end(), m_datapoints.begin());
m_datapoints.resize(m_datapoint_count);
}
if (!update_metrics)
{
return;
}
m_min = max_v<f32>;
m_max = 0.0f;
m_avg = 0.0f;
m_1p = 0.0f;
std::vector<f32> valid_datapoints;
// Make sure min/max reflects the data being displayed, not the entire datapoints vector
for (usz i = m_datapoints.size() - m_datapoint_count; i < m_datapoints.size(); i++)
{
const f32& dp = m_datapoints[i];
if (dp < 0) continue; // Skip initial negative values. They don't count.
m_min = std::min(m_min, dp);
m_max = std::max(m_max, dp);
m_avg += dp;
if (m_show_1p_avg)
{
valid_datapoints.push_back(dp);
}
}
// Sanitize min value
m_min = std::min(m_min, m_max);
if (m_show_1p_avg && !valid_datapoints.empty())
{
// Sort datapoints (we are only interested in the lowest/highest 1%)
const usz i_1p = valid_datapoints.size() / 100;
const usz n_1p = i_1p + 1;
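// Example: with 200 valid datapoints, i_1p = 2 and n_1p = 3; nth_element moves the
// three most extreme values to the front and m_1p below averages just those.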
if (m_1p_sort_high)
std::nth_element(valid_datapoints.begin(), valid_datapoints.begin() + i_1p, valid_datapoints.end(), std::greater<f32>());
else
std::nth_element(valid_datapoints.begin(), valid_datapoints.begin() + i_1p, valid_datapoints.end());
// Calculate statistics
m_avg /= valid_datapoints.size();
m_1p = std::accumulate(valid_datapoints.begin(), valid_datapoints.begin() + n_1p, 0.0f) / static_cast<float>(n_1p);
}
}
void graph::update()
{
std::string fps_info = m_title;
if (m_show_1p_avg)
{
fmt::append(fps_info, "\n1%%:%4.1f av:%4.1f", m_1p, m_avg);
}
if (m_show_min_max)
{
fmt::append(fps_info, "\nmn:%4.1f mx:%4.1f", m_min, m_max);
}
m_label.set_text(fps_info);
m_label.set_padding(4, 4, 0, 4);
m_label.auto_resize();
m_label.refresh();
// If label horizontal end is larger, widen graph width to match it
set_size(std::max(m_label.w, w), h);
}
compiled_resource& graph::get_compiled()
{
if (is_compiled)
{
return compiled_resources;
}
overlay_element::get_compiled();
const f32 normalize_factor = f32(h) / (m_max != 0.0f ? m_max : 1.0f);
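// Example: with h = 60 px and m_max = 120 (fps), normalize_factor = 0.5 px per unit,
// so a 60 fps datapoint is drawn 30 px above the graph's bottom edge.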
// Don't show guide lines if they'd be more dense than 1 guide line every 3 pixels
const bool guides_too_dense = (m_max / m_guide_interval) > (h / 3.0f);
if (m_guide_interval > 0 && !guides_too_dense)
{
auto& cmd_guides = compiled_resources.append({});
auto& config_guides = cmd_guides.config;
config_guides.color = { 1.f, 1.f, 1.f, .2f };
config_guides.primitives = primitive_type::line_list;
auto& verts_guides = compiled_resources.draw_commands.back().verts;
for (auto y_off = m_guide_interval; y_off < m_max; y_off += m_guide_interval)
{
const f32 guide_y = y + h - y_off * normalize_factor;
verts_guides.emplace_back(x, guide_y);
verts_guides.emplace_back(static_cast<float>(x + w), guide_y);
}
}
auto& cmd_graph = compiled_resources.append({});
auto& config_graph = cmd_graph.config;
config_graph.color = m_color;
config_graph.primitives = primitive_type::line_strip;
auto& verts_graph = compiled_resources.draw_commands.back().verts;
f32 x_stride = w;
if (m_datapoint_count > 2)
{
x_stride /= (m_datapoint_count - 1);
}
const usz tail_index_offset = m_datapoints.size() - m_datapoint_count;
for (u32 i = 0; i < m_datapoint_count; ++i)
{
const f32 x_line = x + i * x_stride;
const f32 y_line = y + h - (std::max(0.0f, m_datapoints[i + tail_index_offset]) * normalize_factor);
verts_graph.emplace_back(x_line, y_line);
}
compiled_resources.add(m_label.get_compiled());
return compiled_resources;
}
extern void reset_performance_overlay()
{
if (!g_cfg.misc.use_native_interface)
return;
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
auto& perf_settings = g_cfg.video.perf_overlay;
auto perf_overlay = manager->get<rsx::overlays::perf_metrics_overlay>();
if (perf_settings.perf_overlay_enabled)
{
if (!perf_overlay)
{
perf_overlay = manager->create<rsx::overlays::perf_metrics_overlay>();
}
std::lock_guard lock(*manager);
perf_overlay->set_detail_level(perf_settings.level);
perf_overlay->set_position(perf_settings.position);
perf_overlay->set_update_interval(perf_settings.update_interval);
perf_overlay->set_font(perf_settings.font);
perf_overlay->set_font_size(perf_settings.font_size);
perf_overlay->set_margins(perf_settings.margin_x, perf_settings.margin_y, perf_settings.center_x.get(), perf_settings.center_y.get());
perf_overlay->set_opacity(perf_settings.opacity / 100.f);
perf_overlay->set_body_colors(perf_settings.color_body, perf_settings.background_body);
perf_overlay->set_title_colors(perf_settings.color_title, perf_settings.background_title);
perf_overlay->set_framerate_datapoint_count(perf_settings.framerate_datapoint_count);
perf_overlay->set_frametime_datapoint_count(perf_settings.frametime_datapoint_count);
perf_overlay->set_framerate_graph_enabled(perf_settings.framerate_graph_enabled.get());
perf_overlay->set_frametime_graph_enabled(perf_settings.frametime_graph_enabled.get());
perf_overlay->set_graph_detail_levels(perf_settings.framerate_graph_detail_level.get(), perf_settings.frametime_graph_detail_level.get());
perf_overlay->init();
}
else if (perf_overlay)
{
manager->remove<rsx::overlays::perf_metrics_overlay>();
}
}
}
} // namespace overlays
} // namespace rsx
size: 24,280
language: C++
extension: .cpp
total_lines: 754
avg_line_length: 27.005305
max_line_length: 197
alphanum_fraction: 0.623234
repo_name: RPCS3/rpcs3
repo_stars: 15,204
repo_forks: 1,895
repo_open_issues: 1,021
repo_license: GPL-2.0
repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false
near_duplicates_redpajama: false
exact_duplicates_githubcode: false
exact_duplicates_stackv2: false
exact_duplicates_stackv1: false
near_duplicates_githubcode: true
near_duplicates_stackv1: false
near_duplicates_stackv2: false
id: 5,407
file_name: overlay_osk.cpp
file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_osk.cpp
content:
#include "stdafx.h"
#include "overlay_manager.h"
#include "overlay_osk.h"
#include "Emu/Cell/Modules/cellSysutil.h"
#include "Emu/Cell/Modules/cellMsgDialog.h"
#include "Emu/Cell/Modules/cellKb.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
LOG_CHANNEL(osk, "OSK");
namespace rsx
{
namespace overlays
{
osk_dialog::osk_dialog()
{
m_auto_repeat_buttons.insert(pad_button::L1);
m_auto_repeat_buttons.insert(pad_button::R1);
m_auto_repeat_buttons.insert(pad_button::cross);
m_auto_repeat_buttons.insert(pad_button::triangle);
m_auto_repeat_buttons.insert(pad_button::square);
m_keyboard_input_enabled = true;
}
void osk_dialog::Close(s32 status)
{
osk.notice("Closing osk (status=%d)", status);
if (status == FAKE_CELL_OSKDIALOG_CLOSE_TERMINATE)
{
close(false, true);
return;
}
if (status != FAKE_CELL_OSKDIALOG_CLOSE_ABORT && m_use_separate_windows && continuous_mode == CELL_OSKDIALOG_CONTINUOUS_MODE_REMAIN_OPEN)
{
// Just call the on_osk_close and don't actually close the dialog.
if (on_osk_close)
{
Emu.CallFromMainThread([this, status]()
{
on_osk_close(status);
});
}
return;
}
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.duration_sec = 0.5f;
fade_animation.on_finish = [this, status]
{
if (on_osk_close)
{
Emu.CallFromMainThread([this, status]()
{
on_osk_close(status);
});
}
set_visible(false);
// Only really close the dialog if we aren't in the continuous separate window mode
if (!m_use_separate_windows || continuous_mode == CELL_OSKDIALOG_CONTINUOUS_MODE_NONE)
{
close(true, true);
return;
}
};
fade_animation.active = true;
}
void osk_dialog::Clear(bool clear_all_data)
{
// Try to lock. Clear might be called recursively.
const bool locked = m_preview_mutex.try_lock();
osk.notice("Clearing osk (clear_all_data=%d)", clear_all_data);
m_preview.caret_position = 0;
m_preview.set_text({});
if (clear_all_data)
{
on_text_changed();
}
if (locked)
{
m_preview_mutex.unlock();
}
m_update = true;
}
void osk_dialog::SetText(const std::u16string& text)
{
// Try to lock. SetText might be called recursively.
const bool locked = m_preview_mutex.try_lock();
const std::u16string new_str = text.length() <= char_limit ? text : text.substr(0, char_limit);
osk.notice("Setting osk text (text='%s', new_str='%s', char_limit=%d)", utf16_to_ascii8(text), utf16_to_ascii8(new_str), char_limit);
m_preview.caret_position = new_str.length();
m_preview.set_unicode_text(utf16_to_u32string(new_str));
on_text_changed();
if (locked)
{
m_preview_mutex.unlock();
}
m_update = true;
}
void osk_dialog::Insert(const std::u16string& text)
{
// Try to lock. Insert might be called recursively.
const bool locked = m_preview_mutex.try_lock();
osk.notice("Inserting into osk at position %d (text='%s', char_limit=%d)", m_preview.caret_position, utf16_to_ascii8(text), char_limit);
// Append to output text
if (m_preview.value.empty())
{
const std::u16string new_str = text.length() <= char_limit ? text : text.substr(0, char_limit);
m_preview.caret_position = new_str.length();
m_preview.set_unicode_text(utf16_to_u32string(new_str));
}
else if ((m_preview.value.length() + text.length()) <= char_limit)
{
m_preview.insert_text(utf16_to_u32string(text));
}
else
{
osk.notice("Can't insert into osk: Character limit reached.");
}
on_text_changed();
if (locked)
{
m_preview_mutex.unlock();
}
m_update = true;
}
void osk_dialog::add_panel(const osk_panel& panel)
{
// On PS3 apparently only 7 panels are added, the rest are ignored
if (m_panels.size() < 7)
{
// Don't add this panel if there already exists one with the same panel mode
if (std::none_of(m_panels.begin(), m_panels.end(), [&panel](const osk_panel& existing) { return existing.osk_panel_mode == panel.osk_panel_mode; }))
{
m_panels.push_back(panel);
}
}
}
void osk_dialog::step_panel(bool next_panel)
{
const usz num_panels = m_panels.size();
if (num_panels > 0)
{
if (next_panel)
{
m_panel_index = (m_panel_index + 1) % num_panels;
}
else if (m_panel_index > 0)
{
m_panel_index = (m_panel_index - 1) % num_panels;
}
else
{
m_panel_index = num_panels - 1;
}
}
update_panel();
}
void osk_dialog::update_panel()
{
ensure(m_panel_index < m_panels.size());
const auto& panel = m_panels[m_panel_index];
num_rows = panel.num_rows;
num_columns = panel.num_columns;
cell_size_x = get_scaled(panel.cell_size_x);
cell_size_y = get_scaled(panel.cell_size_y);
update_layout();
const u32 cell_count = num_rows * num_columns;
m_grid.resize(cell_count);
num_shift_layers_by_charset.clear();
const position2u grid_origin(m_panel_frame.x, m_panel_frame.y);
const u32 old_index = (selected_y * num_columns) + selected_x;
u32 index = 0;
for (const auto& props : panel.layout)
{
for (u32 c = 0; c < props.num_cell_hz; ++c)
{
const auto row = (index / num_columns);
const auto col = (index % num_columns);
ensure(row < num_rows && col < num_columns);
auto& _cell = m_grid[index++];
_cell.button_flag = props.type_flags;
_cell.pos = { grid_origin.x + col * cell_size_x, grid_origin.y + row * cell_size_y };
_cell.backcolor = props.color;
_cell.callback = props.callback;
_cell.outputs = props.outputs;
_cell.selected = false;
// Add shift layers
for (u32 layer = 0; layer < _cell.outputs.size(); ++layer)
{
// Only add a shift layer if at least one default button has content in a layer
if (props.type_flags != button_flags::_default)
{
continue;
}
usz cell_shift_layers = 0;
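// Highest non-empty output layer for this cell, counted as last non-empty index + 1.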
for (usz i = 0; i < _cell.outputs[layer].size(); ++i)
{
if (_cell.outputs[layer][i].empty() == false)
{
cell_shift_layers = i + 1;
}
}
if (layer >= num_shift_layers_by_charset.size())
{
num_shift_layers_by_charset.push_back(static_cast<u32>(cell_shift_layers));
}
else
{
num_shift_layers_by_charset[layer] = std::max(num_shift_layers_by_charset[layer], static_cast<u32>(cell_shift_layers));
}
}
switch (props.type_flags)
{
default:
case button_flags::_default:
_cell.enabled = true;
break;
case button_flags::_space:
_cell.enabled = !(flags & CELL_OSKDIALOG_NO_SPACE);
break;
case button_flags::_return:
_cell.enabled = !(flags & CELL_OSKDIALOG_NO_RETURN);
break;
case button_flags::_shift:
_cell.enabled |= !_cell.outputs.empty();
break;
case button_flags::_layer:
_cell.enabled |= !num_shift_layers_by_charset.empty();
break;
}
if (props.num_cell_hz == 1) [[likely]]
{
_cell.flags = border_flags::default_cell;
}
else if (c == 0)
{
// Leading cell
_cell.flags = border_flags::start_cell;
}
else if (c == (props.num_cell_hz - 1))
{
// Last cell
_cell.flags = border_flags::end_cell;
}
else
{
// Middle cell
_cell.flags = border_flags::middle_cell;
}
}
}
ensure(num_shift_layers_by_charset.size());
for (u32 layer = 0; layer < num_shift_layers_by_charset.size(); ++layer)
{
ensure(num_shift_layers_by_charset[layer]);
}
// Reset to first shift layer in the first charset, because the panel changed and we don't know if the layers are similar between panels.
m_selected_charset = 0;
selected_z = 0;
// Enable/Disable the control buttons based on the current layout.
update_controls();
// Roughly keep x and y selection in grid if possible. Jumping to (0,0) would be annoying. Needs to be done after updating the control buttons.
update_selection_by_index(old_index);
m_update = true;
}
void osk_dialog::update_layout()
{
const bool show_panel = m_show_panel || !m_use_separate_windows;
// The title is omitted in separate window mode
const u16 title_height = m_use_separate_windows ? 0 : get_scaled(30);
const u16 preview_height = get_scaled((flags & CELL_OSKDIALOG_NO_RETURN) ? 40 : 90);
// Place elements with absolute positioning
const u16 button_margin = get_scaled(30);
const u16 button_height = get_scaled(30);
const u16 panel_w = show_panel ? (num_columns * cell_size_x) : 0;
const u16 panel_h = show_panel ? (num_rows * cell_size_y) : 0;
const u16 input_w = m_use_separate_windows ? m_input_field_window_width : panel_w;
const u16 input_h = title_height + preview_height;
const u16 button_h = show_panel ? (button_height + button_margin) : 0;
const u16 total_w = std::max(input_w, panel_w);
const u16 total_h = input_h + panel_h + button_h;
// The cellOskDialog's origin is at the center of the screen with positive values increasing toward the right and upper directions.
// The layout mode tells us which corner of the dialog we should use for positioning.
// With CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_LEFT and CELL_OSKDIALOG_LAYOUTMODE_Y_ALIGN_TOP and a point (0,0) the dialog's top left corner would be in the center of the screen.
// With CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_CENTER and CELL_OSKDIALOG_LAYOUTMODE_Y_ALIGN_CENTER and a point (0,0) the dialog would be exactly in the center of the screen.
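// Worked example (assuming the overlays' 1280x720 virtual resolution): a point (0,0)
// with CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_CENTER resolves to x = 640 - width / 2 in overlay coordinates.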
// TODO: Make sure separate windows don't overlap.
// Calculate initial position and analog movement range.
constexpr f32 margin = 50.0f; // Let's add a minimal margin on all sides
const s16 x_min = static_cast<s16>(margin);
const s16 x_max = static_cast<s16>(static_cast<f32>(virtual_width - total_w) - margin);
const s16 y_min = static_cast<s16>(margin);
const s16 y_max = static_cast<s16>(static_cast<f32>(virtual_height - total_h) - margin);
s16 input_x = 0;
s16 input_y = 0;
s16 panel_x = 0;
s16 panel_y = 0;
// x pos should only be 0 the first time, because we always add a margin
if (m_x_input_pos == 0)
{
const auto get_x = [](const osk_window_layout& layout, const u16& width) -> f32
{
constexpr f32 origin_x = virtual_width / 2.0f;
const f32 x = origin_x + layout.x_offset;
switch (layout.x_align)
{
case CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_RIGHT:
return x - width;
case CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_CENTER:
return x - (width / 2.0f);
case CELL_OSKDIALOG_LAYOUTMODE_X_ALIGN_LEFT:
default:
return x;
}
};
const auto get_y = [](const osk_window_layout& layout, const u16& height) -> f32
{
constexpr f32 origin_y = virtual_height / 2.0f;
const f32 y = origin_y - layout.y_offset; // Negative because we increase y towards the bottom and cellOsk increases y towards the top.
switch (layout.y_align)
{
case CELL_OSKDIALOG_LAYOUTMODE_Y_ALIGN_BOTTOM:
return y - height;
case CELL_OSKDIALOG_LAYOUTMODE_Y_ALIGN_CENTER:
return y - (height / 2.0f);
case CELL_OSKDIALOG_LAYOUTMODE_Y_ALIGN_TOP:
default:
return y;
}
};
if (m_use_separate_windows)
{
input_x = m_x_input_pos = static_cast<s16>(std::clamp<f32>(get_x(m_input_layout, input_w), x_min, x_max));
input_y = m_y_input_pos = static_cast<s16>(std::clamp<f32>(get_y(m_input_layout, input_h), y_min, y_max));
panel_x = m_x_panel_pos = static_cast<s16>(std::clamp<f32>(get_x(m_panel_layout, panel_w), x_min, x_max));
panel_y = m_y_panel_pos = static_cast<s16>(std::clamp<f32>(get_y(m_panel_layout, panel_h), static_cast<f32>(y_min + input_h), static_cast<f32>(y_max + input_h)));
}
else
{
input_x = panel_x = m_x_input_pos = m_x_panel_pos = static_cast<s16>(std::clamp<f32>(get_x(m_layout, total_w), x_min, x_max));
input_y = m_y_input_pos = static_cast<s16>(std::clamp<f32>(get_y(m_layout, total_h), y_min, y_max));
panel_y = m_y_panel_pos = input_y + input_h;
}
}
else if (m_use_separate_windows)
{
input_x = m_x_input_pos = std::clamp(m_x_input_pos, x_min, x_max);
input_y = m_y_input_pos = std::clamp(m_y_input_pos, y_min, y_max);
panel_x = m_x_panel_pos = std::clamp(m_x_panel_pos, x_min, x_max);
panel_y = m_y_panel_pos = std::clamp<s16>(m_y_panel_pos, y_min + input_h, y_max + input_h);
}
else
{
input_x = panel_x = m_x_input_pos = m_x_panel_pos = std::clamp(m_x_input_pos, x_min, x_max);
input_y = m_y_input_pos = std::clamp(m_y_input_pos, y_min, y_max);
panel_y = m_y_panel_pos = input_y + input_h;
}
m_input_frame.set_pos(input_x, input_y);
m_input_frame.set_size(input_w, input_h);
m_panel_frame.set_pos(panel_x, panel_y);
m_panel_frame.set_size(panel_w, panel_h);
m_title.set_pos(input_x, input_y);
m_title.set_size(input_w, title_height);
m_title.set_padding(get_scaled(15), 0, get_scaled(5), 0);
m_preview.set_pos(input_x, input_y + title_height);
m_preview.set_size(input_w, preview_height);
m_preview.set_padding(get_scaled(15), 0, get_scaled(10), 0);
const s16 button_y = panel_y + panel_h + button_margin;
m_btn_cancel.set_pos(panel_x, button_y);
m_btn_cancel.set_size(get_scaled(140), button_height);
m_btn_cancel.set_text(localized_string_id::RSX_OVERLAYS_OSK_DIALOG_CANCEL);
m_btn_cancel.set_text_vertical_adjust(get_scaled(5));
m_btn_space.set_pos(panel_x + get_scaled(100), button_y);
m_btn_space.set_size(get_scaled(100), button_height);
m_btn_space.set_text(localized_string_id::RSX_OVERLAYS_OSK_DIALOG_SPACE);
m_btn_space.set_text_vertical_adjust(get_scaled(5));
m_btn_delete.set_pos(panel_x + get_scaled(200), button_y);
m_btn_delete.set_size(get_scaled(100), button_height);
m_btn_delete.set_text(localized_string_id::RSX_OVERLAYS_OSK_DIALOG_BACKSPACE);
m_btn_delete.set_text_vertical_adjust(get_scaled(5));
m_btn_shift.set_pos(panel_x + get_scaled(320), button_y);
m_btn_shift.set_size(get_scaled(80), button_height);
m_btn_shift.set_text(localized_string_id::RSX_OVERLAYS_OSK_DIALOG_SHIFT);
m_btn_shift.set_text_vertical_adjust(get_scaled(5));
m_btn_accept.set_pos(panel_x + get_scaled(400), button_y);
m_btn_accept.set_size(get_scaled(100), button_height);
m_btn_accept.set_text(localized_string_id::RSX_OVERLAYS_OSK_DIALOG_ACCEPT);
m_btn_accept.set_text_vertical_adjust(get_scaled(5));
m_update = true;
}
void osk_dialog::initialize_layout(const std::u32string& title, const std::u32string& initial_text)
{
const auto scale_font = [this](overlay_element& elem)
{
if (const font* fnt = elem.get_font())
{
elem.set_font(fnt->get_name().data(), get_scaled(fnt->get_size_pt()));
}
};
m_pointer.set_color(color4f{ 1.f, 1.f, 1.f, 1.f });
m_background.set_size(virtual_width, virtual_height);
m_title.set_unicode_text(title);
scale_font(m_title);
m_preview.password_mode = m_password_mode;
m_preview.set_placeholder(get_placeholder());
m_preview.set_unicode_text(initial_text);
scale_font(m_preview);
if (m_preview.value.empty())
{
m_preview.caret_position = 0;
m_preview.fore_color.a = 0.5f; // Muted contrast for hint text
}
else
{
m_preview.caret_position = m_preview.value.length();
m_preview.fore_color.a = 1.f;
}
scale_font(m_btn_shift);
scale_font(m_btn_accept);
scale_font(m_btn_space);
scale_font(m_btn_delete);
scale_font(m_btn_cancel);
m_btn_shift.text_horizontal_offset = get_scaled(m_btn_shift.text_horizontal_offset);
m_btn_accept.text_horizontal_offset = get_scaled(m_btn_accept.text_horizontal_offset);
m_btn_space.text_horizontal_offset = get_scaled(m_btn_space.text_horizontal_offset);
m_btn_delete.text_horizontal_offset = get_scaled(m_btn_delete.text_horizontal_offset);
m_btn_cancel.text_horizontal_offset = get_scaled(m_btn_cancel.text_horizontal_offset);
m_btn_shift.set_image_resource(resource_config::standard_image_resource::select);
m_btn_accept.set_image_resource(resource_config::standard_image_resource::start);
m_btn_space.set_image_resource(resource_config::standard_image_resource::triangle);
m_btn_delete.set_image_resource(resource_config::standard_image_resource::square);
if (g_cfg.sys.enter_button_assignment == enter_button_assign::circle)
{
m_btn_cancel.set_image_resource(resource_config::standard_image_resource::cross);
}
else
{
m_btn_cancel.set_image_resource(resource_config::standard_image_resource::circle);
}
m_update = true;
set_visible(continuous_mode != CELL_OSKDIALOG_CONTINUOUS_MODE_HIDE);
m_stop_input_loop = false;
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.duration_sec = 0.5f;
fade_animation.active = true;
}
void osk_dialog::update_controls()
{
const bool shift_enabled = num_shift_layers_by_charset[m_selected_charset] > 1;
const bool layer_enabled = num_shift_layers_by_charset.size() > 1;
for (auto& cell : m_grid)
{
switch (cell.button_flag)
{
case button_flags::_shift:
cell.enabled = shift_enabled;
break;
case button_flags::_layer:
cell.enabled = layer_enabled;
break;
default:
break;
}
}
m_update = true;
}
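// Returns the start index and horizontal cell count of the (possibly multi-cell)
// button containing the given grid index. Example: a key spanning columns 3-7 of a
// 10-column row reports start_index = row * 10 + 3 and count = 5 for any index inside it.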
std::pair<u32, u32> osk_dialog::get_cell_geometry(u32 index)
{
const u32 grid_size = num_columns * num_rows;
u32 start_index = index;
u32 count = 0;
while (start_index >= grid_size && start_index >= num_columns)
{
// Try one row above
start_index -= num_columns;
}
// Find first cell
while (!(m_grid[start_index].flags & border_flags::left) && start_index)
{
--start_index;
}
// Find last cell
while (true)
{
const u32 current_index = (start_index + count);
ensure(current_index < grid_size);
++count;
if (m_grid[current_index].flags & border_flags::right)
{
break;
}
}
return std::make_pair(start_index, count);
}
void osk_dialog::update_selection_by_index(u32 index)
{
auto select_cell = [&](u32 i, bool state)
{
const auto info = get_cell_geometry(i);
// Tag all in range
for (u32 _index = info.first, _ctr = 0; _ctr < info.second; ++_index, ++_ctr)
{
m_grid[_index].selected = state;
}
};
// 1. Deselect current
const auto current_index = (selected_y * num_columns) + selected_x;
select_cell(current_index, false);
// 2. Select new
selected_y = index / num_columns;
selected_x = index % num_columns;
select_cell(index, true);
}
void osk_dialog::set_visible(bool visible)
{
if (m_use_separate_windows)
{
if (visible && continuous_mode == CELL_OSKDIALOG_CONTINUOUS_MODE_HIDE)
{
continuous_mode = CELL_OSKDIALOG_CONTINUOUS_MODE_SHOW;
}
else if (!visible && continuous_mode == CELL_OSKDIALOG_CONTINUOUS_MODE_SHOW)
{
continuous_mode = CELL_OSKDIALOG_CONTINUOUS_MODE_HIDE;
}
}
if (this->visible != visible)
{
this->visible = visible;
if (m_use_separate_windows)
{
osk.notice("set_visible: sending CELL_SYSUTIL_OSKDIALOG_DISPLAY_CHANGED with %s", visible ? "CELL_OSKDIALOG_DISPLAY_STATUS_SHOW" : "CELL_OSKDIALOG_DISPLAY_STATUS_HIDE");
sysutil_send_system_cmd(CELL_SYSUTIL_OSKDIALOG_DISPLAY_CHANGED, visible ? CELL_OSKDIALOG_DISPLAY_STATUS_SHOW : CELL_OSKDIALOG_DISPLAY_STATUS_HIDE);
}
}
}
void osk_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (!pad_input_enabled || ignore_device_events)
return;
if (input_device.exchange(CELL_OSKDIALOG_INPUT_DEVICE_PAD) != CELL_OSKDIALOG_INPUT_DEVICE_PAD)
{
osk.notice("on_button_pressed: sending CELL_SYSUTIL_OSKDIALOG_INPUT_DEVICE_CHANGED with CELL_OSKDIALOG_INPUT_DEVICE_PAD");
sysutil_send_system_cmd(CELL_SYSUTIL_OSKDIALOG_INPUT_DEVICE_CHANGED, CELL_OSKDIALOG_INPUT_DEVICE_PAD);
}
if (input_device != CELL_OSKDIALOG_INPUT_DEVICE_PAD)
{
return;
}
// Always show the pad input panel if the pad is enabled and in use.
if (!m_show_panel)
{
m_show_panel = true;
update_panel();
}
// Make sure to show the dialog and send necessary events
set_visible(true);
std::lock_guard lock(m_preview_mutex);
const u32 grid_size = num_columns * num_rows;
const auto on_accept = [this]()
{
const u32 current_index = (selected_y * num_columns) + selected_x;
const auto& current_cell = m_grid[current_index];
u32 output_count = 0;
if (m_selected_charset < current_cell.outputs.size())
{
output_count = ::size32(current_cell.outputs[m_selected_charset]);
}
if (output_count)
{
const auto _z = std::clamp<u32>(selected_z, 0u, output_count - 1u);
const auto& str = current_cell.outputs[m_selected_charset][_z];
if (current_cell.callback)
{
current_cell.callback(str);
}
else
{
on_default_callback(str);
}
}
};
// Use a much shorter auto repeat interval for the right stick (it moves the window, so repeats should be fast)
switch (button_press)
{
case pad_button::rs_left:
case pad_button::rs_right:
case pad_button::rs_down:
case pad_button::rs_up:
m_auto_repeat_ms_interval = 10;
break;
default:
m_auto_repeat_ms_interval = m_auto_repeat_ms_interval_default;
break;
}
bool play_cursor_sound = true;
switch (button_press)
{
case pad_button::L1:
{
m_preview.move_caret(edit_text::direction::left);
m_update = true;
break;
}
case pad_button::R1:
{
m_preview.move_caret(edit_text::direction::right);
m_update = true;
break;
}
case pad_button::dpad_right:
case pad_button::ls_right:
{
u32 current_index = (selected_y * num_columns) + selected_x;
while (true)
{
const auto current = get_cell_geometry(current_index);
current_index = current.first + current.second;
if (current_index >= grid_size)
{
break;
}
if (m_grid[get_cell_geometry(current_index).first].enabled)
{
update_selection_by_index(current_index);
break;
}
}
m_reset_pulse = true;
break;
}
case pad_button::dpad_left:
case pad_button::ls_left:
{
u32 current_index = (selected_y * num_columns) + selected_x;
while (current_index > 0)
{
const auto current = get_cell_geometry(current_index);
if (current.first)
{
current_index = current.first - 1;
if (m_grid[get_cell_geometry(current_index).first].enabled)
{
update_selection_by_index(current_index);
break;
}
}
else
{
break;
}
}
m_reset_pulse = true;
break;
}
case pad_button::dpad_down:
case pad_button::ls_down:
{
u32 current_index = (selected_y * num_columns) + selected_x;
while (true)
{
current_index += num_columns;
if (current_index >= grid_size)
{
break;
}
if (m_grid[get_cell_geometry(current_index).first].enabled)
{
update_selection_by_index(current_index);
break;
}
}
m_reset_pulse = true;
break;
}
case pad_button::dpad_up:
case pad_button::ls_up:
{
u32 current_index = (selected_y * num_columns) + selected_x;
while (current_index >= num_columns)
{
current_index -= num_columns;
if (m_grid[get_cell_geometry(current_index).first].enabled)
{
update_selection_by_index(current_index);
break;
}
}
m_reset_pulse = true;
break;
}
case pad_button::select:
{
on_shift(U"");
break;
}
case pad_button::start:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_oskenter.wav");
Close(CELL_OSKDIALOG_CLOSE_CONFIRM);
play_cursor_sound = false;
break;
}
case pad_button::triangle:
{
on_space(U"");
break;
}
case pad_button::square:
{
on_backspace(U"");
break;
}
case pad_button::cross:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_oskenter.wav");
on_accept();
m_reset_pulse = true;
play_cursor_sound = false;
break;
}
case pad_button::circle:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_oskcancel.wav");
Close(CELL_OSKDIALOG_CLOSE_CANCEL);
play_cursor_sound = false;
break;
}
case pad_button::L2:
{
step_panel(false);
break;
}
case pad_button::R2:
{
step_panel(true);
break;
}
case pad_button::rs_left:
case pad_button::rs_right:
case pad_button::rs_down:
case pad_button::rs_up:
{
if (!(flags & CELL_OSKDIALOG_NO_INPUT_ANALOG))
{
switch (button_press)
{
case pad_button::rs_left: m_x_input_pos -= 5; m_x_panel_pos -= 5; break;
case pad_button::rs_right: m_x_input_pos += 5; m_x_panel_pos += 5; break;
case pad_button::rs_down: m_y_input_pos += 5; m_y_panel_pos += 5; break;
case pad_button::rs_up: m_y_input_pos -= 5; m_y_panel_pos -= 5; break;
default: break;
}
update_panel();
}
play_cursor_sound = false;
break;
}
default:
break;
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
if (play_cursor_sound && (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default))
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
if (m_reset_pulse)
{
m_update = true;
}
}
void osk_dialog::on_key_pressed(u32 led, u32 mkey, u32 key_code, u32 out_key_code, bool pressed, std::u32string key)
{
if (!pressed || !keyboard_input_enabled || ignore_device_events)
return;
if (input_device.exchange(CELL_OSKDIALOG_INPUT_DEVICE_KEYBOARD) != CELL_OSKDIALOG_INPUT_DEVICE_KEYBOARD)
{
osk.notice("on_key_pressed: sending CELL_SYSUTIL_OSKDIALOG_INPUT_DEVICE_CHANGED with CELL_OSKDIALOG_INPUT_DEVICE_KEYBOARD");
sysutil_send_system_cmd(CELL_SYSUTIL_OSKDIALOG_INPUT_DEVICE_CHANGED, CELL_OSKDIALOG_INPUT_DEVICE_KEYBOARD);
}
if (input_device != CELL_OSKDIALOG_INPUT_DEVICE_KEYBOARD)
{
return;
}
if (m_use_separate_windows && m_show_panel)
{
// Hide the pad input panel if the keyboard is in use during separate windows.
m_show_panel = false;
update_panel();
}
// Make sure to show the dialog and send necessary events
set_visible(true);
std::lock_guard lock(m_preview_mutex);
// The key should normally be empty unless the backend couldn't find a match.
const bool is_key_string_fallback = !key.empty();
// Pure meta keys need to be treated with care, as their out key code contains the meta key code instead of the normal key code.
const bool is_meta_key = mkey != 0 && key_code == CELL_KEYC_NO_EVENT && key.empty();
osk.notice("osk_dialog::on_key_pressed(led=%d, mkey=%d, key_code=%d, out_key_code=%d, pressed=%d, is_key_string_fallback=%d, is_meta_key=%d)", led, mkey, key_code, out_key_code, pressed, is_key_string_fallback, is_meta_key);
// Find matching key in the OSK
const auto find_key = [&]() -> bool
{
if (is_meta_key)
{
// We don't need to process meta keys in the grid at the moment.
// The key is valid either way, so we return true.
// Only on_osk_key_input_entered is called later.
return true;
}
// Get the string representation of this key (unless it's already set by the backend)
if (key.empty())
{
// Get keyboard layout
const u32 kb_mapping = static_cast<u32>(g_cfg.sys.keyboard_type.get());
// Convert key to its u32string presentation
const u16 converted_out_key = cellKbCnvRawCode(kb_mapping, mkey, led, out_key_code);
std::u16string utf16_string;
utf16_string.push_back(converted_out_key);
key = utf16_to_u32string(utf16_string);
}
if (key.empty())
{
return false;
}
for (const cell& current_cell : m_grid)
{
for (const auto& output : current_cell.outputs)
{
for (const auto& str : output)
{
if (str == key)
{
// Apply key press
if (current_cell.callback)
{
current_cell.callback(str);
}
else
{
on_default_callback(str);
}
return true;
}
}
}
}
return false;
};
const bool found_key = find_key();
if (is_key_string_fallback)
{
// We don't have a keycode, so we can't process any of the following code anyway
return;
}
// Handle special input
if (!found_key)
{
switch (out_key_code)
{
case CELL_KEYC_SPACE:
on_space(key);
break;
case CELL_KEYC_BS:
on_backspace(key);
break;
case CELL_KEYC_DELETE:
on_delete(key);
break;
case CELL_KEYC_ESCAPE:
Close(CELL_OSKDIALOG_CLOSE_CANCEL);
break;
case CELL_KEYC_RIGHT_ARROW:
on_move_cursor(key, edit_text::direction::right);
break;
case CELL_KEYC_LEFT_ARROW:
on_move_cursor(key, edit_text::direction::left);
break;
case CELL_KEYC_DOWN_ARROW:
on_move_cursor(key, edit_text::direction::down);
break;
case CELL_KEYC_UP_ARROW:
on_move_cursor(key, edit_text::direction::up);
break;
case CELL_KEYC_ENTER:
if ((flags & CELL_OSKDIALOG_NO_RETURN))
{
Close(CELL_OSKDIALOG_CLOSE_CONFIRM);
}
else
{
on_enter(key);
}
break;
default:
break;
}
}
if (on_osk_key_input_entered)
{
CellOskDialogKeyMessage key_message{};
key_message.led = led;
key_message.mkey = mkey;
key_message.keycode = out_key_code;
on_osk_key_input_entered(key_message);
}
}
void osk_dialog::on_text_changed()
{
const std::u16string ws = u32string_to_utf16(m_preview.value);
const usz length = std::min(osk_text.size(), ws.length() + 1) * sizeof(char16_t);
memcpy(osk_text.data(), ws.c_str(), length);
osk.notice("on_text_changed: osk_text='%s'", utf16_to_ascii8(ws));
// Muted contrast for placeholder text
m_preview.fore_color.a = m_preview.value.empty() ? 0.5f : 1.f;
m_update = true;
}
void osk_dialog::on_default_callback(const std::u32string& str)
{
if (str.empty())
{
return;
}
// Append to output text
if (m_preview.value.empty())
{
m_preview.caret_position = str.length();
m_preview.set_unicode_text(str);
}
else
{
if (m_preview.value.length() >= char_limit)
{
return;
}
if ((m_preview.value.length() + str.length()) <= char_limit)
{
m_preview.insert_text(str);
}
}
on_text_changed();
}
void osk_dialog::on_shift(const std::u32string&)
{
const u32 max = num_shift_layers_by_charset[m_selected_charset];
selected_z = (selected_z + 1) % max;
m_update = true;
}
void osk_dialog::on_layer(const std::u32string&)
{
const u32 num_charsets = std::max<u32>(::size32(num_shift_layers_by_charset), 1);
m_selected_charset = (m_selected_charset + 1) % num_charsets;
const u32 max_z_layer = num_shift_layers_by_charset[m_selected_charset] - 1;
if (selected_z > max_z_layer)
{
selected_z = max_z_layer;
}
update_controls();
m_update = true;
}
void osk_dialog::on_space(const std::u32string&)
{
if (!(flags & CELL_OSKDIALOG_NO_SPACE))
{
on_default_callback(U" ");
}
else
{
// Beep or give some other kind of visual feedback
}
}
void osk_dialog::on_backspace(const std::u32string&)
{
m_preview.erase();
on_text_changed();
}
void osk_dialog::on_delete(const std::u32string&)
{
m_preview.del();
on_text_changed();
}
void osk_dialog::on_enter(const std::u32string&)
{
if (!(flags & CELL_OSKDIALOG_NO_RETURN))
{
on_default_callback(U"\n");
}
else
{
// Beep or give some other kind of visual feedback
}
}
void osk_dialog::on_move_cursor(const std::u32string&, edit_text::direction dir)
{
m_preview.move_caret(dir);
m_update = true;
}
std::u32string osk_dialog::get_placeholder() const
{
const localized_string_id id = m_password_mode
? localized_string_id::RSX_OVERLAYS_OSK_DIALOG_ENTER_PASSWORD
: localized_string_id::RSX_OVERLAYS_OSK_DIALOG_ENTER_TEXT;
return get_localized_u32string(id);
}
void osk_dialog::update(u64 timestamp_us)
{
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
m_update = true;
}
osk_info& info = g_fxo->get<osk_info>();
if (const bool pointer_enabled = info.pointer_enabled; pointer_enabled != m_pointer.visible())
{
m_pointer.set_expiration(pointer_enabled ? u64{umax} : 0);
m_pointer.update_visibility(get_system_time());
m_update = true;
}
if (m_pointer.visible() && m_pointer.set_position(static_cast<s16>(info.pointer_x), static_cast<s16>(info.pointer_y)))
{
m_update = true;
}
}
compiled_resource osk_dialog::get_compiled()
{
if (!visible)
{
return {};
}
if (!m_update)
{
fade_animation.apply(m_cached_resource);
return m_cached_resource;
}
m_cached_resource.clear();
m_cached_resource.add(m_background.get_compiled());
if (m_use_separate_windows && !m_show_panel)
{
std::lock_guard lock(m_preview_mutex);
m_cached_resource.add(m_preview.get_compiled());
}
else
{
m_cached_resource.add(m_panel_frame.get_compiled());
if (!m_use_separate_windows)
{
// The title is omitted in separate window mode
m_cached_resource.add(m_input_frame.get_compiled());
m_cached_resource.add(m_title.get_compiled());
}
{
std::lock_guard lock(m_preview_mutex);
m_cached_resource.add(m_preview.get_compiled());
}
m_cached_resource.add(m_btn_accept.get_compiled());
m_cached_resource.add(m_btn_cancel.get_compiled());
m_cached_resource.add(m_btn_shift.get_compiled());
m_cached_resource.add(m_btn_space.get_compiled());
m_cached_resource.add(m_btn_delete.get_compiled());
overlay_element tmp;
u16 buffered_cell_count = 0;
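// buffered_cell_count tracks how many cells the current multi-cell key spans so far;
// once the right border is reached, the label is stretched back over the full span.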
bool render_label = false;
const color4f disabled_back_color = { 0.3f, 0.3f, 0.3f, 1.f };
const color4f disabled_fore_color = { 0.8f, 0.8f, 0.8f, 1.f };
const color4f normal_fore_color = { 0.f, 0.f, 0.f, 1.f };
label label;
label.back_color = { 0.f, 0.f, 0.f, 0.f };
label.set_padding(0, 0, get_scaled(10), 0);
const auto scale_font = [this](overlay_element& elem)
{
if (const font* fnt = elem.get_font())
{
elem.set_font(fnt->get_name().data(), get_scaled(fnt->get_size_pt()));
}
};
scale_font(label);
if (m_reset_pulse)
{
// Reset the pulse slightly above 0 falling on each user interaction
m_key_pulse_cache.set_sinus_offset(0.6f);
}
for (const auto& c : m_grid)
{
s16 x = static_cast<s16>(c.pos.x);
s16 y = static_cast<s16>(c.pos.y);
u16 w = cell_size_x;
u16 h = cell_size_y;
if (c.flags & border_flags::left)
{
x++;
w--;
buffered_cell_count = 0;
}
if (c.flags & border_flags::right)
{
w--;
u32 output_count = 0;
if (m_selected_charset < c.outputs.size())
{
output_count = ::size32(c.outputs[m_selected_charset]);
}
if (output_count)
{
const s16 offset_x = static_cast<s16>(buffered_cell_count * cell_size_x);
const u16 full_width = static_cast<u16>(offset_x + cell_size_x);
label.set_pos(x - offset_x, y);
label.set_size(full_width, cell_size_y);
label.fore_color = c.enabled ? normal_fore_color : disabled_fore_color;
const auto _z = (selected_z < output_count) ? selected_z : output_count - 1u;
label.set_unicode_text(c.outputs[m_selected_charset][_z]);
label.align_text(rsx::overlays::overlay_element::text_align::center);
render_label = true;
}
}
if (c.flags & border_flags::top)
{
y++;
h--;
}
if (c.flags & border_flags::bottom)
{
h--;
}
buffered_cell_count++;
tmp.back_color = c.enabled ? c.backcolor : disabled_back_color;
tmp.set_pos(x, y);
tmp.set_size(w, h);
tmp.pulse_effect_enabled = c.selected;
tmp.pulse_sinus_offset = m_key_pulse_cache.pulse_sinus_offset;
m_cached_resource.add(tmp.get_compiled());
if (render_label)
{
label.pulse_effect_enabled = c.selected;
label.pulse_sinus_offset = m_key_pulse_cache.pulse_sinus_offset;
m_cached_resource.add(label.get_compiled());
}
}
m_cached_resource.add(m_pointer.get_compiled());
m_reset_pulse = false;
}
m_update = false;
fade_animation.apply(m_cached_resource);
return m_cached_resource;
}
void osk_dialog::Create(const osk_params& params)
{
state = OskDialogState::Open;
flags = params.prohibit_flags;
char_limit = params.charlimit;
m_layout = params.layout;
m_input_layout = params.input_layout;
m_panel_layout = params.panel_layout;
m_input_field_window_width = params.input_field_window_width;
m_scaling = params.initial_scale;
m_input_frame.back_color.r = params.base_color.r;
m_input_frame.back_color.g = params.base_color.g;
m_input_frame.back_color.b = params.base_color.b;
m_input_frame.back_color.a = params.base_color.a;
m_panel_frame.back_color = m_input_frame.back_color;
m_background.back_color.a = params.dimmer_enabled ? 0.8f : 0.0f;
m_start_pad_interception = params.intercept_input;
m_use_separate_windows = params.use_separate_windows;
if (m_use_separate_windows)
{
// When using separate windows, we show the text field, but hide the pad input panel if the input device is a pad.
m_show_panel = pad_input_enabled && input_device == CELL_OSKDIALOG_INPUT_DEVICE_PAD;
m_preview.back_color.a = std::clamp(params.input_field_background_transparency, 0.0f, 1.0f);
}
else
{
m_title.back_color.a = 0.7f; // Uses the dimmed color of the frame background
}
const callback_t shift_cb = [this](const std::u32string& text){ on_shift(text); };
const callback_t layer_cb = [this](const std::u32string& text){ on_layer(text); };
const callback_t space_cb = [this](const std::u32string& text){ on_space(text); };
const callback_t delete_cb = [this](const std::u32string& text){ on_backspace(text); };
const callback_t enter_cb = [this](const std::u32string& text){ on_enter(text); };
const auto is_supported = [&](u32 mode) -> bool
{
switch (mode)
{
case CELL_OSKDIALOG_PANELMODE_POLISH:
case CELL_OSKDIALOG_PANELMODE_KOREAN:
case CELL_OSKDIALOG_PANELMODE_TURKEY:
case CELL_OSKDIALOG_PANELMODE_TRADITIONAL_CHINESE:
case CELL_OSKDIALOG_PANELMODE_SIMPLIFIED_CHINESE:
case CELL_OSKDIALOG_PANELMODE_PORTUGUESE_BRAZIL:
case CELL_OSKDIALOG_PANELMODE_DANISH:
case CELL_OSKDIALOG_PANELMODE_SWEDISH:
case CELL_OSKDIALOG_PANELMODE_NORWEGIAN:
case CELL_OSKDIALOG_PANELMODE_FINNISH:
return (params.panel_flag & mode) && (params.support_language & mode);
default:
return (params.panel_flag & mode);
}
};
const auto has_language_support = [&](CellSysutilLang language)
{
switch (language)
{
case CELL_SYSUTIL_LANG_KOREAN: return is_supported(CELL_OSKDIALOG_PANELMODE_KOREAN);
case CELL_SYSUTIL_LANG_FINNISH: return is_supported(CELL_OSKDIALOG_PANELMODE_FINNISH);
case CELL_SYSUTIL_LANG_SWEDISH: return is_supported(CELL_OSKDIALOG_PANELMODE_SWEDISH);
case CELL_SYSUTIL_LANG_DANISH: return is_supported(CELL_OSKDIALOG_PANELMODE_DANISH);
case CELL_SYSUTIL_LANG_NORWEGIAN: return is_supported(CELL_OSKDIALOG_PANELMODE_NORWEGIAN);
case CELL_SYSUTIL_LANG_POLISH: return is_supported(CELL_OSKDIALOG_PANELMODE_POLISH);
case CELL_SYSUTIL_LANG_PORTUGUESE_BR: return is_supported(CELL_OSKDIALOG_PANELMODE_PORTUGUESE_BRAZIL);
case CELL_SYSUTIL_LANG_TURKISH: return is_supported(CELL_OSKDIALOG_PANELMODE_TURKEY);
case CELL_SYSUTIL_LANG_CHINESE_T: return is_supported(CELL_OSKDIALOG_PANELMODE_TRADITIONAL_CHINESE);
case CELL_SYSUTIL_LANG_CHINESE_S: return is_supported(CELL_OSKDIALOG_PANELMODE_SIMPLIFIED_CHINESE);
default: return true;
}
};
if (params.panel_flag & CELL_OSKDIALOG_PANELMODE_PASSWORD)
{
// If password was requested, then password has to be the only osk panel mode available to the user
// first_view_panel can be ignored
add_panel(osk_panel_password(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
m_password_mode = true;
}
else if (params.panel_flag == CELL_OSKDIALOG_PANELMODE_DEFAULT || params.panel_flag == CELL_OSKDIALOG_PANELMODE_DEFAULT_NO_JAPANESE)
{
// Prefer the system settings
// first_view_panel is ignored
CellSysutilLang language = g_cfg.sys.language;
// Fall back to English if the panel is not supported
if (!has_language_support(language))
{
language = CELL_SYSUTIL_LANG_ENGLISH_US;
}
switch (language)
{
case CELL_SYSUTIL_LANG_JAPANESE:
if (params.panel_flag == CELL_OSKDIALOG_PANELMODE_DEFAULT_NO_JAPANESE)
add_panel(osk_panel_english(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
else
add_panel(osk_panel_japanese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_FRENCH:
add_panel(osk_panel_french(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_SPANISH:
add_panel(osk_panel_spanish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_GERMAN:
add_panel(osk_panel_german(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_ITALIAN:
add_panel(osk_panel_italian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_DANISH:
add_panel(osk_panel_danish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_NORWEGIAN:
add_panel(osk_panel_norwegian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_DUTCH:
add_panel(osk_panel_dutch(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_FINNISH:
add_panel(osk_panel_finnish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_SWEDISH:
add_panel(osk_panel_swedish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_PORTUGUESE_PT:
add_panel(osk_panel_portuguese_pt(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_PORTUGUESE_BR:
add_panel(osk_panel_portuguese_br(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_TURKISH:
add_panel(osk_panel_turkey(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_POLISH:
add_panel(osk_panel_polish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_RUSSIAN:
add_panel(osk_panel_russian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_KOREAN:
add_panel(osk_panel_korean(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_CHINESE_T:
add_panel(osk_panel_traditional_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_CHINESE_S:
add_panel(osk_panel_simplified_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
case CELL_SYSUTIL_LANG_ENGLISH_US:
case CELL_SYSUTIL_LANG_ENGLISH_GB:
default:
add_panel(osk_panel_english(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
break;
}
}
else
{
// Append osk modes.
// TODO: find out the exact order
if (is_supported(CELL_OSKDIALOG_PANELMODE_LATIN))
{
add_panel(osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_ENGLISH))
{
add_panel(osk_panel_english(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_FRENCH))
{
add_panel(osk_panel_french(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_SPANISH))
{
add_panel(osk_panel_spanish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_ITALIAN))
{
add_panel(osk_panel_italian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_GERMAN))
{
add_panel(osk_panel_german(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_TURKEY))
{
add_panel(osk_panel_turkey(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_POLISH))
{
add_panel(osk_panel_polish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_RUSSIAN))
{
add_panel(osk_panel_russian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_DANISH))
{
add_panel(osk_panel_danish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_NORWEGIAN))
{
add_panel(osk_panel_norwegian(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_DUTCH))
{
add_panel(osk_panel_dutch(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_SWEDISH))
{
add_panel(osk_panel_swedish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_FINNISH))
{
add_panel(osk_panel_finnish(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_PORTUGUESE))
{
add_panel(osk_panel_portuguese_pt(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_PORTUGUESE_BRAZIL))
{
add_panel(osk_panel_portuguese_br(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_KOREAN))
{
add_panel(osk_panel_korean(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_TRADITIONAL_CHINESE))
{
add_panel(osk_panel_traditional_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_SIMPLIFIED_CHINESE))
{
add_panel(osk_panel_simplified_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_JAPANESE))
{
add_panel(osk_panel_japanese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_JAPANESE_HIRAGANA))
{
add_panel(osk_panel_japanese_hiragana(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_JAPANESE_KATAKANA))
{
add_panel(osk_panel_japanese_katakana(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_ALPHABET))
{
add_panel(osk_panel_alphabet_half_width(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_ALPHABET_FULL_WIDTH))
{
add_panel(osk_panel_alphabet_full_width(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_NUMERAL))
{
add_panel(osk_panel_numeral_half_width(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_NUMERAL_FULL_WIDTH))
{
add_panel(osk_panel_numeral_full_width(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
if (is_supported(CELL_OSKDIALOG_PANELMODE_URL))
{
add_panel(osk_panel_url(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
// Get initial panel based on first_view_panel
for (usz i = 0; i < m_panels.size(); ++i)
{
if (params.first_view_panel == m_panels[i].osk_panel_mode)
{
m_panel_index = i;
break;
}
}
}
// Fall back to English in case we missed something
if (m_panels.empty())
{
osk.error("No OSK panel found. Using english panel.");
add_panel(osk_panel_english(shift_cb, layer_cb, space_cb, delete_cb, enter_cb));
}
initialize_layout(utf16_to_u32string(params.message), utf16_to_u32string(params.init_text));
update_panel();
const auto notify = std::make_shared<atomic_t<u32>>(0);
auto& overlayman = g_fxo->get<display_manager>();
overlayman.attach_thread_input(
uid, "OSK",
[notify]() { *notify = true; notify->notify_one(); }
);
while (!Emu.IsStopped() && !*notify)
{
notify->wait(0, atomic_wait_timeout{1'000'000});
}
}
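// Note on the blocking tail of Create() above: the dialog thread parks itself
// on a shared atomic until the overlay input thread signals readiness. A
// reduced sketch of the same handshake (illustrative only; `worker` is a
// hypothetical stand-in for the display_manager callback registration):
//
//   auto flag = std::make_shared<atomic_t<u32>>(0);
//   worker.on_ready([flag] { *flag = 1; flag->notify_one(); });
//   while (!Emu.IsStopped() && !*flag)
//       flag->wait(0, atomic_wait_timeout{1'000'000}); // wake on notify or timeout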
}
}
| 49,461 | C++ | .cpp | 1,435 | 29.443902 | 227 | 0.658313 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,408 | overlay_trophy_notification.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_trophy_notification.cpp |
#include "stdafx.h"
#include "overlay_trophy_notification.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/System.h"
namespace rsx
{
namespace overlays
{
// TODO: Move somewhere in rsx_utils or general utils if needed anywhere else
class ticket_semaphore_t
{
atomic_t<u64> acquired{0};
atomic_t<u64> released{0};
public:
u64 enqueue()
{
return acquired.fetch_add(1);
}
bool try_acquire(u64 queue_id) const
{
return (queue_id == released.load());
}
void release()
{
released++;
}
};
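// Minimal usage sketch of ticket_semaphore_t (illustration only, not emulator
// code): each notification takes a ticket, polls until its ticket is released,
// then releases for the next one. This is what serializes overlapping trophies.
//
//   ticket_semaphore_t sem;
//   const u64 ticket = sem.enqueue();    // reserve a display slot, FIFO order
//   while (!sem.try_acquire(ticket)) {}  // wait (here: spin) until our turn
//   /* ... show the notification ... */
//   sem.release();                       // unblock the next ticket holder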
static ticket_semaphore_t s_trophy_semaphore;
trophy_notification::trophy_notification()
{
frame.set_pos(68, 55);
frame.set_size(300, 72);
frame.back_color.r = 0.247059f;
frame.back_color.g = 0.250980f;
frame.back_color.b = 0.247059f;
frame.back_color.a = 0.88f;
image.set_pos(78, 64);
image.set_size(53, 53);
image.back_color.a = 0.f;
text_view.set_pos(139, 69);
text_view.set_padding(0, 0, 0, 0);
text_view.set_font("Arial", 14);
text_view.align_text(overlay_element::text_align::center);
text_view.back_color.a = 0.f;
sliding_animation.duration_sec = 1.5f;
sliding_animation.type = animation_type::ease_in_out_cubic;
// Make the fade animation a bit shorter to see the trophy better.
fade_animation.duration_sec = 1.0f;
}
void trophy_notification::update(u64 timestamp_us)
{
if (!s_trophy_semaphore.try_acquire(display_sched_id))
{
// Not scheduled to run just yet
return;
}
if (!creation_time_us)
{
// First tick
creation_time_us = timestamp_us;
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_trophy.wav");
return;
}
const u64 time_since_creation_us = timestamp_us - creation_time_us;
u64 end_animation_begin_us = 5'000'000;
if (time_since_creation_us > end_animation_begin_us)
{
if (!sliding_animation.active)
{
sliding_animation.end = { -f32(frame.x + frame.w), 0, 0 };
sliding_animation.on_finish = [this]
{
s_trophy_semaphore.release();
close(false, false);
};
sliding_animation.active = true;
}
}
// Match both animation ends based on their durations
if (sliding_animation.duration_sec > fade_animation.duration_sec)
{
end_animation_begin_us += static_cast<u64>((sliding_animation.duration_sec - fade_animation.duration_sec) * 1'000'000);
}
if (time_since_creation_us > end_animation_begin_us)
{
if (!fade_animation.active)
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
}
}
if (sliding_animation.active)
{
sliding_animation.update(timestamp_us);
}
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
}
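// Timing example for the matching logic above, using the constructor's values
// (slide 1.5s, fade 1.0s): the slide-out starts at t = 5.0s, and
// end_animation_begin_us grows by (1.5 - 1.0) * 1'000'000 = 500'000 us, so the
// fade-out starts at t = 5.5s and both animations finish together at t = 6.5s.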
compiled_resource trophy_notification::get_compiled()
{
if (!creation_time_us || !visible)
{
return {};
}
auto result = frame.get_compiled();
result.add(image.get_compiled());
result.add(text_view.get_compiled());
sliding_animation.apply(result);
fade_animation.apply(result);
return result;
}
s32 trophy_notification::show(const SceNpTrophyDetails& trophy, const std::vector<uchar>& trophy_icon_buffer)
{
// Schedule to display this trophy
display_sched_id = s_trophy_semaphore.enqueue();
visible = false;
if (!trophy_icon_buffer.empty())
{
icon_info = std::make_unique<image_info>(trophy_icon_buffer);
image.set_raw_image(icon_info.get());
}
localized_string_id string_id = localized_string_id::INVALID;
switch (trophy.trophyGrade)
{
case SCE_NP_TROPHY_GRADE_BRONZE: string_id = localized_string_id::RSX_OVERLAYS_TROPHY_BRONZE; break;
case SCE_NP_TROPHY_GRADE_SILVER: string_id = localized_string_id::RSX_OVERLAYS_TROPHY_SILVER; break;
case SCE_NP_TROPHY_GRADE_GOLD: string_id = localized_string_id::RSX_OVERLAYS_TROPHY_GOLD; break;
case SCE_NP_TROPHY_GRADE_PLATINUM: string_id = localized_string_id::RSX_OVERLAYS_TROPHY_PLATINUM; break;
default: break;
}
text_view.set_unicode_text(get_localized_u32string(string_id, trophy.name));
text_view.auto_resize();
// Resize background to cover the text
constexpr u16 margin_sz = 9;
frame.w = margin_sz * 3 + image.w + text_view.w;
sliding_animation.current = { -f32(frame.x + frame.w), 0, 0 };
sliding_animation.end = { 0, 0, 0 };
sliding_animation.active = true;
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
visible = true;
return CELL_OK;
}
} // namespace overlays
} // namespace rsx
| 4,657 | C++ | .cpp | 149 | 27.04698 | 123 | 0.684022 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

5,409 | overlay_controls.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_controls.cpp |
#include "stdafx.h"
#include "overlay_controls.h"
#include "util/types.hpp"
#include "util/logs.hpp"
#include "Utilities/geometry.h"
#include "Utilities/File.h"
#ifndef _WIN32
#include <unistd.h>
#include <libgen.h>
#include <limits.h>
#ifdef __APPLE__
#include <mach-o/dyld.h>
#endif
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__)
#include <sys/sysctl.h>
#endif
#endif
// Definitions for common UI controls and their routines
namespace rsx
{
namespace overlays
{
static std::vector<vertex> generate_unit_quadrant(int num_patch_points, const float offset[2], const float scale[2])
{
ensure(num_patch_points >= 3);
std::vector<vertex> result(num_patch_points + 1);
// Set root vertex
result[0].vec2(offset[0], offset[1]);
// Set the 0th and Nth outer vertices which lie flush with the axes
result[1].vec2(offset[0] + scale[0], offset[1]);
result[num_patch_points].vec2(offset[0], offset[1] + scale[1]);
constexpr float degrees_to_radians = 0.0174533f;
for (int i = 1; i < num_patch_points - 1; i++)
{
// If we keep a unit circle, 2 of the 4 components of the rotation matrix become 0
// We end up with a simple vec2(cos_theta, sin_theta) as the output
// The final scaling and translation can then be done with fmad
const auto angle = degrees_to_radians * ((i * 90) / (num_patch_points - 1));
result[i + 1].vec2(
std::fmaf(std::cos(angle), scale[0], offset[0]),
std::fmaf(std::sin(angle), scale[1], offset[1])
);
}
return result;
}
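// Worked example for the fan above (assumed values): num_patch_points = 3,
// offset = {10, 10}, scale = {5, 5} yields 4 vertices:
//   result[0] = (10, 10)   // root vertex at the corner origin
//   result[1] = (15, 10)   // flush with the x axis
//   result[2] = (10 + 5*cos(45deg), 10 + 5*sin(45deg)) ~ (13.54, 13.54)
//   result[3] = (10, 15)   // flush with the y axis
// i.e. a quarter circle of radius 5 expressed as a triangle fan.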
image_info::image_info(const char* filename)
{
fs::file f(filename, fs::read + fs::isfile);
if (!f)
{
rsx_log.error("Image resource file `%s' could not be opened (%s)", filename, fs::g_tls_error);
return;
}
const std::vector<u8> bytes = f.to_vector<u8>();
load_data(bytes);
}
image_info::image_info(const std::vector<u8>& bytes)
{
load_data(bytes);
}
image_info::~image_info()
{
if (data) stbi_image_free(data);
}
void image_info::load_data(const std::vector<u8>& bytes)
{
data = stbi_load_from_memory(bytes.data(), ::narrow<int>(bytes.size()), &w, &h, &bpp, STBI_rgb_alpha);
}
resource_config::resource_config()
{
}
void resource_config::load_files()
{
const std::array<std::string, 15> texture_resource_files
{
"fade_top.png",
"fade_bottom.png",
"select.png",
"start.png",
"cross.png",
"circle.png",
"triangle.png",
"square.png",
"L1.png",
"R1.png",
"L2.png",
"R2.png",
"save.png",
"new.png",
"spinner-24.png"
};
for (const std::string& res : texture_resource_files)
{
// First check the global config dir
const std::string image_path = fs::get_config_dir() + "Icons/ui/" + res;
auto info = std::make_unique<image_info>(image_path.c_str());
#if !defined(_WIN32) && !defined(__APPLE__) && defined(DATADIR)
// Check the DATADIR if defined
if (info->data == nullptr)
{
const std::string data_dir (DATADIR);
const std::string image_data = data_dir + "/Icons/ui/" + res;
info = std::make_unique<image_info>(image_data.c_str());
}
#endif
if (info->data == nullptr)
{
// Resource was not found in the DATADIR or config dir, try to load it from the relative path (Linux)
std::string src = "Icons/ui/" + res;
info = std::make_unique<image_info>(src.c_str());
#ifndef _WIN32
// Check for Icons in ../share/rpcs3 for AppImages,
// in rpcs3.app/Contents/Resources for App Bundles, and /usr/bin.
if (info->data == nullptr)
{
char result[ PATH_MAX ];
#if defined(__APPLE__)
u32 bufsize = PATH_MAX;
bool success = _NSGetExecutablePath( result, &bufsize ) == 0;
#elif defined(KERN_PROC_PATHNAME)
usz bufsize = PATH_MAX;
int mib[] = {
CTL_KERN,
#if defined(__NetBSD__)
KERN_PROC_ARGS,
-1,
KERN_PROC_PATHNAME,
#else
KERN_PROC,
KERN_PROC_PATHNAME,
-1,
#endif
};
bool success = sysctl(mib, sizeof(mib)/sizeof(mib[0]), result, &bufsize, NULL, 0) >= 0;
#elif defined(__linux__)
bool success = readlink( "/proc/self/exe", result, PATH_MAX ) >= 0;
#elif defined(__sun)
bool success = readlink( "/proc/self/path/a.out", result, PATH_MAX ) >= 0;
#else
bool success = readlink( "/proc/curproc/file", result, PATH_MAX ) >= 0;
#endif
if (success)
{
std::string executablePath = dirname(result);
#ifdef __APPLE__
src = executablePath + "/../Resources/Icons/ui/" + res;
#else
src = executablePath + "/../share/rpcs3/Icons/ui/" + res;
#endif
info = std::make_unique<image_info>(src.c_str());
// Check if the icons are in the same directory as the executable (local builds)
if (info->data == nullptr)
{
src = executablePath + "/Icons/ui/" + res;
info = std::make_unique<image_info>(src.c_str());
}
}
}
#endif
if (info->data != nullptr)
{
// Install the image to config dir
fs::create_path(fs::get_parent_dir(image_path));
fs::copy_file(src, image_path, true);
}
}
texture_raw_data.push_back(std::move(info));
}
}
void resource_config::free_resources()
{
texture_raw_data.clear();
}
void compiled_resource::command_config::set_image_resource(u8 ref)
{
texture_ref = ref;
font_ref = nullptr;
}
void compiled_resource::command_config::set_font(font *ref)
{
texture_ref = image_resource_id::font_file;
font_ref = ref;
}
f32 compiled_resource::command_config::get_sinus_value() const
{
return (static_cast<f32>(get_system_time() / 1000) * pulse_speed_modifier) - pulse_sinus_offset;
}
void compiled_resource::add(const compiled_resource& other)
{
auto old_size = draw_commands.size();
draw_commands.resize(old_size + other.draw_commands.size());
std::copy(other.draw_commands.begin(), other.draw_commands.end(), draw_commands.begin() + old_size);
}
void compiled_resource::add(const compiled_resource& other, f32 x_offset, f32 y_offset)
{
auto old_size = draw_commands.size();
draw_commands.resize(old_size + other.draw_commands.size());
std::copy(other.draw_commands.begin(), other.draw_commands.end(), draw_commands.begin() + old_size);
for (usz n = old_size; n < draw_commands.size(); ++n)
{
for (auto &v : draw_commands[n].verts)
{
v += vertex(x_offset, y_offset, 0.f, 0.f);
}
}
}
void compiled_resource::add(const compiled_resource& other, f32 x_offset, f32 y_offset, const areaf& clip_rect)
{
auto old_size = draw_commands.size();
draw_commands.resize(old_size + other.draw_commands.size());
std::copy(other.draw_commands.begin(), other.draw_commands.end(), draw_commands.begin() + old_size);
for (usz n = old_size; n < draw_commands.size(); ++n)
{
for (auto &v : draw_commands[n].verts)
{
v += vertex(x_offset, y_offset, 0.f, 0.f);
}
draw_commands[n].config.clip_rect = clip_rect;
draw_commands[n].config.clip_region = true;
}
}
void compiled_resource::clear()
{
draw_commands.clear();
}
compiled_resource::command& compiled_resource::append(const command& new_command)
{
draw_commands.emplace_back(new_command);
return draw_commands.back();
}
compiled_resource::command& compiled_resource::prepend(const command& new_command)
{
draw_commands.emplace(draw_commands.begin(), new_command);
return draw_commands.front();
}
void overlay_element::set_sinus_offset(f32 sinus_modifier)
{
if (sinus_modifier >= 0)
{
static constexpr f32 PI = 3.14159265f;
const f32 pulse_sinus_x = static_cast<f32>(get_system_time() / 1000) * pulse_speed_modifier;
pulse_sinus_offset = fmod(pulse_sinus_x + sinus_modifier * PI, 2.0f * PI);
}
}
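// Phase arithmetic note (illustrative): get_sinus_value() evaluates
//   time_ms * pulse_speed_modifier - pulse_sinus_offset
// so storing fmod(current_phase + sinus_modifier * PI, 2*PI) here pins the
// pulse phase relative to "now", shifted by sinus_modifier half-periods.
// edit_text, for example, passes 1.6f so the caret blink restarts from the
// same point of the wave on every keystroke instead of wherever the global
// timer happens to be.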
void overlay_element::refresh()
{
// Just invalidate for draw when get_compiled() is called
is_compiled = false;
}
void overlay_element::translate(s16 _x, s16 _y)
{
x += _x;
y += _y;
is_compiled = false;
}
void overlay_element::scale(f32 _x, f32 _y, bool origin_scaling)
{
if (origin_scaling)
{
x = static_cast<s16>(_x * x);
y = static_cast<s16>(_y * y);
}
w = static_cast<u16>(_x * w);
h = static_cast<u16>(_y * h);
is_compiled = false;
}
void overlay_element::set_pos(s16 _x, s16 _y)
{
x = _x;
y = _y;
is_compiled = false;
}
void overlay_element::set_size(u16 _w, u16 _h)
{
w = _w;
h = _h;
is_compiled = false;
}
void overlay_element::set_padding(u16 left, u16 right, u16 top, u16 bottom)
{
padding_left = left;
padding_right = right;
padding_top = top;
padding_bottom = bottom;
is_compiled = false;
}
void overlay_element::set_padding(u16 padding)
{
padding_left = padding_right = padding_top = padding_bottom = padding;
is_compiled = false;
}
// NOTE: Functions as a simple position offset. Top left corner is the anchor.
void overlay_element::set_margin(u16 left, u16 top)
{
margin_left = left;
margin_top = top;
is_compiled = false;
}
void overlay_element::set_margin(u16 margin)
{
margin_left = margin_top = margin;
is_compiled = false;
}
void overlay_element::set_text(const std::string& text)
{
this->text = utf8_to_u32string(text);
is_compiled = false;
}
void overlay_element::set_unicode_text(const std::u32string& text)
{
this->text = text;
is_compiled = false;
}
void overlay_element::set_text(localized_string_id id)
{
set_unicode_text(get_localized_u32string(id));
}
void overlay_element::set_font(const char* font_name, u16 font_size)
{
font_ref = fontmgr::get(font_name, font_size);
is_compiled = false;
}
void overlay_element::align_text(text_align align)
{
alignment = align;
is_compiled = false;
}
void overlay_element::set_wrap_text(bool state)
{
wrap_text = state;
is_compiled = false;
}
font* overlay_element::get_font() const
{
return font_ref ? font_ref : fontmgr::get("Arial", 12);
}
std::vector<vertex> overlay_element::render_text(const char32_t* string, f32 x, f32 y)
{
auto renderer = get_font();
const u16 clip_width = clip_text ? w : umax;
std::vector<vertex> result = renderer->render_text(string, clip_width, wrap_text);
if (!result.empty())
{
const auto apply_transform = [&]()
{
const f32 size_px = renderer->get_size_px();
for (vertex& v : result)
{
// Apply transform.
// (0, 0) has text sitting one line off the top left corner (text is outside the rect) hence the offset by text height
v.x() += x + padding_left;
v.y() += y + padding_top + size_px;
}
};
if (alignment == text_align::left)
{
apply_transform();
}
else
{
// Scan for lines and measure them
// Reposition them to the center or right depending on the alignment
std::vector<std::tuple<u32, u32, f32>> lines;
u32 line_begin = 0;
u32 line_end = 0;
u32 word_end = 0;
u32 ctr = 0;
f32 text_extents_w = w;
for (const auto& c : text)
{
switch (c)
{
case '\r':
{
continue;
}
case '\n':
{
lines.emplace_back(line_begin, std::min(word_end, line_end), text_extents_w);
word_end = line_end = line_begin = ctr;
text_extents_w = w;
continue;
}
default:
{
ctr += 4; // each glyph emits 4 vertices
if (c == ' ')
{
if (line_end == line_begin)
{
// Ignore leading whitespace
word_end = line_end = line_begin = ctr;
}
else
{
line_end = ctr;
}
}
else
{
word_end = line_end = ctr;
// Check for real text region extent
text_extents_w = std::max(result[ctr - 1].x(), text_extents_w);
}
continue;
}
}
}
// Add final line
lines.emplace_back(line_begin, std::min(word_end, line_end), std::max<f32>(text_extents_w, w));
const f32 offset_extent = (alignment == text_align::center ? 0.5f : 1.0f);
const f32 size_px = renderer->get_size_px() * 0.5f;
// Apply padding
apply_transform();
// Moves all glyphs of a line by the correct amount to get a nice alignment.
const auto move_line = [&result, &offset_extent](u32 begin, u32 end, f32 max_region_w)
{
const f32 line_length = result[end - 1].x() - result[begin].x();
if (line_length < max_region_w)
{
const f32 offset = (max_region_w - line_length) * offset_extent;
for (auto n = begin; n < end; ++n)
{
result[n].x() += offset;
}
}
};
// Properly place all lines individually
for (const auto& [begin, end, max_region_w] : lines)
{
if (begin >= end)
continue;
// Check if there's any wrapped text
if (std::fabs(result[end - 1].y() - result[begin + 3].y()) < size_px)
{
// No wrapping involved. We can just move the entire line.
move_line(begin, end, max_region_w);
continue;
}
// Wrapping involved. We have to search for the line breaks and move each line separately.
for (u32 i_begin = begin, i_next = begin + 4;; i_next += 4)
{
// Check if this is the last glyph in the line of text.
const bool is_last_glyph = i_next >= end;
// The line may be wrapped, so we need to check if the next glyph's position is below the current position.
if (is_last_glyph || (result[i_next - 1].y() - result[i_begin + 3].y() >= size_px))
{
// Whenever we reached the end of a visual line we need to move its glyphs accordingly.
const u32 i_end = i_next - (is_last_glyph ? 0 : 4);
move_line(i_begin, i_end, max_region_w);
i_begin = i_end;
if (is_last_glyph)
{
break;
}
}
}
}
}
}
return result;
}
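// Alignment arithmetic sketch (numbers assumed): for a centered line whose
// glyphs span 80px inside a 100px region, move_line computes
//   offset = (100 - 80) * 0.5 = 10
// and shifts every vertex of that line 10px to the right; right alignment
// uses offset_extent = 1.0 and shifts the full 20px instead.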
compiled_resource& overlay_element::get_compiled()
{
if (!is_compiled)
{
compiled_resources.clear();
compiled_resource compiled_resources_temp = {};
auto& cmd_bg = compiled_resources_temp.append({});
auto& config = cmd_bg.config;
config.color = back_color;
config.pulse_glow = pulse_effect_enabled;
config.pulse_sinus_offset = pulse_sinus_offset;
config.pulse_speed_modifier = pulse_speed_modifier;
auto& verts = compiled_resources_temp.draw_commands.front().verts;
verts.resize(4);
verts[0].vec4(x, y, 0.f, 0.f);
verts[1].vec4(f32(x + w), y, 1.f, 0.f);
verts[2].vec4(x, f32(y + h), 0.f, 1.f);
verts[3].vec4(f32(x + w), f32(y + h), 1.f, 1.f);
compiled_resources.add(std::move(compiled_resources_temp), margin_left, margin_top);
if (!text.empty())
{
compiled_resources_temp.clear();
auto& cmd_text = compiled_resources_temp.append({});
cmd_text.config.set_font(font_ref ? font_ref : fontmgr::get("Arial", 12));
cmd_text.config.color = fore_color;
cmd_text.verts = render_text(text.c_str(), static_cast<f32>(x), static_cast<f32>(y));
if (!cmd_text.verts.empty())
compiled_resources.add(std::move(compiled_resources_temp), margin_left, margin_top);
}
is_compiled = true;
}
return compiled_resources;
}
void overlay_element::measure_text(u16& width, u16& height, bool ignore_word_wrap) const
{
if (text.empty())
{
width = height = 0;
return;
}
auto renderer = get_font();
f32 text_width = 0.f;
f32 unused = 0.f;
f32 max_w = 0.f;
f32 last_word = 0.f;
height = static_cast<u16>(renderer->get_size_px());
for (auto c : text)
{
if (c == '\n')
{
height += static_cast<u16>(renderer->get_size_px() + 2);
max_w = std::max(max_w, text_width);
text_width = 0.f;
last_word = 0.f;
continue;
}
if (c == ' ')
{
last_word = text_width;
}
renderer->get_char(c, text_width, unused);
if (!ignore_word_wrap && wrap_text && text_width >= w)
{
if ((text_width - last_word) < w)
{
max_w = std::max(max_w, last_word);
text_width -= (last_word + renderer->get_em_size());
height += static_cast<u16>(renderer->get_size_px() + 2);
}
}
}
max_w = std::max(max_w, text_width);
width = static_cast<u16>(ceilf(max_w));
}
layout_container::layout_container()
{
// Transparent by default
back_color.a = 0.f;
}
void layout_container::translate(s16 _x, s16 _y)
{
overlay_element::translate(_x, _y);
for (auto &itm : m_items)
itm->translate(_x, _y);
}
void layout_container::set_pos(s16 _x, s16 _y)
{
const s16 dx = _x - x;
const s16 dy = _y - y;
translate(dx, dy);
}
compiled_resource& layout_container::get_compiled()
{
if (!is_compiled)
{
compiled_resource result = overlay_element::get_compiled();
for (auto &itm : m_items)
result.add(itm->get_compiled());
compiled_resources = result;
}
return compiled_resources;
}
void layout_container::add_spacer()
{
std::unique_ptr<overlay_element> spacer_element = std::make_unique<spacer>();
add_element(spacer_element);
}
overlay_element* vertical_layout::add_element(std::unique_ptr<overlay_element>& item, int offset)
{
if (auto_resize)
{
item->set_pos(item->x + x, h + pack_padding + y);
h += item->h + pack_padding;
w = std::max(w, item->w);
}
else
{
item->set_pos(item->x + x, advance_pos + pack_padding + y);
advance_pos += item->h + pack_padding;
}
if (offset < 0)
{
m_items.push_back(std::move(item));
return m_items.back().get();
}
auto result = item.get();
m_items.insert(m_items.begin() + offset, std::move(item));
return result;
}
compiled_resource& vertical_layout::get_compiled()
{
if (scroll_offset_value == 0 && auto_resize)
return layout_container::get_compiled();
if (!is_compiled)
{
compiled_resource result = overlay_element::get_compiled();
const f32 global_y_offset = static_cast<f32>(-scroll_offset_value);
for (auto &item : m_items)
{
if (!item)
{
rsx_log.error("Found null item in overlay_controls");
continue;
}
const s32 item_y_limit = s32{item->y} + item->h - scroll_offset_value - y;
const s32 item_y_base = s32{item->y} - scroll_offset_value - y;
if (item_y_limit < 0 || item_y_base > h)
{
// Out of bounds
continue;
}
if (item_y_limit > h || item_y_base < 0)
{
// Partial render
areaf clip_rect = static_cast<areaf>(areai{x, y, (x + w), (y + h)});
result.add(item->get_compiled(), 0.f, global_y_offset, clip_rect);
}
else
{
// Normal
result.add(item->get_compiled(), 0.f, global_y_offset);
}
}
compiled_resources = result;
}
return compiled_resources;
}
u16 vertical_layout::get_scroll_offset_px()
{
return scroll_offset_value;
}
overlay_element* horizontal_layout::add_element(std::unique_ptr<overlay_element>& item, int offset)
{
if (auto_resize)
{
item->set_pos(w + pack_padding + x, item->y + y);
w += item->w + pack_padding;
h = std::max(h, item->h);
}
else
{
item->set_pos(advance_pos + pack_padding + x, item->y + y);
advance_pos += item->w + pack_padding;
}
if (offset < 0)
{
m_items.push_back(std::move(item));
return m_items.back().get();
}
auto result = item.get();
m_items.insert(m_items.begin() + offset, std::move(item));
return result;
}
compiled_resource& horizontal_layout::get_compiled()
{
if (scroll_offset_value == 0 && auto_resize)
return layout_container::get_compiled();
if (!is_compiled)
{
compiled_resource result = overlay_element::get_compiled();
const f32 global_x_offset = static_cast<f32>(-scroll_offset_value);
for (auto &item : m_items)
{
const s32 item_x_limit = s32{item->x} + item->w - scroll_offset_value - x;
const s32 item_x_base = s32{item->x} - scroll_offset_value - x;
if (item_x_limit < 0 || item_x_base > w)
{
// Out of bounds
continue;
}
else if (item_x_limit > w || item_x_base < 0)
{
// Partial render
areaf clip_rect = static_cast<areaf>(areai{x, y, (x + w), (y + h)});
result.add(item->get_compiled(), global_x_offset, 0.f, clip_rect);
}
else
{
// Normal
result.add(item->get_compiled(), global_x_offset, 0.f);
}
}
compiled_resources = result;
}
return compiled_resources;
}
u16 horizontal_layout::get_scroll_offset_px()
{
return scroll_offset_value;
}
compiled_resource& image_view::get_compiled()
{
if (!is_compiled)
{
auto& result = overlay_element::get_compiled();
auto& cmd_img = result.draw_commands.front();
cmd_img.config.set_image_resource(image_resource_ref);
cmd_img.config.color = fore_color;
cmd_img.config.external_data_ref = external_ref;
cmd_img.config.blur_strength = blur_strength;
// Make padding work for images (treat them as the content instead of the 'background')
auto& verts = cmd_img.verts;
verts[0] += vertex(padding_left, padding_bottom, 0, 0);
verts[1] += vertex(-padding_right, padding_bottom, 0, 0);
verts[2] += vertex(padding_left, -padding_top, 0, 0);
verts[3] += vertex(-padding_right, -padding_top, 0, 0);
is_compiled = true;
}
return compiled_resources;
}
void image_view::set_image_resource(u8 resource_id)
{
image_resource_ref = resource_id;
external_ref = nullptr;
}
void image_view::set_raw_image(image_info* raw_image)
{
image_resource_ref = image_resource_id::raw_image;
external_ref = raw_image;
}
void image_view::clear_image()
{
image_resource_ref = image_resource_id::none;
external_ref = nullptr;
}
void image_view::set_blur_strength(u8 strength)
{
blur_strength = strength;
}
image_button::image_button()
{
// Do not clip text to region extents
// TODO: Define custom clipping region or use two controls to emulate
clip_text = false;
}
image_button::image_button(u16 _w, u16 _h)
{
clip_text = false;
set_size(_w, _h);
}
void image_button::set_text_vertical_adjust(s16 offset)
{
m_text_offset_y = offset;
}
void image_button::set_size(u16 /*w*/, u16 h)
{
image_view::set_size(h, h);
m_text_offset_x = (h / 2) + text_horizontal_offset; // By default text is at the horizontal center
}
compiled_resource& image_button::get_compiled()
{
if (!is_compiled)
{
auto& compiled = image_view::get_compiled();
for (auto& cmd : compiled.draw_commands)
{
if (cmd.config.texture_ref == image_resource_id::font_file)
{
// Text, translate geometry to the right
for (auto &v : cmd.verts)
{
v.values[0] += m_text_offset_x;
v.values[1] += m_text_offset_y;
}
}
}
}
return compiled_resources;
}
label::label(const std::string& text)
{
set_text(text);
}
bool label::auto_resize(bool grow_only, u16 limit_w, u16 limit_h)
{
u16 new_width, new_height;
u16 old_width = w, old_height = h;
measure_text(new_width, new_height, true);
new_width += padding_left + padding_right;
new_height += padding_top + padding_bottom;
if (new_width > limit_w && wrap_text)
measure_text(new_width, new_height, false);
if (grow_only)
{
new_width = std::max(w, new_width);
new_height = std::max(h, new_height);
}
w = std::min(new_width, limit_w);
h = std::min(new_height, limit_h);
bool size_changed = old_width != new_width || old_height != new_height;
return size_changed;
}
compiled_resource& rounded_rect::get_compiled()
{
if (!is_compiled)
{
compiled_resources.clear();
#ifdef __APPLE__
if (true)
#else
if (radius == 0 || radius > (w / 2))
#endif
{
// Invalid radius
compiled_resources = overlay_element::get_compiled();
}
else
{
compiled_resource compiled_resources_temp = {};
compiled_resources_temp.append({}); // Bg horizontal mid
compiled_resources_temp.append({}); // Bg horizontal top
compiled_resources_temp.append({}); // Bg horizontal bottom
compiled_resources_temp.append({}); // Bg upper-left
compiled_resources_temp.append({}); // Bg lower-left
compiled_resources_temp.append({}); // Bg upper-right
compiled_resources_temp.append({}); // Bg lower-right
for (auto& draw_cmd : compiled_resources_temp.draw_commands)
{
auto& config = draw_cmd.config;
config.color = back_color;
config.disable_vertex_snap = true;
config.pulse_glow = pulse_effect_enabled;
config.pulse_sinus_offset = pulse_sinus_offset;
config.pulse_speed_modifier = pulse_speed_modifier;
}
auto& bg0 = compiled_resources_temp.draw_commands[0];
auto& bg1 = compiled_resources_temp.draw_commands[1];
auto& bg2 = compiled_resources_temp.draw_commands[2];
bg0.verts.emplace_back(f32(x), f32(y + radius), 0.f, 0.f);
bg0.verts.emplace_back(f32(x + w), f32(y + radius), 0.f, 0.f);
bg0.verts.emplace_back(f32(x), f32(y + h) - radius, 0.f, 0.f);
bg0.verts.emplace_back(f32(x + w), f32(y + h) - radius, 0.f, 0.f);
bg1.verts.emplace_back(f32(x + radius), f32(y), 0.f, 0.f);
bg1.verts.emplace_back(f32(x + w) - radius, f32(y), 0.f, 0.f);
bg1.verts.emplace_back(f32(x + radius), f32(y + radius), 0.f, 0.f);
bg1.verts.emplace_back(f32(x + w) - radius, f32(y + radius), 0.f, 0.f);
bg2.verts.emplace_back(f32(x + radius), f32(y + h) - radius, 0.f, 0.f);
bg2.verts.emplace_back(f32(x + w) - radius, f32(y + h) - radius, 0.f, 0.f);
bg2.verts.emplace_back(f32(x + radius), f32(y + h), 0.f, 0.f);
bg2.verts.emplace_back(f32(x + w) - radius, f32(y + h), 0.f, 0.f);
// Generate the quadrants
const f32 corners[4][2] =
{
{ f32(x + radius), f32(y + radius) },
{ f32(x + radius), f32(y + h) - radius },
{ f32(x + w) - radius, f32(y + radius) },
{ f32(x + w) - radius, f32(y + h) - radius }
};
const f32 radius_f = static_cast<f32>(radius);
const f32 scale[4][2] =
{
{ -radius_f, -radius_f },
{ -radius_f, +radius_f },
{ +radius_f, -radius_f },
{ +radius_f, +radius_f }
};
for (int i = 0; i < 4; ++i)
{
auto& command = compiled_resources_temp.draw_commands[i + 3];
command.config.primitives = rsx::overlays::primitive_type::triangle_fan;
command.verts = generate_unit_quadrant(num_control_points, corners[i], scale[i]);
}
compiled_resources.add(std::move(compiled_resources_temp), margin_left, margin_top);
}
is_compiled = true;
}
return compiled_resources;
}
}
}
| 26,952 | C++ | .cpp | 872 | 25.969037 | 124 | 0.619501 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,410 | overlay_utils.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_utils.cpp |
#include "stdafx.h"
#include "overlay_utils.h"
#include <unordered_map>
LOG_CHANNEL(overlays);
static auto s_ascii_lowering_map = []()
{
std::unordered_map<u32, u8> _map;
// Fullwidth block (FF00-FF5E)
for (u32 u = 0xFF01, c = 0x21; u <= 0xFF5E; ++u, ++c)
{
_map[u] = u8(c);
}
// Em and En space variations (General Punctuation)
for (u32 u = 0x2000; u <= 0x200A; ++u)
{
_map[u] = u8(' ');
}
// Misc space variations
_map[0x202F] = u8(0xA0); // narrow NBSP
_map[0x205F] = u8(' '); // medium mathematical space
_map[0x3164] = u8(' '); // hangul filler
// Ideographic (CJK punctuation)
_map[0x3000] = u8(' '); // space
_map[0x3001] = u8(','); // comma
_map[0x3002] = u8('.'); // fullstop
_map[0x3003] = u8('"'); // ditto
_map[0x3007] = u8('0'); // wide zero
_map[0x3008] = u8('<'); // left angle brace
_map[0x3009] = u8('>'); // right angle brace
_map[0x300A] = u8(0xAB); // double left angle brace
_map[0x300B] = u8(0xBB); // double right angle brace
_map[0x300C] = u8('['); // the following are all slight variations on the angular brace
_map[0x300D] = u8(']');
_map[0x300E] = u8('[');
_map[0x300F] = u8(']');
_map[0x3010] = u8('[');
_map[0x3011] = u8(']');
_map[0x3014] = u8('[');
_map[0x3015] = u8(']');
_map[0x3016] = u8('[');
_map[0x3017] = u8(']');
_map[0x3018] = u8('[');
_map[0x3019] = u8(']');
_map[0x301A] = u8('[');
_map[0x301B] = u8(']');
_map[0x301C] = u8('~'); // wave dash (inverted tilde)
_map[0x301D] = u8('"'); // reverse double prime quotation
_map[0x301E] = u8('"'); // double prime quotation
_map[0x301F] = u8('"'); // low double prime quotation
_map[0x3031] = u8('<'); // vertical kana repeat mark
return _map;
}();
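// Illustration of the lowering table (assumed inputs), as consumed by
// utf8_to_ascii8 below:
//   fullwidth "ＡＢＣ" (U+FF21..U+FF23) -> "ABC"
//   U+3001 (ideographic comma)          -> ','
//   any unmapped code point             -> '#'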
template<typename F>
void process_multibyte(const std::string& s, F&& func)
{
const usz end = s.length();
for (usz index = 0; index < end; ++index)
{
const u8 code = static_cast<u8>(s[index]);
if (!code)
{
break;
}
if (code <= 0x7F)
{
std::invoke(func, code);
continue;
}
const u32 extra_bytes = (code <= 0xDF) ? 1u : (code <= 0xEF) ? 2u : 3u;
if ((index + extra_bytes) > end)
{
// Malformed string, abort
overlays.error("Failed to decode supossedly malformed utf8 string '%s'", s);
break;
}
u32 u_code = 0;
switch (extra_bytes)
{
case 1:
// 11 bits, 6 + 5
u_code = (u32(code & 0x1F) << 6) | u32(s[index + 1] & 0x3F);
break;
case 2:
// 16 bits, 6 + 6 + 4
u_code = (u32(code & 0xF) << 12) | (u32(s[index + 1] & 0x3F) << 6) | u32(s[index + 2] & 0x3F);
break;
case 3:
// 21 bits, 6 + 6 + 6 + 3
u_code = (u32(code & 0x7) << 18) | (u32(s[index + 1] & 0x3F) << 12) | (u32(s[index + 2] & 0x3F) << 6) | u32(s[index + 3] & 0x3F);
break;
default:
fmt::throw_exception("Unreachable");
}
index += extra_bytes;
std::invoke(func, u_code);
}
}
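// Decoding example for the bit arithmetic above: the euro sign U+20AC is the
// UTF-8 sequence E2 82 AC. The lead byte 0xE2 (<= 0xEF) selects the
// two-extra-byte branch, giving
//   (0xE2 & 0x0F) << 12 | (0x82 & 0x3F) << 6 | (0xAC & 0x3F)
//   = 0x2000 | 0x80 | 0x2C = 0x20AC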
std::string utf8_to_ascii8(const std::string& utf8_string)
{
std::string out;
out.reserve(utf8_string.length());
process_multibyte(utf8_string, [&out](u32 code)
{
if (code <= 0x7F)
{
out.push_back(static_cast<u8>(code));
}
else if (auto replace = s_ascii_lowering_map.find(code);
replace == s_ascii_lowering_map.end())
{
out.push_back('#');
}
else
{
out.push_back(replace->second);
}
});
return out;
}
std::string utf16_to_ascii8(const std::u16string& utf16_string)
{
// Strip extended codes, map to '#' instead (placeholder)
std::string out;
out.reserve(utf16_string.length());
for (const auto& code : utf16_string)
{
if (!code)
break;
out.push_back(code > 0xFF ? '#' : static_cast<char>(code));
}
return out;
}
std::u16string ascii8_to_utf16(const std::string& ascii_string)
{
std::u16string out;
out.reserve(ascii_string.length());
for (const auto& code : ascii_string)
{
if (!code)
break;
out.push_back(static_cast<char16_t>(code));
}
return out;
}
std::u32string utf8_to_u32string(const std::string& utf8_string)
{
std::u32string result;
result.reserve(utf8_string.size());
process_multibyte(utf8_string, [&result](u32 code)
{
result.push_back(static_cast<char32_t>(code));
});
return result;
}
std::u16string u32string_to_utf16(const std::u32string& utf32_string)
{
std::u16string result;
result.reserve(utf32_string.size());
for (const auto& code : utf32_string)
{
result.push_back(static_cast<char16_t>(code));
}
return result;
}
std::u32string utf16_to_u32string(const std::u16string& utf16_string)
{
std::u32string result;
result.reserve(utf16_string.size());
for (const auto& code : utf16_string)
{
result.push_back(code);
}
return result;
}
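// Caveat sketch (illustration): these helpers are deliberately simple and
// lossy at the edges. u32string_to_utf16 narrows each code point to a single
// UTF-16 unit (no surrogate pairs), so only BMP text survives a round trip:
//
//   const std::u32string bmp = U"abc\u00e9";
//   ensure(utf16_to_u32string(u32string_to_utf16(bmp)) == bmp); // holds for BMP text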
| 4,595 | C++ | .cpp | 174 | 23.902299 | 132 | 0.625256 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

5,411 | overlay_debug_overlay.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_debug_overlay.cpp |
#include "stdafx.h"
#include "overlay_manager.h"
#include "overlay_debug_overlay.h"
#include "Emu/system_config.h"
namespace rsx
{
namespace overlays
{
debug_overlay::debug_overlay()
{
text_display.set_size(1260, 40);
text_display.set_pos(10, 10);
text_display.set_font("n023055ms.ttf", 10);
text_display.align_text(overlay_element::text_align::left);
text_display.set_wrap_text(true);
text_display.fore_color = { 0.3f, 1.f, 0.3f, 1.f };
text_display.back_color.a = 0.f;
}
compiled_resource debug_overlay::get_compiled()
{
if (!visible)
{
return {};
}
if (const auto [dirty, text] = text_guard.get_text(); dirty)
{
text_display.set_text(text);
}
compiled_resource result;
result.add(text_display.get_compiled());
return result;
}
void debug_overlay::set_text(std::string&& text)
{
text_guard.set_text(std::move(text));
visible = true;
}
extern void reset_debug_overlay()
{
if (!g_cfg.misc.use_native_interface)
return;
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
auto overlay = manager->get<rsx::overlays::debug_overlay>();
if (g_cfg.video.overlay || g_cfg.io.debug_overlay)
{
if (!overlay)
{
overlay = manager->create<rsx::overlays::debug_overlay>();
}
}
else if (overlay)
{
manager->remove<rsx::overlays::debug_overlay>();
}
}
}
extern void set_debug_overlay_text(std::string&& text)
{
if (!g_cfg.misc.use_native_interface || (!g_cfg.video.overlay && !g_cfg.io.debug_overlay))
return;
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
if (auto overlay = manager->get<rsx::overlays::debug_overlay>())
{
overlay->set_text(std::move(text));
}
}
}
} // namespace overlays
} // namespace rsx
| 1,845 | C++ | .cpp | 71 | 22.056338 | 93 | 0.64966 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,412 | overlay_save_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_save_dialog.cpp |
#include "stdafx.h"
#include "overlay_save_dialog.h"
#include "Utilities/date_time.h"
#include "Emu/System.h"
namespace rsx
{
namespace overlays
{
save_dialog::save_dialog_entry::save_dialog_entry(const std::string& text1, const std::string& text2, const std::string& text3, u8 resource_id, const std::vector<u8>& icon_buf)
{
std::unique_ptr<overlay_element> image = std::make_unique<image_view>();
image->set_size(160, 110);
image->set_padding(36, 36, 11, 11); // Square image, 88x88
if (resource_id != image_resource_id::raw_image)
{
static_cast<image_view*>(image.get())->set_image_resource(resource_id);
}
else if (!icon_buf.empty())
{
image->set_padding(0, 0, 11, 11); // Half sized icon, 320x176->160x88
icon_data = std::make_unique<image_info>(icon_buf);
static_cast<image_view*>(image.get())->set_raw_image(icon_data.get());
}
else
{
// Fallback
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::save);
}
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> header_text = std::make_unique<label>(text1);
std::unique_ptr<overlay_element> subtext = std::make_unique<label>(text2);
padding->set_size(1, 1);
header_text->set_size(800, 40);
header_text->set_font("Arial", 16);
header_text->set_wrap_text(true);
subtext->set_size(800, 0);
subtext->set_font("Arial", 14);
subtext->set_wrap_text(true);
static_cast<label*>(subtext.get())->auto_resize(true);
// Make back color transparent for text
header_text->back_color.a = 0.f;
subtext->back_color.a = 0.f;
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
static_cast<vertical_layout*>(text_stack.get())->add_element(header_text);
static_cast<vertical_layout*>(text_stack.get())->add_element(subtext);
if (!text3.empty())
{
// Detail info actually exists
std::unique_ptr<overlay_element> detail = std::make_unique<label>(text3);
detail->set_size(800, 0);
detail->set_font("Arial", 12);
detail->set_wrap_text(true);
detail->back_color.a = 0.f;
static_cast<label*>(detail.get())->auto_resize(true);
static_cast<vertical_layout*>(text_stack.get())->add_element(detail);
}
if (text_stack->h > image->h)
{
std::unique_ptr<overlay_element> padding2 = std::make_unique<spacer>();
padding2->set_size(1, 5);
static_cast<vertical_layout*>(text_stack.get())->add_element(padding2);
}
// Pack
this->pack_padding = 15;
add_element(image);
add_element(text_stack);
}
save_dialog::save_dialog()
{
m_dim_background = std::make_unique<overlay_element>();
m_dim_background->set_size(virtual_width, virtual_height);
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540);
m_description = std::make_unique<label>();
m_time_thingy = std::make_unique<label>();
m_list->set_pos(20, 85);
m_description->set_font("Arial", 20);
m_description->set_pos(20, 37);
m_description->set_text(localized_string_id::RSX_OVERLAYS_SAVE_DIALOG_TITLE);
m_time_thingy->set_font("Arial", 14);
m_time_thingy->set_pos(1000, 30);
m_time_thingy->set_text(date_time::current_time());
m_description->auto_resize();
m_time_thingy->auto_resize();
m_dim_background->back_color.a = 0.5f;
m_description->back_color.a = 0.f;
m_time_thingy->back_color.a = 0.f;
fade_animation.duration_sec = 0.15f;
return_code = selection_code::canceled;
}
void save_dialog::update(u64 timestamp_us)
{
m_time_thingy->set_text(date_time::current_time());
m_time_thingy->auto_resize();
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
}
void save_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (fade_animation.active) return;
bool close_dialog = false;
switch (button_press)
{
case pad_button::cross:
if (m_no_saves)
break;
return_code = m_list->get_selected_index();
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
close_dialog = true;
break;
case pad_button::circle:
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
close_dialog = true;
break;
case pad_button::dpad_up:
case pad_button::ls_up:
m_list->select_previous();
break;
case pad_button::dpad_down:
case pad_button::ls_down:
m_list->select_next();
break;
case pad_button::L1:
m_list->select_previous(10);
break;
case pad_button::R1:
m_list->select_next(10);
break;
default:
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
if (close_dialog)
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
fade_animation.on_finish = [this]
{
close(true, true);
};
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
else if (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
}
compiled_resource save_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background->get_compiled());
result.add(m_list->get_compiled());
result.add(m_description->get_compiled());
result.add(m_time_thingy->get_compiled());
if (m_no_saves)
result.add(m_no_saves_text->get_compiled());
fade_animation.apply(result);
return result;
}
s32 save_dialog::show(std::vector<SaveDataEntry>& save_entries, u32 focused, u32 op, vm::ptr<CellSaveDataListSet> listSet, bool enable_overlay)
{
rsx_log.notice("Showing native UI save_dialog (save_entries=%d, focused=%d, op=0x%x, listSet=*0x%x, enable_overlay=%d)", save_entries.size(), focused, op, listSet, enable_overlay);
visible = false;
if (enable_overlay)
{
m_dim_background->back_color.a = 0.9f;
}
else
{
m_dim_background->back_color.a = 0.5f;
}
std::vector<std::unique_ptr<overlay_element>> entries;
for (auto& entry : save_entries)
{
const std::string date_and_size = fmt::format("%s %s", entry.date(), entry.data_size());
std::unique_ptr<overlay_element> e = std::make_unique<save_dialog_entry>(entry.subtitle, date_and_size, entry.details, image_resource_id::raw_image, entry.iconBuf);
entries.emplace_back(std::move(e));
}
if (op >= 8)
{
m_description->set_text(localized_string_id::RSX_OVERLAYS_SAVE_DIALOG_DELETE);
}
else if (op & 1)
{
m_description->set_text(localized_string_id::RSX_OVERLAYS_SAVE_DIALOG_LOAD);
}
else
{
m_description->set_text(localized_string_id::RSX_OVERLAYS_SAVE_DIALOG_SAVE);
}
const bool newpos_head = listSet && listSet->newData && listSet->newData->iconPosition == CELL_SAVEDATA_ICONPOS_HEAD;
if (!newpos_head)
{
for (auto& entry : entries)
{
m_list->add_entry(entry);
}
}
if (listSet && listSet->newData)
{
std::string title = get_localized_string(localized_string_id::CELL_SAVEDATA_NEW_SAVED_DATA_TITLE);
std::vector<u8> icon;
int id = resource_config::standard_image_resource::new_entry;
if (const auto picon = +listSet->newData->icon)
{
if (picon->title)
title = picon->title.get_ptr();
if (picon->iconBuf && picon->iconBufSize && picon->iconBufSize <= 225280)
{
const auto iconBuf = static_cast<u8*>(picon->iconBuf.get_ptr());
const auto iconEnd = iconBuf + picon->iconBufSize;
icon.assign(iconBuf, iconEnd);
}
}
if (!icon.empty())
{
id = image_resource_id::raw_image;
}
std::unique_ptr<overlay_element> new_stub = std::make_unique<save_dialog_entry>(title, get_localized_string(localized_string_id::CELL_SAVEDATA_NEW_SAVED_DATA_SUB_TITLE), "", id, icon);
m_list->add_entry(new_stub);
}
if (newpos_head)
{
for (auto& entry : entries)
{
m_list->add_entry(entry);
}
}
if (m_list->m_items.empty())
{
m_no_saves_text = std::make_unique<label>(get_localized_string(localized_string_id::CELL_SAVEDATA_NO_DATA));
m_no_saves_text->set_font("Arial", 20);
m_no_saves_text->align_text(overlay_element::text_align::center);
m_no_saves_text->set_pos(m_list->x, m_list->y + m_list->h / 2);
m_no_saves_text->set_size(m_list->w, 30);
m_no_saves_text->back_color.a = 0;
m_no_saves = true;
m_list->set_cancel_only(true);
}
else
{
// Only select an entry if there are entries available
m_list->select_entry(focused);
}
m_description->auto_resize();
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
visible = true;
if (const auto error = run_input_loop())
{
if (error != selection_code::canceled)
{
rsx_log.error("Save dialog input loop exited with error code=%d", error);
}
return error;
}
if (return_code >= 0)
{
if (newpos_head)
{
return return_code - 1;
}
if (static_cast<usz>(return_code) == entries.size())
{
return selection_code::new_save;
}
}
return return_code;
}
} // namespace overlays
} // namespace rsx
| 9,619 | C++ | .cpp | 281 | 29.743772 | 188 | 0.659162 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

5,413 | overlay_edit_text.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_edit_text.cpp |
#include "stdafx.h"
#include "overlay_edit_text.hpp"
namespace rsx
{
namespace overlays
{
static usz get_line_start(const std::u32string& text, usz pos)
{
if (pos == 0)
{
return 0;
}
const usz line_start = text.rfind('\n', pos - 1);
if (line_start == std::string::npos)
{
return 0;
}
return line_start + 1;
}
static usz get_line_end(const std::u32string& text, usz pos)
{
const usz line_end = text.find('\n', pos);
if (line_end == std::string::npos)
{
return text.length();
}
return line_end;
}
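// Example of the helpers above (assumed input): for value = U"ab\ncdef" with
// the caret at position 5 (between 'd' and 'e'):
//   get_line_start(value, 5) == 3  // one past the '\n'
//   get_line_end(value, 5)   == 7  // end of text, no later '\n'
// move_caret(direction::up) then keeps the column: caret_pos_in_line = 5 - 3
// = 2, and the new position is min(prev_line_end = 2, 0 + 2) == 2, i.e. the
// end of the first line.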
void edit_text::move_caret(direction dir)
{
m_reset_caret_pulse = true;
switch (dir)
{
case direction::left:
{
if (caret_position)
{
caret_position--;
refresh();
}
break;
}
case direction::right:
{
if (caret_position < value.length())
{
caret_position++;
refresh();
}
break;
}
case direction::up:
{
const usz current_line_start = get_line_start(value, caret_position);
if (current_line_start == 0)
{
// This is the first line, so caret moves to the very beginning
caret_position = 0;
refresh();
break;
}
const usz caret_pos_in_line = caret_position - current_line_start;
const usz prev_line_end = current_line_start - 1;
const usz prev_line_start = get_line_start(value, prev_line_end);
// TODO: Save the caret column in a buffer so that moving up and then back down restores the original position
caret_position = std::min(prev_line_end, prev_line_start + caret_pos_in_line);
refresh();
break;
}
case direction::down:
{
const usz current_line_end = get_line_end(value, caret_position);
if (current_line_end == value.length())
{
// This is the last line, so caret moves to the very end
caret_position = current_line_end;
refresh();
break;
}
const usz current_line_start = get_line_start(value, caret_position);
const usz caret_pos_in_line = caret_position - current_line_start;
const usz next_line_start = current_line_end + 1;
const usz next_line_end = get_line_end(value, next_line_start);
// TODO: Save the caret column in a buffer so that moving down and then back up restores the original position
caret_position = std::min(next_line_end, next_line_start + caret_pos_in_line);
refresh();
break;
}
}
}
void edit_text::set_text(const std::string& text)
{
set_unicode_text(utf8_to_u32string(text));
}
void edit_text::set_unicode_text(const std::u32string& text)
{
value = text;
if (value.empty())
{
overlay_element::set_unicode_text(placeholder);
}
else if (password_mode)
{
overlay_element::set_unicode_text(std::u32string(value.size(), U'*'));
}
else
{
overlay_element::set_unicode_text(value);
}
}
void edit_text::set_placeholder(const std::u32string& placeholder_text)
{
placeholder = placeholder_text;
}
void edit_text::insert_text(const std::u32string& str)
{
if (caret_position == 0)
{
// Start
value = str + value;
}
else if (caret_position == value.length())
{
// End
value += str;
}
else
{
// Middle
value.insert(caret_position, str);
}
caret_position += str.length();
m_reset_caret_pulse = true;
set_unicode_text(value);
refresh();
}
void edit_text::erase()
{
if (!caret_position)
{
return;
}
if (caret_position == 1)
{
value = value.length() > 1 ? value.substr(1) : U"";
}
else if (caret_position == value.length())
{
value = value.substr(0, caret_position - 1);
}
else
{
value = value.substr(0, caret_position - 1) + value.substr(caret_position);
}
caret_position--;
m_reset_caret_pulse = true;
set_unicode_text(value);
refresh();
}
void edit_text::del()
{
if (caret_position >= value.length())
{
return;
}
if (caret_position == 0)
{
value = value.length() > 1 ? value.substr(1) : U"";
}
else
{
value = value.substr(0, caret_position) + value.substr(caret_position + 1);
}
m_reset_caret_pulse = true;
set_unicode_text(value);
refresh();
}
compiled_resource& edit_text::get_compiled()
{
if (!is_compiled)
{
auto& compiled = label::get_compiled();
overlay_element caret;
auto renderer = get_font();
const auto caret_loc = renderer->get_char_offset(text.c_str(), caret_position, clip_text ? w : -1, wrap_text);
caret.set_pos(static_cast<u16>(caret_loc.first) + padding_left + x, static_cast<u16>(caret_loc.second) + padding_top + y);
caret.set_size(1, static_cast<u16>(renderer->get_size_px() + 2));
caret.fore_color = fore_color;
caret.back_color = fore_color;
caret.pulse_effect_enabled = true;
if (m_reset_caret_pulse)
{
// Reset the pulse so it restarts slightly below 1 and rising on each user interaction
caret.set_sinus_offset(1.6f);
m_reset_caret_pulse = false;
}
compiled.add(caret.get_compiled());
for (auto& cmd : compiled.draw_commands)
{
// TODO: Scrolling by using scroll offset
cmd.config.clip_region = true;
cmd.config.clip_rect = {static_cast<f32>(x), static_cast<f32>(y), static_cast<f32>(x + w), static_cast<f32>(y + h)};
}
is_compiled = true;
}
return compiled_resources;
}
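// The caret is rendered as a 1px-wide pulsing quad placed at the glyph
// offset reported by the font, and every draw command is clipped to the
// widget rectangle so overflowing text never spills outside the control.
// Minimal usage sketch (hypothetical caller, not part of this file):
//
//   edit_text box;
//   box.set_pos(20, 20);
//   box.set_size(400, 40);
//   box.insert_text(U"hello");     // caret advances to position 5
//   auto& rc = box.get_compiled(); // caret + clipped text draw commands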
} // namespace overlays
} // namespace rsx
// File: RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_manager.cpp
#include "stdafx.h"
#include "overlay_manager.h"
#include "Emu/System.h"
#include <util/asm.hpp>
namespace rsx
{
namespace overlays
{
display_manager::display_manager(int) noexcept
{
m_input_thread = std::make_shared<named_thread<overlay_input_thread>>();
(*m_input_thread)([this]()
{
input_thread_loop();
});
}
display_manager::~display_manager()
{
if (m_input_thread)
{
// This keeps the input thread from looping again
m_input_thread_abort.store(true);
// Wake it if it is asleep
const input_thread_context_t wakeup_node =
{
"stop_node",
nullptr,
nullptr,
nullptr,
nullptr
};
m_input_token_stack.push(wakeup_node);
// Wait for join
*m_input_thread = thread_state::aborting;
while (*m_input_thread <= thread_state::aborting)
{
utils::pause();
}
}
}
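// Shutdown handshake: the dummy "stop_node" pushed by the destructor
// unblocks an input thread parked in thread_ctrl::wait_on(m_input_token_stack)
// so that the abort flag is observed before the join loop above completes.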
void display_manager::lock()
{
m_list_mutex.lock();
}
void display_manager::unlock()
{
if (m_pending_removals_count > 0)
{
cleanup_internal();
}
m_list_mutex.unlock();
}
void display_manager::lock_shared()
{
m_list_mutex.lock_shared();
}
void display_manager::unlock_shared()
{
m_list_mutex.unlock_shared();
}
std::shared_ptr<overlay> display_manager::get(u32 uid)
{
reader_lock lock(m_list_mutex);
for (const auto& iface : m_iface_list)
{
if (iface->uid == uid)
return iface;
}
return {};
}
void display_manager::remove(u32 uid)
{
if (m_list_mutex.try_lock())
{
remove_uid(uid);
m_list_mutex.unlock();
return;
}
// Enqueue
m_uids_to_remove.push(uid);
m_pending_removals_count++;
}
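// Removal is best-effort synchronous: if the list lock is contended, the uid
// is queued instead and drained by cleanup_internal() on the next unlock()
// or dispose(), which is why unlock() checks m_pending_removals_count first.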
void display_manager::dispose(const std::vector<u32>& uids)
{
std::lock_guard lock(m_list_mutex);
if (m_pending_removals_count > 0)
{
cleanup_internal();
}
m_dirty_list.erase
(
std::remove_if(m_dirty_list.begin(), m_dirty_list.end(), [&uids](std::shared_ptr<overlay>& e)
{
return std::find(uids.begin(), uids.end(), e->uid) != uids.end();
}),
m_dirty_list.end()
);
}
bool display_manager::remove_type(u32 type_id)
{
bool found = false;
for (auto It = m_iface_list.begin(); It != m_iface_list.end();)
{
if (It->get()->type_index == type_id)
{
on_overlay_removed(*It);
m_dirty_list.push_back(std::move(*It));
It = m_iface_list.erase(It);
found = true;
}
else
{
++It;
}
}
return found;
}
bool display_manager::remove_uid(u32 uid)
{
for (auto It = m_iface_list.begin(); It != m_iface_list.end(); It++)
{
const auto e = It->get();
if (e->uid == uid)
{
on_overlay_removed(*It);
m_dirty_list.push_back(std::move(*It));
m_iface_list.erase(It);
return true;
}
}
return false;
}
void display_manager::cleanup_internal()
{
for (auto&& uid : m_uids_to_remove.pop_all())
{
remove_uid(uid);
m_pending_removals_count--;
}
for (auto&& type_id : m_type_ids_to_remove.pop_all())
{
remove_type(type_id);
m_pending_removals_count--;
}
}
void display_manager::on_overlay_activated(const std::shared_ptr<overlay>& /*item*/)
{
// TODO: Internal management, callbacks, etc
}
void display_manager::attach_thread_input(
u32 uid,
const std::string_view& name,
std::function<void()> on_input_loop_enter,
std::function<void(s32)> on_input_loop_exit,
std::function<s32()> input_loop_override)
{
if (auto iface = std::dynamic_pointer_cast<user_interface>(get(uid)))
{
std::lock_guard lock(m_input_stack_guard);
// Add our interface to the queue
m_input_token_stack.push(
name,
std::move(iface),
on_input_loop_enter,
on_input_loop_exit,
input_loop_override);
// Signal input thread loop after pushing to avoid a race.
m_input_thread_interrupted = true;
}
}
void display_manager::on_overlay_removed(const std::shared_ptr<overlay>& item)
{
auto iface = std::dynamic_pointer_cast<user_interface>(item);
if (!iface)
{
// Not instance of UI, ignore
return;
}
iface->detach_input();
}
void display_manager::input_thread_loop()
{
// Avoid tail recursion by reinserting pushed-down items
std::vector<input_thread_context_t> interrupted_items;
while (!m_input_thread_abort)
{
// We're about to load the whole list, interruption makes no sense before this point
m_input_thread_interrupted = false;
for (auto&& input_context : m_input_token_stack.pop_all_reversed())
{
if (!input_context.target || input_context.target->is_detached())
{
continue;
}
if (m_input_thread_interrupted)
{
// Someone just pushed something onto the stack. Check if we already saw it.
if (m_input_token_stack)
{
// We actually have new items to read out. Skip the remaining list.
interrupted_items.push_back(input_context);
continue;
}
// False alarm, we already saw it.
m_input_thread_interrupted = false;
}
if (input_context.input_loop_prologue &&
!input_context.prologue_completed)
{
input_context.input_loop_prologue();
input_context.prologue_completed = true;
}
s32 result = 0;
if (!input_context.input_loop_override) [[ likely ]]
{
result = input_context.target->run_input_loop([this]()
{
// Stop if interrupt status is set or input stack is empty
return !m_input_thread_interrupted || !m_input_token_stack;
});
}
else
{
result = input_context.input_loop_override();
}
if (result == user_interface::selection_code::interrupted)
{
// This dialog was exited prematurely, so we must re-run its input routine later.
ensure(m_input_thread_interrupted);
ensure(m_input_token_stack);
interrupted_items.push_back(input_context);
continue;
}
if (input_context.input_loop_epilogue)
{
input_context.input_loop_epilogue(result);
}
else if (result && result != user_interface::selection_code::canceled)
{
rsx_log.error("%s exited with error code=%d", input_context.name, result);
}
}
if (!interrupted_items.empty())
{
std::lock_guard lock(m_input_stack_guard);
// We need to rebuild the stack in reverse order here
const auto current_stack = m_input_token_stack.pop_all();
// Re-insert interrupted list
for (auto it = interrupted_items.crbegin(); it != interrupted_items.crend(); ++it)
{
m_input_token_stack.push(*it);
}
// Re-insert the 'new' list oldest-first
for (const auto& iface : current_stack)
{
m_input_token_stack.push(iface);
}
// Clear
interrupted_items.clear();
}
else if (!m_input_thread_abort)
{
thread_ctrl::wait_on(m_input_token_stack);
}
}
}
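// Note on the rebuild above: restoring both the interrupted contexts and the
// newly arrived ones onto the stack, instead of recursing into the loop,
// keeps a single bounded iteration per wakeup (see the "avoid tail
// recursion" note at the top of the function).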
}
}
// File: RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_osk_panel.cpp
#include "stdafx.h"
#include "overlay_osk_panel.h"
namespace rsx
{
namespace overlays
{
osk_panel::osk_panel(u32 panel_mode)
{
osk_panel_mode = panel_mode;
// TODO: Use proper translations for Space/Backspace/Return etc. and make sure they fit in the grid.
switch (panel_mode)
{
case CELL_OSKDIALOG_PANELMODE_DEFAULT:
case CELL_OSKDIALOG_PANELMODE_GERMAN:
case CELL_OSKDIALOG_PANELMODE_ENGLISH:
case CELL_OSKDIALOG_PANELMODE_SPANISH:
case CELL_OSKDIALOG_PANELMODE_FRENCH:
case CELL_OSKDIALOG_PANELMODE_ITALIAN:
case CELL_OSKDIALOG_PANELMODE_DUTCH:
case CELL_OSKDIALOG_PANELMODE_PORTUGUESE:
case CELL_OSKDIALOG_PANELMODE_RUSSIAN:
case CELL_OSKDIALOG_PANELMODE_JAPANESE:
case CELL_OSKDIALOG_PANELMODE_DEFAULT_NO_JAPANESE:
case CELL_OSKDIALOG_PANELMODE_POLISH:
case CELL_OSKDIALOG_PANELMODE_KOREAN:
case CELL_OSKDIALOG_PANELMODE_TURKEY:
case CELL_OSKDIALOG_PANELMODE_TRADITIONAL_CHINESE:
case CELL_OSKDIALOG_PANELMODE_SIMPLIFIED_CHINESE:
case CELL_OSKDIALOG_PANELMODE_PORTUGUESE_BRAZIL:
case CELL_OSKDIALOG_PANELMODE_DANISH:
case CELL_OSKDIALOG_PANELMODE_SWEDISH:
case CELL_OSKDIALOG_PANELMODE_NORWEGIAN:
case CELL_OSKDIALOG_PANELMODE_FINNISH:
case CELL_OSKDIALOG_PANELMODE_JAPANESE_HIRAGANA:
case CELL_OSKDIALOG_PANELMODE_JAPANESE_KATAKANA:
case CELL_OSKDIALOG_PANELMODE_ALPHABET_FULL_WIDTH:
case CELL_OSKDIALOG_PANELMODE_ALPHABET:
case CELL_OSKDIALOG_PANELMODE_LATIN:
case CELL_OSKDIALOG_PANELMODE_NUMERAL_FULL_WIDTH:
case CELL_OSKDIALOG_PANELMODE_NUMERAL:
case CELL_OSKDIALOG_PANELMODE_URL:
case CELL_OSKDIALOG_PANELMODE_PASSWORD:
default:
{
space = U"Space";
backspace = U"Backspace";
enter = U"Return";
}
}
}
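// Layout convention used by every panel below (field meanings inferred from
// usage here, not restated from the grid entry definition): each entry is
//   { { per-layer labels, each with an optional shifted variant },
//     background, horizontal cell span, [button flags, callback] }
// on a 10-column grid; the control row fits because its five buttons each
// span two cells.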
// Language specific implementations
// TODO: Check and adjust special characters for each panel
osk_panel_latin::osk_panel_latin(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode)
: osk_panel(osk_panel_mode)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"à", U"À"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"@"}, {U"á", U"Á"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"â", U"Â"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U"$"}, {U"ã", U"Ã"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"ä", U"Ä"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U"^"}, {U"å", U"Å"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"&"}, {U"æ", U"Æ"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"ç", U"Ç"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"[", U"<"}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"]", U">"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"è", U"È"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"w", U"W"}, {U"é", U"É"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U"e", U"E"}, {U"ê", U"Ê"}, {U"[", U"€"}}, default_bg, 1},
{{{U"r", U"R"}, {U"ë", U"Ë"}, {U"]", U"£"}}, default_bg, 1},
{{{U"t", U"T"}, {U"ì", U"Ì"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"í", U"Í"}, {U"_", U"§"}}, default_bg, 1},
{{{U"u", U"U"}, {U"î", U"Î"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"i", U"I"}, {U"ï", U"Ï"}, {U"{", U"µ"}}, default_bg, 1},
{{{U"o", U"O"}, {U";", U"="}, {U"}", U""}}, default_bg, 1},
{{{U"p", U"P"}, {U":", U"+"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"ñ", U"Ñ"}, {U"@", U""}}, default_bg, 1},
{{{U"s", U"S"}, {U"ò", U"Ò"}, {U"°", U""}}, default_bg, 1},
{{{U"d", U"D"}, {U"ó", U"Ó"}, {U"‹", U""}}, default_bg, 1},
{{{U"f", U"F"}, {U"ô", U"Ô"}, {U"›", U""}}, default_bg, 1},
{{{U"g", U"G"}, {U"õ", U"Õ"}, {U"«", U""}}, default_bg, 1},
{{{U"h", U"H"}, {U"ö", U"Ö"}, {U"»", U""}}, default_bg, 1},
{{{U"j", U"J"}, {U"ø", U"Ø"}, {U"ª", U""}}, default_bg, 1},
{{{U"k", U"K"}, {U"œ", U"Œ"}, {U"º", U""}}, default_bg, 1},
{{{U"l", U"L"}, {U"`", U"~"}, {U"×", U""}}, default_bg, 1},
{{{U"'", U"\""}, {U"¡", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"ß", U"ß"}, {U"+", U""}}, default_bg, 1},
{{{U"x", U"X"}, {U"ù", U"Ù"}, {U",", U""}}, default_bg, 1},
{{{U"c", U"C"}, {U"ú", U"Ú"}, {U"-", U""}}, default_bg, 1},
{{{U"v", U"V"}, {U"û", U"Û"}, {U".", U""}}, default_bg, 1},
{{{U"b", U"B"}, {U"ü", U"Ü"}, {U"\"", U""}}, default_bg, 1},
{{{U"n", U"N"}, {U"ý", U"Ý"}, {U":", U""}}, default_bg, 1},
{{{U"m", U"M"}, {U"ÿ", U"Ÿ"}, {U";", U""}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"?", U"/"}, {U"¿", U"/"}, {U">", U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"À/à"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"ÖÑß"}, {U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_english::osk_panel_english(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_ENGLISH)
{
// English and latin should be mostly the same.
}
osk_panel_spanish::osk_panel_spanish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_SPANISH)
{
// Spanish and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_italian::osk_panel_italian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_ITALIAN)
{
// Italian and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_danish::osk_panel_danish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_DANISH)
{
// Danish and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_norwegian::osk_panel_norwegian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_NORWEGIAN)
{
// Norwegian and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_dutch::osk_panel_dutch(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_DUTCH)
{
// Dutch and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_swedish::osk_panel_swedish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_SWEDISH)
{
// Swedish and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_finnish::osk_panel_finnish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_FINNISH)
{
// Finnish and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_portuguese_pt::osk_panel_portuguese_pt(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_PORTUGUESE)
{
// Portuguese (Portugal) and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_portuguese_br::osk_panel_portuguese_br(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_latin(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_PORTUGUESE_BRAZIL)
{
// Portuguese (Brazil) and latin should be mostly the same. Only the translation for the controls should be different.
}
osk_panel_french::osk_panel_french(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_FRENCH)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"à", U"À"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"@"}, {U"á", U"Á"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"â", U"Â"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U"$"}, {U"ã", U"Ã"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"ä", U"Ä"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U"^"}, {U"å", U"Å"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"&"}, {U"æ", U"Æ"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"ç", U"Ç"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"[", U"<"}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"]", U">"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"a", U"A"}, {U"è", U"È"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"z", U"Z"}, {U"é", U"É"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U"e", U"E"}, {U"ê", U"Ê"}, {U"[", U"€"}}, default_bg, 1},
{{{U"r", U"R"}, {U"ë", U"Ë"}, {U"]", U"£"}}, default_bg, 1},
{{{U"t", U"T"}, {U"ì", U"Ì"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"í", U"Í"}, {U"_", U"§"}}, default_bg, 1},
{{{U"u", U"U"}, {U"î", U"Î"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"i", U"I"}, {U"ï", U"Ï"}, {U"{", U"µ"}}, default_bg, 1},
{{{U"o", U"O"}, {U";", U"="}, {U"}", U""}}, default_bg, 1},
{{{U"p", U"P"}, {U":", U"+"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"q", U"Q"}, {U"ñ", U"Ñ"}, {U"@", U""}}, default_bg, 1},
{{{U"s", U"S"}, {U"ò", U"Ò"}, {U"°", U""}}, default_bg, 1},
{{{U"d", U"D"}, {U"ó", U"Ó"}, {U"‹", U""}}, default_bg, 1},
{{{U"f", U"F"}, {U"ô", U"Ô"}, {U"›", U""}}, default_bg, 1},
{{{U"g", U"G"}, {U"õ", U"Õ"}, {U"«", U""}}, default_bg, 1},
{{{U"h", U"H"}, {U"ö", U"Ö"}, {U"»", U""}}, default_bg, 1},
{{{U"j", U"J"}, {U"ø", U"Ø"}, {U"ª", U""}}, default_bg, 1},
{{{U"k", U"K"}, {U"œ", U"Œ"}, {U"º", U""}}, default_bg, 1},
{{{U"l", U"L"}, {U"`", U"~"}, {U"×", U""}}, default_bg, 1},
{{{U"'", U"\""}, {U"¡", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"w", U"W"}, {U"ß", U"ß"}, {U"+", U""}}, default_bg, 1},
{{{U"x", U"X"}, {U"ù", U"Ù"}, {U",", U""}}, default_bg, 1},
{{{U"c", U"C"}, {U"ú", U"Ú"}, {U"-", U""}}, default_bg, 1},
{{{U"v", U"V"}, {U"û", U"Û"}, {U".", U""}}, default_bg, 1},
{{{U"b", U"B"}, {U"ü", U"Ü"}, {U"\"", U""}}, default_bg, 1},
{{{U"n", U"N"}, {U"ý", U"Ý"}, {U":", U""}}, default_bg, 1},
{{{U"m", U"M"}, {U"ÿ", U"Ÿ"}, {U";", U""}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"?", U"/"}, {U"¿", U"/"}, {U">", U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"À/à"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"ÖÑß"}, {U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_german::osk_panel_german(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_GERMAN)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"à", U"À"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"@"}, {U"á", U"Á"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"â", U"Â"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U"$"}, {U"ã", U"Ã"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"ä", U"Ä"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U"^"}, {U"å", U"Å"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"&"}, {U"æ", U"Æ"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"ç", U"Ç"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"[", U"<"}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"]", U">"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"è", U"È"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"w", U"W"}, {U"é", U"É"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U"e", U"E"}, {U"ê", U"Ê"}, {U"[", U"€"}}, default_bg, 1},
{{{U"r", U"R"}, {U"ë", U"Ë"}, {U"]", U"£"}}, default_bg, 1},
{{{U"t", U"T"}, {U"ì", U"Ì"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"z", U"Z"}, {U"í", U"Í"}, {U"_", U"§"}}, default_bg, 1},
{{{U"u", U"U"}, {U"î", U"Î"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"i", U"I"}, {U"ï", U"Ï"}, {U"{", U"µ"}}, default_bg, 1},
{{{U"o", U"O"}, {U";", U"="}, {U"}", U""}}, default_bg, 1},
{{{U"p", U"P"}, {U":", U"+"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"ñ", U"Ñ"}, {U"@", U""}}, default_bg, 1},
{{{U"s", U"S"}, {U"ò", U"Ò"}, {U"°", U""}}, default_bg, 1},
{{{U"d", U"D"}, {U"ó", U"Ó"}, {U"‹", U""}}, default_bg, 1},
{{{U"f", U"F"}, {U"ô", U"Ô"}, {U"›", U""}}, default_bg, 1},
{{{U"g", U"G"}, {U"õ", U"Õ"}, {U"«", U""}}, default_bg, 1},
{{{U"h", U"H"}, {U"ö", U"Ö"}, {U"»", U""}}, default_bg, 1},
{{{U"j", U"J"}, {U"ø", U"Ø"}, {U"ª", U""}}, default_bg, 1},
{{{U"k", U"K"}, {U"œ", U"Œ"}, {U"º", U""}}, default_bg, 1},
{{{U"l", U"L"}, {U"`", U"~"}, {U"×", U""}}, default_bg, 1},
{{{U"'", U"\""}, {U"¡", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"y", U"Y"}, {U"ß", U"ß"}, {U"+", U""}}, default_bg, 1},
{{{U"x", U"X"}, {U"ù", U"Ù"}, {U",", U""}}, default_bg, 1},
{{{U"c", U"C"}, {U"ú", U"Ú"}, {U"-", U""}}, default_bg, 1},
{{{U"v", U"V"}, {U"û", U"Û"}, {U".", U""}}, default_bg, 1},
{{{U"b", U"B"}, {U"ü", U"Ü"}, {U"\"", U""}}, default_bg, 1},
{{{U"n", U"N"}, {U"ý", U"Ý"}, {U":", U""}}, default_bg, 1},
{{{U"m", U"M"}, {U"ÿ", U"Ÿ"}, {U";", U""}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"?", U"/"}, {U"¿", U"/"}, {U">", U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"À/à"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"ÖÑß"}, {U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_turkey::osk_panel_turkey(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_TURKEY)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"1", U"!"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"@"}, {U"2", U"@"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"3", U"#"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U"$"}, {U"4", U"$"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"5", U"%"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U"^"}, {U"6", U"^"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"&"}, {U"7", U"&"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"8", U"*"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"9", U"("}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"0", U")"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"q", U"Q"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"w", U"W"}, {U"w", U"W"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U"e", U"€"}, {U"e", U"€"}, {U"[", U"€"}}, default_bg, 1},
{{{U"r", U"R"}, {U"r", U"R"}, {U"]", U"£"}}, default_bg, 1},
{{{U"t", U"T"}, {U"t", U"T"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"y", U"Y"}, {U"_", U"§"}}, default_bg, 1},
{{{U"u", U"U"}, {U"ü", U"Ü"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"i", U"I"}, {U"ı", U"İ"}, {U"{", U"µ"}}, default_bg, 1}, // I couldn't find ı and İ in the PS3 OSK, but I'll put them here anyway
{{{U"o", U"O"}, {U"ö", U"Ö"}, {U"}", U""}}, default_bg, 1},
{{{U"p", U"P"}, {U"p", U"P"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"a", U"A"}, {U"@", U""}}, default_bg, 1},
{{{U"s", U"S"}, {U"ş", U"Ş"}, {U"°", U""}}, default_bg, 1},
{{{U"d", U"D"}, {U"d", U"D"}, {U"‹", U""}}, default_bg, 1},
{{{U"f", U"F"}, {U"f", U"F"}, {U"›", U""}}, default_bg, 1},
{{{U"g", U"G"}, {U"ğ", U"Ğ"}, {U"«", U""}}, default_bg, 1},
{{{U"h", U"H"}, {U"h", U"H"}, {U"»", U""}}, default_bg, 1},
{{{U"j", U"J"}, {U"j", U"J"}, {U"ª", U""}}, default_bg, 1},
{{{U"k", U"K"}, {U"k", U"K"}, {U"º", U""}}, default_bg, 1},
{{{U"l", U"L"}, {U"l", U"L"}, {U"×", U""}}, default_bg, 1},
{{{U"'", U"\""}, {U"'", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"z", U"Z"}, {U"+", U""}}, default_bg, 1},
{{{U"x", U"X"}, {U"x", U"X"}, {U",", U""}}, default_bg, 1},
{{{U"c", U"C"}, {U"ç", U"Ç"}, {U"-", U""}}, default_bg, 1},
{{{U"v", U"V"}, {U"v", U"V"}, {U".", U""}}, default_bg, 1},
{{{U"b", U"B"}, {U"b", U"B"}, {U"\"", U""}}, default_bg, 1},
{{{U"n", U"N"}, {U"n", U"N"}, {U":", U""}}, default_bg, 1},
{{{U"m", U"M"}, {U"m", U"M"}, {U";", U""}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"?", U"/"}, {U"?", U"/"}, {U">", U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"A/a"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"AltGr"}, {U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_polish::osk_panel_polish(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_POLISH)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"1", U"!"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"@"}, {U"2", U"@"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"3", U"#"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U"$"}, {U"4", U"$"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"5", U"%"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U"^"}, {U"6", U"^"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"&"}, {U"7", U"&"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"8", U"*"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"9", U"("}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"0", U")"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"q", U"Q"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"w", U"W"}, {U"w", U"W"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U"e", U"E"}, {U"ę", U"Ę"}, {U"[", U"€"}}, default_bg, 1},
{{{U"r", U"R"}, {U"r", U"R"}, {U"]", U"£"}}, default_bg, 1},
{{{U"t", U"T"}, {U"t", U"T"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"y", U"Y"}, {U"_", U"§"}}, default_bg, 1},
{{{U"u", U"U"}, {U"€", U"€"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"i", U"I"}, {U"i", U"I"}, {U"{", U"µ"}}, default_bg, 1},
{{{U"o", U"O"}, {U"ó", U"Ó"}, {U"}", U""}}, default_bg, 1},
{{{U"p", U"P"}, {U"p", U"P"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"ą", U"Ą"}, {U"@", U""}}, default_bg, 1},
{{{U"s", U"S"}, {U"ś", U"Ś"}, {U"°", U""}}, default_bg, 1},
{{{U"d", U"D"}, {U"d", U"D"}, {U"‹", U""}}, default_bg, 1},
{{{U"f", U"F"}, {U"f", U"F"}, {U"›", U""}}, default_bg, 1},
{{{U"g", U"G"}, {U"g", U"G"}, {U"«", U""}}, default_bg, 1},
{{{U"h", U"H"}, {U"h", U"H"}, {U"»", U""}}, default_bg, 1},
{{{U"j", U"J"}, {U"j", U"J"}, {U"ª", U""}}, default_bg, 1},
{{{U"k", U"K"}, {U"k", U"K"}, {U"º", U""}}, default_bg, 1},
{{{U"l", U"L"}, {U"ł", U"Ł"}, {U"×", U""}}, default_bg, 1},
{{{U"'", U"\""}, {U"'", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"ż", U"Ż"}, {U"+", U""}}, default_bg, 1},
{{{U"x", U"X"}, {U"ź", U"Ź"}, {U",", U""}}, default_bg, 1},
{{{U"c", U"C"}, {U"ć", U"Ć"}, {U"-", U""}}, default_bg, 1},
{{{U"v", U"V"}, {U"v", U"V"}, {U".", U""}}, default_bg, 1},
{{{U"b", U"B"}, {U"b", U"B"}, {U"\"", U""}}, default_bg, 1},
{{{U"n", U"N"}, {U"ń", U"Ń"}, {U":", U""}}, default_bg, 1},
{{{U"m", U"M"}, {U"m", U"M"}, {U";", U""}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"?", U"/"}, {U"?", U"/"}, {U">", U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"Ą/ą"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"AltGr"}, {U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_russian::osk_panel_russian(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_RUSSIAN)
{
num_rows = 6;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"1", U"!"}, {U"à", U"À"}, {U"!", U"¡"}}, default_bg, 1},
{{{U"2", U"\""}, {U"2", U"@"}, {U"á", U"Á"}, {U"?", U"¿"}}, default_bg, 1},
{{{U"3", U"#"}, {U"3", U"#"}, {U"â", U"Â"}, {U"#", U"~"}}, default_bg, 1},
{{{U"4", U";"}, {U"4", U"$"}, {U"ã", U"Ã"}, {U"$", U"„"}}, default_bg, 1},
{{{U"5", U"%"}, {U"5", U"%"}, {U"ä", U"Ä"}, {U"%", U"´"}}, default_bg, 1},
{{{U"6", U":"}, {U"6", U"^"}, {U"å", U"Å"}, {U"&", U"‘"}}, default_bg, 1},
{{{U"7", U"?"}, {U"7", U"&"}, {U"æ", U"Æ"}, {U"'", U"’"}}, default_bg, 1},
{{{U"8", U"*"}, {U"8", U"*"}, {U"ç", U"Ç"}, {U"(", U"‚"}}, default_bg, 1},
{{{U"9", U"("}, {U"9", U"("}, {U"[", U"<"}, {U")", U"“"}}, default_bg, 1},
{{{U"0", U")"}, {U"0", U")"}, {U"]", U">"}, {U"*", U"”"}}, default_bg, 1},
// Row 2
{{{U"ё", U"Ё"}, {U"q", U"Q"}, {U"è", U"È"}, {U"/", U"¤"}}, default_bg, 1},
{{{U"'", U"@"}, {U"w", U"W"}, {U"é", U"É"}, {U"\\", U"¢"}}, default_bg, 1},
{{{U",", U"$"}, {U"e", U"E"}, {U"ê", U"Ê"}, {U"[", U"€"}}, default_bg, 1},
{{{U".", U"^"}, {U"r", U"R"}, {U"ë", U"Ë"}, {U"]", U"£"}}, default_bg, 1},
{{{U"?", U"&"}, {U"t", U"T"}, {U"ì", U"Ì"}, {U"^", U"¥"}}, default_bg, 1},
{{{U"!", U"/"}, {U"y", U"Y"}, {U"í", U"Í"}, {U"_", U"§"}}, default_bg, 1},
{{{U"-", U"_"}, {U"u", U"U"}, {U"î", U"Î"}, {U"`", U"¦"}}, default_bg, 1},
{{{U"=", U"+"}, {U"i", U"I"}, {U"ï", U"Ï"}, {U"{", U"µ"}}, default_bg, 1},
{{{U"х", U"Х"}, {U"o", U"O"}, {U";", U"="}, {U"}", U""}}, default_bg, 1},
{{{U"ъ", U"Ъ"}, {U"p", U"P"}, {U":", U"+"}, {U"|", U""}}, default_bg, 1},
// Row 3
{{{U"й", U"Й"}, {U"a", U"A"}, {U"ñ", U"Ñ"}, {U"@", U""}}, default_bg, 1},
{{{U"ц", U"Ц"}, {U"s", U"S"}, {U"ò", U"Ò"}, {U"°", U""}}, default_bg, 1},
{{{U"у", U"У"}, {U"d", U"D"}, {U"ó", U"Ó"}, {U"‹", U""}}, default_bg, 1},
{{{U"к", U"К"}, {U"f", U"F"}, {U"ô", U"Ô"}, {U"›", U""}}, default_bg, 1},
{{{U"е", U"Е"}, {U"g", U"G"}, {U"õ", U"Õ"}, {U"«", U""}}, default_bg, 1},
{{{U"н", U"Н"}, {U"h", U"H"}, {U"ö", U"Ö"}, {U"»", U""}}, default_bg, 1},
{{{U"г", U"Г"}, {U"j", U"J"}, {U"ø", U"Ø"}, {U"ª", U""}}, default_bg, 1},
{{{U"ш", U"Ш"}, {U"k", U"K"}, {U"œ", U"Œ"}, {U"º", U""}}, default_bg, 1},
{{{U"щ", U"Щ"}, {U"l", U"L"}, {U"`", U"~"}, {U"×", U""}}, default_bg, 1},
{{{U"з", U"З"}, {U"'", U"\""}, {U"¡", U"\""}, {U"÷", U""}}, default_bg, 1},
// Row 4
{{{U"ф", U"Ф"}, {U"z", U"Z"}, {U"ß", U"ß"}, {U"+", U""}}, default_bg, 1},
{{{U"ы", U"Ы"}, {U"x", U"X"}, {U"ù", U"Ù"}, {U",", U""}}, default_bg, 1},
{{{U"в", U"В"}, {U"c", U"C"}, {U"ú", U"Ú"}, {U"-", U""}}, default_bg, 1},
{{{U"а", U"А"}, {U"v", U"V"}, {U"û", U"Û"}, {U".", U""}}, default_bg, 1},
{{{U"п", U"П"}, {U"b", U"B"}, {U"ü", U"Ü"}, {U"\"", U""}}, default_bg, 1},
{{{U"р", U"Р"}, {U"n", U"N"}, {U"ý", U"Ý"}, {U":", U""}}, default_bg, 1},
{{{U"о", U"О"}, {U"m", U"M"}, {U"ÿ", U"Ÿ"}, {U";", U""}}, default_bg, 1},
{{{U"л", U"Л"}, {U",", U"-"}, {U",", U"-"}, {U"<", U""}}, default_bg, 1},
{{{U"д", U"Д"}, {U".", U"_"}, {U".", U"_"}, {U"=", U""}}, default_bg, 1},
{{{U"Ж", U"ж"}, {U"?", U"/"}, {U"¿", U"/"}, {U">", U""}}, default_bg, 1},
// Row 5
{{{U"я", U"Я"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"ч", U"Ч"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"с", U"С"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"м", U"М"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"и", U"И"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"т", U"Т"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"ь", U"Ь"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"б", U"Б"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"ю", U"Ю"}, {U""}, {U""}, {U""}}, default_bg, 1},
{{{U"э", U"Э"}, {U""}, {U""}, {U""}}, default_bg, 1},
// Control
{{{U"У/у"}, {U"A/a"}, {U"À/à"}, {U"!/¡"}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"ABC"}, {U"ÖÑß"}, {U"@#:"}, {U"РУ"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
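// The Russian panel carries four label layers per key (Cyrillic, Latin,
// accented Latin, symbols), matching the four captions on its shift/layer
// control buttons above.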
osk_panel_korean::osk_panel_korean(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_KOREAN)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"ㅃ"}, {U"1", U"!"}, {U"!"}}, default_bg, 1},
{{{U"ㅉ"}, {U"2", U"@"}, {U"\""}}, default_bg, 1},
{{{U"ㄸ"}, {U"3", U"#"}, {U"#"}}, default_bg, 1},
{{{U"ㄲ"}, {U"4", U"$"}, {U"$"}}, default_bg, 1},
{{{U"ㅆ"}, {U"5", U"%"}, {U"%"}}, default_bg, 1},
{{{U"^"}, {U"6", U"^"}, {U"&"}}, default_bg, 1},
{{{U"*"}, {U"7", U"&"}, {U"'"}}, default_bg, 1},
{{{U"-"}, {U"8", U"*"}, {U"("}}, default_bg, 1},
{{{U"ㅒ"}, {U"9", U"("}, {U")"}}, default_bg, 1},
{{{U"ㅖ"}, {U"0", U")"}, {U"*"}}, default_bg, 1},
// Row 2
{{{U"ㅂ"}, {U"q", U"Q"}, {U"+"}}, default_bg, 1},
{{{U"ㅈ"}, {U"w", U"W"}, {U","}}, default_bg, 1},
{{{U"ㄷ"}, {U"e", U"E"}, {U"-"}}, default_bg, 1},
{{{U"ㄱ"}, {U"r", U"R"}, {U"."}}, default_bg, 1},
{{{U"ㅅ"}, {U"t", U"T"}, {U"/"}}, default_bg, 1},
{{{U"ㅛ"}, {U"y", U"Y"}, {U":"}}, default_bg, 1},
{{{U"ㅕ"}, {U"u", U"U"}, {U";"}}, default_bg, 1},
{{{U"ㅑ"}, {U"i", U"I"}, {U"<"}}, default_bg, 1},
{{{U"ㅐ"}, {U"o", U"O"}, {U"="}}, default_bg, 1},
{{{U"ㅔ"}, {U"p", U"P"}, {U">"}}, default_bg, 1},
// Row 3
{{{U"ㅁ"}, {U"a", U"A"}, {U"?"}}, default_bg, 1},
{{{U"ㄴ"}, {U"s", U"S"}, {U"@"}}, default_bg, 1},
{{{U"ㅇ"}, {U"d", U"D"}, {U"["}}, default_bg, 1},
{{{U"ㄹ"}, {U"f", U"F"}, {U"\\"}}, default_bg, 1},
{{{U"ㅎ"}, {U"g", U"G"}, {U"]"}}, default_bg, 1},
{{{U"ㅗ"}, {U"h", U"H"}, {U"^"}}, default_bg, 1},
{{{U"ㅓ"}, {U"j", U"J"}, {U"_"}}, default_bg, 1},
{{{U"ㅏ"}, {U"k", U"K"}, {U"`"}}, default_bg, 1},
{{{U"ㅣ"}, {U"l", U"L"}, {U"{"}}, default_bg, 1},
{{{U";"}, {U"'", U"\""}, {U"}"}}, default_bg, 1},
// Row 4
{{{U"ㅋ"}, {U"z", U"Z"}, {U"|"}}, default_bg, 1},
{{{U"ㅌ"}, {U"x", U"X"}, {U"~"}}, default_bg, 1},
{{{U"ㅊ"}, {U"c", U"C"}, {U"₩"}}, default_bg, 1},
{{{U"ㅍ"}, {U"v", U"V"}, {U""}}, default_bg, 1},
{{{U"ㅠ"}, {U"b", U"B"}, {U""}}, default_bg, 1},
{{{U"ㅜ"}, {U"n", U"N"}, {U""}}, default_bg, 1},
{{{U"ㅡ"}, {U"m", U"M"}, {U""}}, default_bg, 1},
{{{U","}, {U",", U"-"}, {U""}}, default_bg, 1},
{{{U"."}, {U".", U"_"}, {U""}}, default_bg, 1},
{{{U"?"}, {U"?", U"/"}, {U""}}, default_bg, 1},
// Control
{{{U""}, {U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"ABC"}, {U"@#:"}, {U"가"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_chinese::osk_panel_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode)
: osk_panel(osk_panel_mode)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
// TODO: add mode for chinese dictionaries
layout =
{
// Row 1
{{{U"1", U"!"}, {U"1", U"!"}, {U","}}, default_bg, 1},
{{{U"2", U"@"}, {U"2", U"@"}, {U"."}}, default_bg, 1},
{{{U"3", U"#"}, {U"3", U"#"}, {U":"}}, default_bg, 1},
{{{U"4", U"$"}, {U"4", U"$"}, {U";"}}, default_bg, 1},
{{{U"5", U"%"}, {U"5", U"%"}, {U"!"}}, default_bg, 1},
{{{U"6", U"^"}, {U"6", U"^"}, {U"?"}}, default_bg, 1},
{{{U"7", U"&"}, {U"7", U"&"}, {U"""}}, default_bg, 1},
{{{U"8", U"*"}, {U"8", U"*"}, {U"'"}}, default_bg, 1},
{{{U"9", U"("}, {U"9", U"("}, {U"`"}}, default_bg, 1},
{{{U"0", U")"}, {U"0", U")"}, {U"^"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"q", U"Q"}, {U"~"}}, default_bg, 1},
{{{U"w", U"W"}, {U"w", U"W"}, {U"_"}}, default_bg, 1},
{{{U"e", U"E"}, {U"e", U"E"}, {U"&"}}, default_bg, 1},
{{{U"r", U"R"}, {U"r", U"R"}, {U"@"}}, default_bg, 1},
{{{U"t", U"T"}, {U"t", U"T"}, {U"#"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"y", U"Y"}, {U"%"}}, default_bg, 1},
{{{U"u", U"U"}, {U"u", U"U"}, {U"+"}}, default_bg, 1},
{{{U"i", U"I"}, {U"i", U"I"}, {U"-"}}, default_bg, 1},
{{{U"o", U"O"}, {U"o", U"O"}, {U"*"}}, default_bg, 1},
{{{U"p", U"P"}, {U"p", U"P"}, {U"・"}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"a", U"A"}, {U"<"}}, default_bg, 1},
{{{U"s", U"S"}, {U"s", U"S"}, {U">"}}, default_bg, 1},
{{{U"d", U"D"}, {U"d", U"D"}, {U"("}}, default_bg, 1},
{{{U"f", U"F"}, {U"f", U"F"}, {U")"}}, default_bg, 1},
{{{U"g", U"G"}, {U"g", U"G"}, {U"["}}, default_bg, 1},
{{{U"h", U"H"}, {U"h", U"H"}, {U"]"}}, default_bg, 1},
{{{U"j", U"J"}, {U"j", U"J"}, {U"{"}}, default_bg, 1},
{{{U"k", U"K"}, {U"k", U"K"}, {U"}"}}, default_bg, 1},
{{{U"l", U"L"}, {U"l", U"L"}, {U"「"}}, default_bg, 1},
{{{U"'", U"\""}, {U"'", U"""}, {U"」"}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"z", U"Z"}, {U"="}}, default_bg, 1},
{{{U"x", U"X"}, {U"x", U"X"}, {U"|"}}, default_bg, 1},
{{{U"c", U"C"}, {U"c", U"C"}, {U"。"}}, default_bg, 1},
{{{U"v", U"V"}, {U"v", U"V"}, {U"/"}}, default_bg, 1},
{{{U"b", U"B"}, {U"b", U"B"}, {U"\"}}, default_bg, 1},
{{{U"n", U"N"}, {U"n", U"N"}, {U"¬"}}, default_bg, 1},
{{{U"m", U"M"}, {U"m", U"M"}, {U"$"}}, default_bg, 1},
{{{U",", U"-"}, {U",", U"-"}, {U"¥"}}, default_bg, 1},
{{{U".", U"_"}, {U".", U"_"}, {U"、"}}, default_bg, 1},
{{{U"?", U"/"}, {U"?", U"/"}, {U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"全半"}, {U"@%"}, {U"abc"}}, special2_bg, 2, button_flags::_layer, layer_cb},
{{{space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb},
{{{backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_simplified_chinese::osk_panel_simplified_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_SIMPLIFIED_CHINESE)
{
// Simplified chinese uses osk_panel_chinese. Only the translation for the controls and the dictionary should be different than for traditional chinese.
}
osk_panel_traditional_chinese::osk_panel_traditional_chinese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_chinese(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_TRADITIONAL_CHINESE)
{
// Traditional chinese uses osk_panel_chinese. Only the translation for the controls and the dictionary should be different than for simplified chinese.
}
osk_panel_japanese::osk_panel_japanese(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_JAPANESE)
{
num_rows = 6;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"あ", U"ア"}, {U"1", U"!"}, {U"1", U"!"}, {U","}}, default_bg, 1},
{{{U"か", U"カ"}, {U"2", U"@"}, {U"2", U"@"}, {U"."}}, default_bg, 1},
{{{U"さ", U"サ"}, {U"3", U"#"}, {U"3", U"#"}, {U":"}}, default_bg, 1},
{{{U"た", U"タ"}, {U"4", U"$"}, {U"4", U"$"}, {U";"}}, default_bg, 1},
{{{U"な", U"ナ"}, {U"5", U"%"}, {U"5", U"%"}, {U"!"}}, default_bg, 1},
{{{U"は", U"ハ"}, {U"6", U"^"}, {U"6", U"^"}, {U"?"}}, default_bg, 1},
{{{U"ま", U"マ"}, {U"7", U"&"}, {U"7", U"&"}, {U"""}}, default_bg, 1},
{{{U"や", U"ヤ"}, {U"8", U"*"}, {U"8", U"*"}, {U"'"}}, default_bg, 1},
{{{U"ら", U"ラ"}, {U"9", U"("}, {U"9", U"("}, {U"`"}}, default_bg, 1},
{{{U"わ", U"ワ"}, {U"0", U")"}, {U"0", U")"}, {U"^"}}, default_bg, 1},
// Row 2
{{{U"い", U"イ"}, {U"q", U"Q"}, {U"q", U"Q"}, {U"~"}}, default_bg, 1},
{{{U"き", U"キ"}, {U"w", U"W"}, {U"w", U"W"}, {U"_"}}, default_bg, 1},
{{{U"し", U"シ"}, {U"e", U"E"}, {U"e", U"E"}, {U"&"}}, default_bg, 1},
{{{U"ち", U"チ"}, {U"r", U"R"}, {U"r", U"R"}, {U"@"}}, default_bg, 1},
{{{U"に", U"ニ"}, {U"t", U"T"}, {U"t", U"T"}, {U"#"}}, default_bg, 1},
{{{U"ひ", U"ヒ"}, {U"y", U"Y"}, {U"y", U"Y"}, {U"%"}}, default_bg, 1},
{{{U"み", U"ミ"}, {U"u", U"U"}, {U"u", U"U"}, {U"+"}}, default_bg, 1},
{{{U"ゆ", U"ユ"}, {U"i", U"I"}, {U"i", U"I"}, {U"-"}}, default_bg, 1},
{{{U"り", U"リ"}, {U"o", U"O"}, {U"o", U"O"}, {U"*"}}, default_bg, 1},
{{{U"を", U"ヲ"}, {U"p", U"P"}, {U"p", U"P"}, {U"・"}}, default_bg, 1},
// Row 3
{{{U"う", U"ウ"}, {U"a", U"A"}, {U"a", U"A"}, {U"<"}}, default_bg, 1},
{{{U"く", U"ク"}, {U"s", U"S"}, {U"s", U"S"}, {U">"}}, default_bg, 1},
{{{U"す", U"ス"}, {U"d", U"D"}, {U"d", U"D"}, {U"("}}, default_bg, 1},
{{{U"つ", U"ツ"}, {U"f", U"F"}, {U"f", U"F"}, {U")"}}, default_bg, 1},
{{{U"ぬ", U"ヌ"}, {U"g", U"G"}, {U"g", U"G"}, {U"["}}, default_bg, 1},
{{{U"ふ", U"フ"}, {U"h", U"H"}, {U"h", U"H"}, {U"]"}}, default_bg, 1},
{{{U"む", U"ム"}, {U"j", U"J"}, {U"j", U"J"}, {U"{"}}, default_bg, 1},
{{{U"よ", U"ヨ"}, {U"k", U"K"}, {U"k", U"K"}, {U"}"}}, default_bg, 1},
{{{U"る", U"ル"}, {U"l", U"L"}, {U"l", U"L"}, {U"「"}}, default_bg, 1},
{{{U"ん", U"ン"}, {U"'", U"\""}, {U"'", U"""}, {U"」"}}, default_bg, 1},
// Row 4
{{{U"え", U"エ"}, {U"z", U"Z"}, {U"z", U"Z"}, {U"="}}, default_bg, 1},
{{{U"け", U"ケ"}, {U"x", U"X"}, {U"x", U"X"}, {U"|"}}, default_bg, 1},
{{{U"せ", U"セ"}, {U"c", U"C"}, {U"c", U"C"}, {U"。"}}, default_bg, 1},
{{{U"て", U"テ"}, {U"v", U"V"}, {U"v", U"V"}, {U"/"}}, default_bg, 1},
{{{U"ね", U"ネ"}, {U"b", U"B"}, {U"b", U"B"}, {U"\"}}, default_bg, 1},
{{{U"へ", U"ヘ"}, {U"n", U"N"}, {U"n", U"N"}, {U"¬"}}, default_bg, 1},
{{{U"め", U"メ"}, {U"m", U"M"}, {U"m", U"M"}, {U"$"}}, default_bg, 1},
{{{U"゛", U"゛"}, {U",", U"-"}, {U",", U"-"}, {U"¥"}}, default_bg, 1},
{{{U"れ", U"レ"}, {U".", U"_"}, {U".", U"_"}, {U"、"}}, default_bg, 1},
{{{U"ー", U"ー"}, {U"?", U"/"}, {U"?", U"/"}, {U""}}, default_bg, 1},
// Row 5
{{{U"お", U"オ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"こ", U"コ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"そ", U"ソ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"と", U"ト"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"の", U"ノ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"ほ", U"ホ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"も", U"モ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"゜", U"゜"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"ろ", U"ロ"}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
{{{U"", U""}, {U"", U""}, {U"", U""}, {U""}}, default_bg, 1},
// Control
{{{U"あ/ア"}, {U"A/a"}, {U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"abc"}, {U"全半"}, {U"@%"}, {U"あア"}}, special2_bg, 2, button_flags::_layer, layer_cb},
{{{space}, {space}, {space}, {space}}, special_bg, 2, button_flags::_space, space_cb},
{{{backspace}, {backspace}, {backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}, {enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_japanese_hiragana::osk_panel_japanese_hiragana(callback_t /*shift_cb*/, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_JAPANESE_HIRAGANA)
{
num_rows = 6;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"あ"}, {U","}}, default_bg, 1},
{{{U"か"}, {U"."}}, default_bg, 1},
{{{U"さ"}, {U":"}}, default_bg, 1},
{{{U"た"}, {U";"}}, default_bg, 1},
{{{U"な"}, {U"!"}}, default_bg, 1},
{{{U"は"}, {U"?"}}, default_bg, 1},
{{{U"ま"}, {U"""}}, default_bg, 1},
{{{U"や"}, {U"'"}}, default_bg, 1},
{{{U"ら"}, {U"`"}}, default_bg, 1},
{{{U"わ"}, {U"^"}}, default_bg, 1},
// Row 2
{{{U"い"}, {U"~"}}, default_bg, 1},
{{{U"き"}, {U"_"}}, default_bg, 1},
{{{U"し"}, {U"&"}}, default_bg, 1},
{{{U"ち"}, {U"@"}}, default_bg, 1},
{{{U"に"}, {U"#"}}, default_bg, 1},
{{{U"ひ"}, {U"%"}}, default_bg, 1},
{{{U"み"}, {U"+"}}, default_bg, 1},
{{{U"ゆ"}, {U"-"}}, default_bg, 1},
{{{U"り"}, {U"*"}}, default_bg, 1},
{{{U"を"}, {U"・"}}, default_bg, 1},
// Row 3
{{{U"う"}, {U"<"}}, default_bg, 1},
{{{U"く"}, {U">"}}, default_bg, 1},
{{{U"す"}, {U"("}}, default_bg, 1},
{{{U"つ"}, {U")"}}, default_bg, 1},
{{{U"ぬ"}, {U"["}}, default_bg, 1},
{{{U"ふ"}, {U"]"}}, default_bg, 1},
{{{U"む"}, {U"{"}}, default_bg, 1},
{{{U"よ"}, {U"}"}}, default_bg, 1},
{{{U"る"}, {U"「"}}, default_bg, 1},
{{{U"ん"}, {U"」"}}, default_bg, 1},
// Row 4
{{{U"え"}, {U"="}}, default_bg, 1},
{{{U"け"}, {U"|"}}, default_bg, 1},
{{{U"せ"}, {U"。"}}, default_bg, 1},
{{{U"て"}, {U"/"}}, default_bg, 1},
{{{U"ね"}, {U"\"}}, default_bg, 1},
{{{U"へ"}, {U"¬"}}, default_bg, 1},
{{{U"め"}, {U"$"}}, default_bg, 1},
{{{U"゛"}, {U"¥"}}, default_bg, 1},
{{{U"れ"}, {U"、"}}, default_bg, 1},
{{{U"ー"}, {U""}}, default_bg, 1},
// Row 5
{{{U"お"}, {U""}}, default_bg, 1},
{{{U"こ"}, {U""}}, default_bg, 1},
{{{U"そ"}, {U""}}, default_bg, 1},
{{{U"と"}, {U""}}, default_bg, 1},
{{{U"の"}, {U""}}, default_bg, 1},
{{{U"ほ"}, {U""}}, default_bg, 1},
{{{U"も"}, {U""}}, default_bg, 1},
{{{U"゜"}, {U""}}, default_bg, 1},
{{{U"ろ"}, {U""}}, default_bg, 1},
{{{U""}, {U""}}, default_bg, 1},
// Control
{{{U""}, {U""}}, special2_bg, 2, button_flags::_shift, nullptr },
{{{U"@%"}, {U"あ"}}, special2_bg, 2, button_flags::_layer, layer_cb},
{{{space}, {space}}, special_bg, 2, button_flags::_space, space_cb},
{{{backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_japanese_katakana::osk_panel_japanese_katakana(callback_t /*shift_cb*/, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_JAPANESE_KATAKANA)
{
num_rows = 6;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"ア"}, {U","}}, default_bg, 1},
{{{U"カ"}, {U"."}}, default_bg, 1},
{{{U"サ"}, {U":"}}, default_bg, 1},
{{{U"タ"}, {U";"}}, default_bg, 1},
{{{U"ナ"}, {U"!"}}, default_bg, 1},
{{{U"ハ"}, {U"?"}}, default_bg, 1},
{{{U"マ"}, {U"""}}, default_bg, 1},
{{{U"ヤ"}, {U"'"}}, default_bg, 1},
{{{U"ラ"}, {U"`"}}, default_bg, 1},
{{{U"ワ"}, {U"^"}}, default_bg, 1},
// Row 2
{{{U"イ"}, {U"~"}}, default_bg, 1},
{{{U"キ"}, {U"_"}}, default_bg, 1},
{{{U"シ"}, {U"&"}}, default_bg, 1},
{{{U"チ"}, {U"@"}}, default_bg, 1},
{{{U"ニ"}, {U"#"}}, default_bg, 1},
{{{U"ヒ"}, {U"%"}}, default_bg, 1},
{{{U"ミ"}, {U"+"}}, default_bg, 1},
{{{U"ユ"}, {U"-"}}, default_bg, 1},
{{{U"リ"}, {U"*"}}, default_bg, 1},
{{{U"ヲ"}, {U"・"}}, default_bg, 1},
// Row 3
{{{U"ウ"}, {U"<"}}, default_bg, 1},
{{{U"ク"}, {U">"}}, default_bg, 1},
{{{U"ス"}, {U"("}}, default_bg, 1},
{{{U"ツ"}, {U")"}}, default_bg, 1},
{{{U"ヌ"}, {U"["}}, default_bg, 1},
{{{U"フ"}, {U"]"}}, default_bg, 1},
{{{U"ム"}, {U"{"}}, default_bg, 1},
{{{U"ヨ"}, {U"}"}}, default_bg, 1},
{{{U"ル"}, {U"「"}}, default_bg, 1},
{{{U"ン"}, {U"」"}}, default_bg, 1},
// Row 4
{{{U"エ"}, {U"="}}, default_bg, 1},
{{{U"ケ"}, {U"|"}}, default_bg, 1},
{{{U"セ"}, {U"。"}}, default_bg, 1},
{{{U"テ"}, {U"/"}}, default_bg, 1},
{{{U"ネ"}, {U"\"}}, default_bg, 1},
{{{U"ヘ"}, {U"¬"}}, default_bg, 1},
{{{U"メ"}, {U"$"}}, default_bg, 1},
{{{U"゛"}, {U"¥"}}, default_bg, 1},
{{{U"レ"}, {U"、"}}, default_bg, 1},
{{{U"ー"}, {U""}}, default_bg, 1},
// Row 5
{{{U"オ"}, {U""}}, default_bg, 1},
{{{U"コ"}, {U""}}, default_bg, 1},
{{{U"ソ"}, {U""}}, default_bg, 1},
{{{U"ト"}, {U""}}, default_bg, 1},
{{{U"ノ"}, {U""}}, default_bg, 1},
{{{U"ホ"}, {U""}}, default_bg, 1},
{{{U"モ"}, {U""}}, default_bg, 1},
{{{U"゜"}, {U""}}, default_bg, 1},
{{{U"ロ"}, {U""}}, default_bg, 1},
{{{U""}, {U""}}, default_bg, 1},
// Control
{{{U""}, {U""}}, special2_bg, 2, button_flags::_shift, nullptr },
{{{U"@%"}, {U"ア"}}, special2_bg, 2, button_flags::_layer, layer_cb},
{{{space}, {space}}, special_bg, 2, button_flags::_space, space_cb},
{{{backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_alphabet_half_width::osk_panel_alphabet_half_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb, u32 osk_panel_mode)
: osk_panel(osk_panel_mode)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"!"}}, default_bg, 1},
{{{U"2", U"@"}, {U"?"}}, default_bg, 1},
{{{U"3", U"#"}, {U"#"}}, default_bg, 1},
{{{U"4", U"$"}, {U"$"}}, default_bg, 1},
{{{U"5", U"%"}, {U"%"}}, default_bg, 1},
{{{U"6", U"^"}, {U"&"}}, default_bg, 1},
{{{U"7", U"&"}, {U"'"}}, default_bg, 1},
{{{U"8", U"*"}, {U"("}}, default_bg, 1},
{{{U"9", U"("}, {U")"}}, default_bg, 1},
{{{U"0", U")"}, {U"*"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"/"}}, default_bg, 1},
{{{U"w", U"W"}, {U"\\"}}, default_bg, 1},
{{{U"e", U"E"}, {U"["}}, default_bg, 1},
{{{U"r", U"R"}, {U"]"}}, default_bg, 1},
{{{U"t", U"T"}, {U"^"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"_"}}, default_bg, 1},
{{{U"u", U"U"}, {U"`"}}, default_bg, 1},
{{{U"i", U"I"}, {U"{"}}, default_bg, 1},
{{{U"o", U"O"}, {U"}"}}, default_bg, 1},
{{{U"p", U"P"}, {U"|"}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"@"}}, default_bg, 1},
{{{U"s", U"S"}, {U"°"}}, default_bg, 1},
{{{U"d", U"D"}, {U"‹"}}, default_bg, 1},
{{{U"f", U"F"}, {U"›"}}, default_bg, 1},
{{{U"g", U"G"}, {U"«"}}, default_bg, 1},
{{{U"h", U"H"}, {U"»"}}, default_bg, 1},
{{{U"j", U"J"}, {U"ª"}}, default_bg, 1},
{{{U"k", U"K"}, {U"º"}}, default_bg, 1},
{{{U"l", U"L"}, {U"×"}}, default_bg, 1},
{{{U"'", U"\""}, {U"÷"}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"+"}}, default_bg, 1},
{{{U"x", U"X"}, {U","}}, default_bg, 1},
{{{U"c", U"C"}, {U"-"}}, default_bg, 1},
{{{U"v", U"V"}, {U"."}}, default_bg, 1},
{{{U"b", U"B"}, {U"\""}}, default_bg, 1},
{{{U"n", U"N"}, {U":"}}, default_bg, 1},
{{{U"m", U"M"}, {U";"}}, default_bg, 1},
{{{U",", U"-"}, {U"<"}}, default_bg, 1},
{{{U".", U"_"}, {U"="}}, default_bg, 1},
{{{U"?", U"/"}, {U">"}}, default_bg, 1},
// Control
{{{U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_alphabet_full_width::osk_panel_alphabet_full_width(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_ALPHABET_FULL_WIDTH)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"!"}}, default_bg, 1},
{{{U"2", U"@"}, {U"@"}}, default_bg, 1},
{{{U"3", U"#"}, {U"#"}}, default_bg, 1},
{{{U"4", U"$"}, {U"$"}}, default_bg, 1},
{{{U"5", U"%"}, {U"%"}}, default_bg, 1},
{{{U"6", U"^"}, {U"^"}}, default_bg, 1},
{{{U"7", U"&"}, {U"&"}}, default_bg, 1},
{{{U"8", U"*"}, {U"*"}}, default_bg, 1},
{{{U"9", U"("}, {U"("}}, default_bg, 1},
{{{U"0", U")"}, {U")"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"~"}}, default_bg, 1},
{{{U"w", U"W"}, {U"_"}}, default_bg, 1},
{{{U"e", U"E"}, {U"&"}}, default_bg, 1},
{{{U"r", U"R"}, {U"@"}}, default_bg, 1},
{{{U"t", U"T"}, {U"#"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"%"}}, default_bg, 1},
{{{U"u", U"U"}, {U"+"}}, default_bg, 1},
{{{U"i", U"I"}, {U"-"}}, default_bg, 1},
{{{U"o", U"O"}, {U"*"}}, default_bg, 1},
{{{U"p", U"P"}, {U"・"}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"<"}}, default_bg, 1},
{{{U"s", U"S"}, {U">"}}, default_bg, 1},
{{{U"d", U"D"}, {U"("}}, default_bg, 1},
{{{U"f", U"F"}, {U")"}}, default_bg, 1},
{{{U"g", U"G"}, {U"["}}, default_bg, 1},
{{{U"h", U"H"}, {U"]"}}, default_bg, 1},
{{{U"j", U"J"}, {U"{"}}, default_bg, 1},
{{{U"k", U"K"}, {U"}"}}, default_bg, 1},
{{{U"l", U"L"}, {U"「"}}, default_bg, 1},
{{{U"'", U"""}, {U"」"}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"="}}, default_bg, 1},
{{{U"x", U"X"}, {U"|"}}, default_bg, 1},
{{{U"c", U"C"}, {U"。"}}, default_bg, 1},
{{{U"v", U"V"}, {U"/"}}, default_bg, 1},
{{{U"b", U"B"}, {U"\"}}, default_bg, 1},
{{{U"n", U"N"}, {U"¬"}}, default_bg, 1},
{{{U"m", U"M"}, {U"$"}}, default_bg, 1},
{{{U",", U"-"}, {U"¥"}}, default_bg, 1},
{{{U".", U"_"}, {U"、"}}, default_bg, 1},
{{{U"?", U"/"}, {U""}}, default_bg, 1},
// Control
{{{U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_numeral_half_width::osk_panel_numeral_half_width(callback_t /*shift_cb*/, callback_t /*layer_cb*/, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_NUMERAL)
{
num_rows = 2;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1"}}, default_bg, 1},
{{{U"2"}}, default_bg, 1},
{{{U"3"}}, default_bg, 1},
{{{U"4"}}, default_bg, 1},
{{{U"5"}}, default_bg, 1},
{{{U"6"}}, default_bg, 1},
{{{U"7"}}, default_bg, 1},
{{{U"8"}}, default_bg, 1},
{{{U"9"}}, default_bg, 1},
{{{U"0"}}, default_bg, 1},
// Control
{{{U""}}, special2_bg, 2, button_flags::_shift, nullptr },
{{{U""}}, special2_bg, 2, button_flags::_layer, nullptr },
{{{space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_numeral_full_width::osk_panel_numeral_full_width(callback_t /*shift_cb*/, callback_t /*layer_cb*/, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_NUMERAL_FULL_WIDTH)
{
num_rows = 2;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1"}}, default_bg, 1},
{{{U"2"}}, default_bg, 1},
{{{U"3"}}, default_bg, 1},
{{{U"4"}}, default_bg, 1},
{{{U"5"}}, default_bg, 1},
{{{U"6"}}, default_bg, 1},
{{{U"7"}}, default_bg, 1},
{{{U"8"}}, default_bg, 1},
{{{U"9"}}, default_bg, 1},
{{{U"0"}}, default_bg, 1},
// Control
{{{U""}}, special2_bg, 2, button_flags::_shift, nullptr },
{{{U""}}, special2_bg, 2, button_flags::_layer, nullptr },
{{{space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_url::osk_panel_url(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel(CELL_OSKDIALOG_PANELMODE_URL)
{
num_rows = 5;
num_columns = 10;
cell_size_x = 50;
cell_size_y = 40;
layout =
{
// Row 1
{{{U"1", U"!"}, {U"!"}}, default_bg, 1},
{{{U"2", U"@"}, {U"?"}}, default_bg, 1},
{{{U"3", U"#"}, {U"#"}}, default_bg, 1},
{{{U"4", U"$"}, {U"$"}}, default_bg, 1},
{{{U"5", U"%"}, {U"%"}}, default_bg, 1},
{{{U"6", U"~"}, {U"&"}}, default_bg, 1},
{{{U"7", U"&"}, {U"'"}}, default_bg, 1},
{{{U"8", U"*"}, {U"("}}, default_bg, 1},
{{{U"9", U";"}, {U")"}}, default_bg, 1},
{{{U"0", U"+"}, {U"*"}}, default_bg, 1},
// Row 2
{{{U"q", U"Q"}, {U"/"}}, default_bg, 1},
{{{U"w", U"W"}, {U"\\"}}, default_bg, 1},
{{{U"e", U"E"}, {U"["}}, default_bg, 1},
{{{U"r", U"R"}, {U"]"}}, default_bg, 1},
{{{U"t", U"T"}, {U"^"}}, default_bg, 1},
{{{U"y", U"Y"}, {U"_"}}, default_bg, 1},
{{{U"u", U"U"}, {U"`"}}, default_bg, 1},
{{{U"i", U"I"}, {U"{"}}, default_bg, 1},
{{{U"o", U"O"}, {U"}"}}, default_bg, 1},
{{{U"p", U"P"}, {U"|"}}, default_bg, 1},
// Row 3
{{{U"a", U"A"}, {U"@"}}, default_bg, 1},
{{{U"s", U"S"}, {U"°"}}, default_bg, 1},
{{{U"d", U"D"}, {U"‹"}}, default_bg, 1},
{{{U"f", U"F"}, {U"›"}}, default_bg, 1},
{{{U"g", U"G"}, {U"«"}}, default_bg, 1},
{{{U"h", U"H"}, {U"»"}}, default_bg, 1},
{{{U"j", U"J"}, {U"ª"}}, default_bg, 1},
{{{U"k", U"K"}, {U"º"}}, default_bg, 1},
{{{U"l", U"L"}, {U"×"}}, default_bg, 1},
{{{U"-", U"="}, {U"÷"}}, default_bg, 1},
// Row 4
{{{U"z", U"Z"}, {U"+"}}, default_bg, 1},
{{{U"x", U"X"}, {U","}}, default_bg, 1},
{{{U"c", U"C"}, {U"-"}}, default_bg, 1},
{{{U"v", U"V"}, {U"."}}, default_bg, 1},
{{{U"b", U"B"}, {U"\""}}, default_bg, 1},
{{{U"n", U"N"}, {U":"}}, default_bg, 1},
{{{U"m", U"M"}, {U";"}}, default_bg, 1},
{{{U"/", U":"}, {U"<"}}, default_bg, 1},
{{{U".", U","}, {U"="}}, default_bg, 1},
{{{U"_", U"?"}, {U">"}}, default_bg, 1},
// Control
{{{U"A/a"}, {U""}}, special2_bg, 2, button_flags::_shift, shift_cb },
{{{U"@#:"}, {U"ABC"}}, special2_bg, 2, button_flags::_layer, layer_cb },
{{{space}, {space}}, special_bg, 2, button_flags::_space, space_cb },
{{{backspace}, {backspace}}, special_bg, 2, button_flags::_default, delete_cb },
{{{enter}, {enter}}, special2_bg, 2, button_flags::_return, enter_cb },
};
}
osk_panel_password::osk_panel_password(callback_t shift_cb, callback_t layer_cb, callback_t space_cb, callback_t delete_cb, callback_t enter_cb)
: osk_panel_alphabet_half_width(shift_cb, layer_cb, space_cb, delete_cb, enter_cb, CELL_OSKDIALOG_PANELMODE_PASSWORD)
{
// Same as the half-width alphanumeric character panel.
}
}
}
| 56,360 | C++ | .cpp | 1,074 | 45.86406 | 188 | 0.435313 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | true | false | false | true | false | false |
5,416 | overlay_fonts.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_fonts.cpp |
#include "stdafx.h"
#include "overlay_controls.h"
#include "Emu/System.h"
#include "Emu/vfs_config.h"
#ifndef _WIN32
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__)
#include <sys/sysctl.h>
#endif
#endif
namespace rsx
{
namespace overlays
{
void codepage::initialize_glyphs(char32_t codepage_id, f32 font_size, const std::vector<u8>& ttf_data)
{
glyph_base = (codepage_id * 256);
glyph_data.resize(bitmap_width * bitmap_height);
pack_info.resize(256);
stbtt_pack_context context;
if (!stbtt_PackBegin(&context, glyph_data.data(), bitmap_width, bitmap_height, 0, 1, nullptr))
{
rsx_log.error("Font packing failed");
return;
}
stbtt_PackSetOversampling(&context, oversample, oversample);
if (!stbtt_PackFontRange(&context, ttf_data.data(), 0, font_size, (codepage_id * 256), 256, pack_info.data()))
{
rsx_log.error("Font packing failed");
stbtt_PackEnd(&context);
return;
}
stbtt_PackEnd(&context);
}
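// Illustrative sketch (not part of the original file; the struct and function
// names are hypothetical). It shows the page/slot split this codepage scheme
// relies on: each page covers 256 consecutive code points, so the high bits of
// a char32_t select the page and the low byte selects the slot within it.
struct example_page_slot { char32_t page; char32_t slot; };
static constexpr example_page_slot example_locate_glyph(char32_t c)
{
return { c >> 8, c & 0xFF }; // e.g. U+4E2D -> page 0x4E, slot 0x2D
}
static_assert(example_locate_glyph(U'\u4E2D').page == 0x4E);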
stbtt_aligned_quad codepage::get_char(char32_t c, f32& x_advance, f32& y_advance)
{
stbtt_aligned_quad quad;
stbtt_GetPackedQuad(pack_info.data(), bitmap_width, bitmap_height, (c - glyph_base), &x_advance, &y_advance, &quad, false);
quad.t0 += sampler_z;
quad.t1 += sampler_z;
return quad;
}
font::font(const char* ttf_name, f32 size)
{
// Convert pt to px
size_px = ceilf(size * 96.f / 72.f);
size_pt = size;
font_name = ttf_name;
initialized = true;
}
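// Illustration (not original code): the pt -> px conversion above assumes a
// fixed 96 DPI target, so a 12 pt font becomes ceilf(12 * 96.f / 72.f) = 16 px.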
language_class font::classify(char32_t codepage_id)
{
switch (codepage_id)
{
case 00: // Extended ASCII
case 04: // Cyrillic
{
return language_class::default_;
}
case 0x11: // Hangul jamo
case 0x31: // Compatibility jamo 3130-318F
// case 0xA9: // Hangul jamo extended block A A960-A97F
{
return language_class::hangul;
}
case 0xFF: // Halfwidth and Fullwidth Forms
{
// Found in SCE-PS3-SR-R-JPN.TTF, so we'll use cjk_base for now
return language_class::cjk_base;
}
default:
{
if (codepage_id >= 0xAC && codepage_id <= 0xD7)
{
// Hangul syllables + jamo extended block B
return language_class::hangul;
}
if (codepage_id >= 0x2E && codepage_id <= 0x9F)
{
// Generic CJK blocks, mostly chinese and japanese kana
// Should typically be compatible with JPN ttf fonts
return language_class::cjk_base;
}
// TODO
return language_class::default_;
}
}
}
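// Illustration (not original code): classify(0x4E) lands in the 0x2E..0x9F
// range above and returns cjk_base, so U+4E2D is drawn with the JPN dev_flash
// font, while classify(0xAC) falls in the Hangul-syllable range and returns
// language_class::hangul.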
glyph_load_setup font::get_glyph_files(language_class class_) const
{
glyph_load_setup result;
result.font_names.push_back(font_name);
const std::vector<std::string> font_dirs = Emu.GetCallbacks().get_font_dirs();
result.lookup_font_dirs.insert(result.lookup_font_dirs.end(), font_dirs.begin(), font_dirs.end());
// Search dev_flash for the font too
result.lookup_font_dirs.push_back(g_cfg_vfs.get_dev_flash() + "data/font/");
result.lookup_font_dirs.push_back(g_cfg_vfs.get_dev_flash() + "data/font/SONY-CC/");
switch (class_)
{
case language_class::default_:
{
result.font_names.emplace_back("Arial.ttf");
result.font_names.emplace_back("arial.ttf");
#ifndef _WIN32
result.font_names.emplace_back("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"); // ubuntu
result.font_names.emplace_back("/usr/share/fonts/TTF/DejaVuSans.ttf"); // arch
#endif
// Attempt to load a font from dev_flash as a last resort
result.font_names.emplace_back("SCE-PS3-VR-R-LATIN.TTF");
break;
}
case language_class::cjk_base:
{
// Skip loading font files directly
result.font_names.clear();
// Attempt to load a font from dev_flash before any other source
result.font_names.emplace_back("SCE-PS3-SR-R-JPN.TTF");
// Known system font as last fallback
result.font_names.emplace_back("Yu Gothic.ttf");
result.font_names.emplace_back("YuGothR.ttc");
break;
}
case language_class::hangul:
{
// Skip loading font files directly
result.font_names.clear();
// Attempt to load a font from dev_flash before any other source
result.font_names.emplace_back("SCE-PS3-YG-R-KOR.TTF");
// Known system font as last fallback
result.font_names.emplace_back("Malgun Gothic.ttf");
result.font_names.emplace_back("malgun.ttf");
break;
}
}
return result;
}
codepage* font::initialize_codepage(char32_t codepage_id)
{
// Init glyph
const auto class_ = classify(codepage_id);
const auto fs_settings = get_glyph_files(class_);
// Attempt to load the requested font
std::vector<u8> bytes;
std::string file_path;
bool font_found = false;
for (const auto& font_file : fs_settings.font_names)
{
if (fs::is_file(font_file))
{
// Check for absolute paths or fonts 'installed' to the executable folder
file_path = font_file;
font_found = true;
break;
}
std::string extension;
if (const auto extension_start = font_file.find_last_of('.');
extension_start != umax)
{
extension = font_file.substr(extension_start + 1);
}
std::string file_name = font_file;
if (extension.length() != 3)
{
// Allow other extensions to support other truetype formats
file_name += ".ttf";
}
for (const auto& font_dir : fs_settings.lookup_font_dirs)
{
file_path = font_dir + file_name;
if (fs::is_file(file_path))
{
font_found = true;
break;
}
}
if (font_found)
{
break;
}
}
// Read font
if (font_found)
{
fs::file f(file_path);
f.read(bytes, f.size());
}
else
{
rsx_log.error("Failed to initialize font '%s.ttf' on codepage %d", font_name, static_cast<u32>(codepage_id));
return nullptr;
}
codepage_cache.page = nullptr;
auto page = std::make_unique<codepage>();
page->initialize_glyphs(codepage_id, size_px, bytes);
page->sampler_z = static_cast<f32>(m_glyph_map.size());
auto ret = page.get();
m_glyph_map.emplace_back(codepage_id, std::move(page));
if (codepage_id == 0)
{
// Latin-1
f32 unused;
get_char('m', em_size, unused);
}
return ret;
}
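// Illustrative comment (not original code): the 'm' lookup above runs only for
// page 0 and doubles as a measurement pass - the x advance that get_char()
// writes out becomes em_size, the typical glyph advance later layout code uses.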
stbtt_aligned_quad font::get_char(char32_t c, f32& x_advance, f32& y_advance)
{
if (!initialized)
return {};
const auto page_id = (c >> 8);
if (codepage_cache.codepage_id == page_id && codepage_cache.page) [[likely]]
{
return codepage_cache.page->get_char(c, x_advance, y_advance);
}
else
{
codepage_cache.codepage_id = page_id;
codepage_cache.page = nullptr;
for (const auto& e : m_glyph_map)
{
if (e.first == unsigned(page_id))
{
codepage_cache.page = e.second.get();
break;
}
}
if (!codepage_cache.page) [[unlikely]]
{
codepage_cache.page = initialize_codepage(page_id);
}
return codepage_cache.page->get_char(c, x_advance, y_advance);
}
}
std::vector<vertex> font::render_text_ex(f32& x_advance, f32& y_advance, const char32_t* text, usz char_limit, u16 max_width, bool wrap)
{
x_advance = 0.f;
y_advance = 0.f;
std::vector<vertex> result;
if (!initialized)
{
return result;
}
// Render as many characters as possible as glyphs.
for (usz i = 0u, begin_of_word = 0u; i < char_limit; i++)
{
switch (const auto& c = text[i])
{
case '\0':
{
// We're done.
return result;
}
case '\n':
{
// Reset x to 0 and increase y to advance to the new line.
x_advance = 0.f;
y_advance += size_px + 2.f;
begin_of_word = result.size();
continue;
}
case '\r':
{
// Reset x to 0.
x_advance = 0.f;
begin_of_word = result.size();
continue;
}
default:
{
const bool is_whitespace = c == ' ';
stbtt_aligned_quad quad{};
if (is_whitespace)
{
// Skip whitespace if we are at the start of a line.
if (x_advance <= 0.f)
{
// Set the glyph to the current position.
// This is necessary for downstream linewidth calculations.
quad.x0 = quad.x1 = x_advance;
quad.y0 = quad.y1 = y_advance;
}
else
{
const f32 x_advance_old = x_advance;
const f32 y_advance_old = y_advance;
// Get the glyph size.
quad = get_char(c, x_advance, y_advance);
// Reset the result if the glyph would protrude out of the given space anyway.
if (x_advance > max_width)
{
// Set the glyph to the previous position.
// This is necessary for downstream linewidth calculations.
quad.x0 = quad.x1 = x_advance_old;
quad.y0 = quad.y1 = y_advance_old;
}
}
}
else
{
// No whitespace. Get the glyph size.
quad = get_char(c, x_advance, y_advance);
}
// Add the glyph's vertices.
result.emplace_back(quad.x0, quad.y0, quad.s0, quad.t0);
result.emplace_back(quad.x1, quad.y0, quad.s1, quad.t0);
result.emplace_back(quad.x0, quad.y1, quad.s0, quad.t1);
result.emplace_back(quad.x1, quad.y1, quad.s1, quad.t1);
// The next word will begin after any whitespaces.
if (is_whitespace)
{
begin_of_word = result.size();
}
// Check if we reached the end of the available space.
if (x_advance > max_width)
{
// Try to wrap the protruding text
if (wrap)
{
// Increase y to advance to the next line.
y_advance += size_px + 2.f;
// We can just reset x and move on to the next character if this is a whitespace.
if (is_whitespace)
{
x_advance = 0.f;
break;
}
// Get the leftmost offset of the current word.
const f32 base_x = result[begin_of_word].x();
// Move all characters of the current word one line down and to the left.
for (usz n = begin_of_word; n < result.size(); ++n)
{
result[n].x() -= base_x;
result[n].y() += size_px + 2.f;
}
// Set x offset to the rightmost position of the current word
x_advance = result.back().x();
}
else
{
// TODO: Ellipsize
}
}
break;
}
} // switch
}
return result;
}
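// Hypothetical usage sketch for render_text_ex() above; the font instance and
// values are illustrative only. Four vertices are emitted per glyph, and the
// out-parameters return the pen position after the last rendered character:
//
// f32 pen_x = 0.f, pen_y = 0.f;
// std::vector<vertex> verts = my_font.render_text_ex(pen_x, pen_y, U"hello world", -1, 64, true);
// // With wrap = true and max_width = 64 px, "world" is moved down to a
// // second line instead of protruding past the boundary.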
std::vector<vertex> font::render_text(const char32_t* text, u16 max_width, bool wrap)
{
f32 unused_x, unused_y;
return render_text_ex(unused_x, unused_y, text, -1, max_width, wrap);
}
std::pair<f32, f32> font::get_char_offset(const char32_t* text, usz max_length, u16 max_width, bool wrap)
{
f32 loc_x, loc_y;
render_text_ex(loc_x, loc_y, text, max_length, max_width, wrap);
return {loc_x, loc_y};
}
std::vector<u8> font::get_glyph_data() const
{
std::vector<u8> bytes;
const u32 page_size = codepage::bitmap_width * codepage::bitmap_height;
const auto size = page_size * m_glyph_map.size();
bytes.resize(size);
u8* data = bytes.data();
for (const auto& e : m_glyph_map)
{
std::memcpy(data, e.second->glyph_data.data(), page_size);
data += page_size;
}
return bytes;
}
} // namespace overlays
} // namespace rsx
| 11,205 | C++ | .cpp | 377 | 24.702918 | 138 | 0.629829 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,417 | overlay_animated_icon.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_animated_icon.cpp |
#include "stdafx.h"
#include "overlay_animated_icon.h"
#include "Utilities/File.h"
#include "../Common/time.hpp"
namespace rsx
{
namespace overlays
{
animated_icon::animated_icon(const char* icon_name)
{
const std::string image_path = fmt::format("%s/Icons/ui/%s", fs::get_config_dir(), icon_name);
m_icon = std::make_unique<image_info>(image_path.c_str());
set_raw_image(m_icon.get());
}
animated_icon::animated_icon(const std::vector<u8>& icon_data)
{
m_icon = std::make_unique<image_info>(icon_data);
set_raw_image(m_icon.get());
}
void animated_icon::update_animation_frame(compiled_resource& result)
{
if (m_last_update_timestamp_us == 0)
{
m_last_update_timestamp_us = get_system_time();
}
else
{
const auto now = get_system_time();
m_current_frame_duration_us += (now - m_last_update_timestamp_us);
m_last_update_timestamp_us = now;
}
if (m_current_frame_duration_us > m_frame_duration_us)
{
m_current_frame = (m_current_frame + 1) % m_total_frames;
m_current_frame_duration_us = 0;
}
// We only care about the UV (zw) components
const float x = f32(m_frame_width + m_spacing_x) * (m_current_frame % m_row_length) + m_start_x;
const float y = f32(m_frame_height + m_spacing_y) * (m_current_frame / m_row_length) + m_start_y;
auto& cmd = result.draw_commands[0];
cmd.verts[0].z() = x / m_icon->w;
cmd.verts[0].w() = y / m_icon->h;
cmd.verts[1].z() = (x + m_frame_width) / m_icon->w;
cmd.verts[1].w() = y / m_icon->h;
cmd.verts[2].z() = x / m_icon->w;
cmd.verts[2].w() = ((y + m_frame_height) / m_icon->h);
cmd.verts[3].z() = (x + m_frame_width) / m_icon->w;
cmd.verts[3].w() = ((y + m_frame_height) / m_icon->h);
}
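// Self-contained sketch (not original code) of the frame -> UV math above,
// ignoring the spacing and start offsets for brevity: the frame index is split
// into a column (modulo row length) and a row (integer division), scaled to
// texels, then normalized by the atlas dimensions.
struct example_uv_rect { float u0, v0, u1, v1; };
static example_uv_rect example_frame_uvs(int frame, int row_length, int frame_w, int frame_h, float atlas_w, float atlas_h)
{
const float x = static_cast<float>(frame_w * (frame % row_length));
const float y = static_cast<float>(frame_h * (frame / row_length));
return { x / atlas_w, y / atlas_h, (x + frame_w) / atlas_w, (y + frame_h) / atlas_h };
}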
compiled_resource& animated_icon::get_compiled()
{
if (!is_compiled)
{
compiled_resources = image_view::get_compiled();
}
update_animation_frame(compiled_resources);
return compiled_resources;
}
}
}
| 2,047 | C++ | .cpp | 60 | 29.35 | 101 | 0.618588 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,418 | overlays.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlays.cpp |
#include "stdafx.h"
#include "overlays.h"
#include "overlay_manager.h"
#include "overlay_message_dialog.h"
#include "Input/pad_thread.h"
#include "Emu/Io/interception.h"
#include "Emu/Io/KeyboardHandler.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/RSX/Common/time.hpp"
LOG_CHANNEL(overlays);
extern bool is_input_allowed();
namespace rsx
{
namespace overlays
{
thread_local DECLARE(user_interface::g_thread_bit) = 0;
u32 user_interface::alloc_thread_bit()
{
auto [_old, ok] = this->thread_bits.fetch_op([](u32& bits)
{
if (~bits)
{
// Set lowest clear bit
bits |= bits + 1;
return true;
}
return false;
});
if (!ok)
{
::overlays.fatal("Out of thread bits in user interface");
return 0;
}
const u32 r = u32{1} << std::countr_one(_old);
::overlays.trace("Bit allocated (%u)", r);
return r;
}
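// Minimal sketch (not original code) of the lowest-clear-bit trick used above;
// u32 and std::countr_one are assumed available since the surrounding code
// already uses them. For bits = 0b0111, bits + 1 = 0b1000, so bits |= bits + 1
// yields 0b1111: the lowest zero bit is claimed, and std::countr_one of the
// old value recovers that bit's index.
static u32 example_claim_lowest_clear_bit(u32& bits)
{
const u32 old = bits;
bits |= bits + 1; // set the lowest clear bit
return u32{1} << std::countr_one(old); // mask of the bit just claimed
}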
// Singleton instance declaration
fontmgr* fontmgr::m_instance = nullptr;
s32 user_interface::run_input_loop(std::function<bool()> check_state)
{
user_interface::thread_bits_allocator thread_bits_alloc(this);
m_interactive = true;
std::array<steady_clock::time_point, CELL_PAD_MAX_PORT_NUM> timestamp;
timestamp.fill(steady_clock::now());
constexpr u64 ms_threshold = 500;
std::array<steady_clock::time_point, CELL_PAD_MAX_PORT_NUM> initial_timestamp;
initial_timestamp.fill(steady_clock::now());
std::array<pad_button, CELL_PAD_MAX_PORT_NUM> last_auto_repeat_button;
last_auto_repeat_button.fill(pad_button::pad_button_max_enum);
std::array<std::array<bool, static_cast<u32>(pad_button::pad_button_max_enum)>, CELL_PAD_MAX_PORT_NUM> last_button_state;
for (auto& state : last_button_state)
{
// Initialize last button states as pressed to avoid unwanted button presses when entering the dialog.
state.fill(true);
}
m_input_timer.Start();
// Only start intercepting input if the overlay allows it (enabled by default)
if (m_start_pad_interception)
{
input::SetIntercepted(true);
}
const auto handle_button_press = [&](pad_button button_id, bool pressed, int pad_index)
{
if (button_id >= pad_button::pad_button_max_enum)
{
return;
}
bool& last_state = last_button_state[pad_index][static_cast<u32>(button_id)];
if (pressed)
{
const bool is_auto_repeat_button = m_auto_repeat_buttons.contains(button_id);
if (!last_state)
{
// The button was not pressed before, so this is a new button press. Reset auto-repeat.
timestamp[pad_index] = steady_clock::now();
initial_timestamp[pad_index] = timestamp[pad_index];
last_auto_repeat_button[pad_index] = is_auto_repeat_button ? button_id : pad_button::pad_button_max_enum;
on_button_pressed(static_cast<pad_button>(button_id), false);
}
else if (is_auto_repeat_button)
{
if (last_auto_repeat_button[pad_index] == button_id
&& m_input_timer.GetMsSince(initial_timestamp[pad_index]) > ms_threshold
&& m_input_timer.GetMsSince(timestamp[pad_index]) > m_auto_repeat_ms_interval)
{
// The auto-repeat button was pressed for at least the given threshold in ms and will trigger at an interval.
timestamp[pad_index] = steady_clock::now();
on_button_pressed(static_cast<pad_button>(button_id), true);
}
else if (last_auto_repeat_button[pad_index] == pad_button::pad_button_max_enum)
{
// An auto-repeat button was already pressed before and will now start triggering again after the next threshold.
last_auto_repeat_button[pad_index] = button_id;
}
}
}
else if (last_state && last_auto_repeat_button[pad_index] == button_id)
{
// We stopped pressing an auto-repeat button, so re-enable auto-repeat for other buttons.
last_auto_repeat_button[pad_index] = pad_button::pad_button_max_enum;
}
last_state = pressed;
};
while (!m_stop_input_loop)
{
if (check_state && !check_state())
{
// Interrupted externally.
break;
}
if (Emu.IsStopped())
{
return selection_code::canceled;
}
if (Emu.IsPaused() && !m_allow_input_on_pause)
{
thread_ctrl::wait_for(10000);
continue;
}
thread_ctrl::wait_for(1000);
if (!is_input_allowed())
{
refresh();
continue;
}
// Get keyboard input if supported by the overlay and activated by the game.
// Ignored if a keyboard pad handler is active in order to prevent double input.
if (m_keyboard_input_enabled && !m_keyboard_pad_handler_active && input::g_keyboards_intercepted)
{
auto& kb_handler = g_fxo->get<KeyboardHandlerBase>();
std::lock_guard<std::mutex> lock(kb_handler.m_mutex);
// Add and get consumer
keyboard_consumer& kb_consumer = kb_handler.AddConsumer(keyboard_consumer::identifier::overlays, 1);
std::vector<Keyboard>& keyboards = kb_consumer.GetKeyboards();
if (!keyboards.empty() && kb_consumer.GetInfo().status[0] == CELL_KB_STATUS_CONNECTED)
{
KbData& current_data = kb_consumer.GetData(0);
KbExtraData& extra_data = kb_consumer.GetExtraData(0);
if (current_data.len > 0 || !extra_data.pressed_keys.empty())
{
for (s32 i = 0; i < current_data.len; i++)
{
const KbButton& key = current_data.buttons[i];
on_key_pressed(current_data.led, current_data.mkey, key.m_keyCode, key.m_outKeyCode, key.m_pressed, {});
}
for (const std::u32string& key : extra_data.pressed_keys)
{
on_key_pressed(0, 0, 0, 0, true, key);
}
// Flush buffer unconditionally. Otherwise we get a flood of key events.
current_data.len = 0;
extra_data.pressed_keys.clear();
// Ignore gamepad input if a key was recognized
refresh();
continue;
}
}
}
// Get gamepad input
std::lock_guard lock(pad::g_pad_mutex);
const auto handler = pad::get_current_handler();
const PadInfo& rinfo = handler->GetInfo();
const bool ignore_gamepad_input = (!rinfo.now_connect || !input::g_pads_intercepted);
const pad_button cross_button = g_cfg.sys.enter_button_assignment == enter_button_assign::circle ? pad_button::circle : pad_button::cross;
const pad_button circle_button = g_cfg.sys.enter_button_assignment == enter_button_assign::circle ? pad_button::cross : pad_button::circle;
m_keyboard_pad_handler_active = false;
int pad_index = -1;
for (const auto& pad : handler->GetPads())
{
if (m_stop_input_loop)
break;
if (++pad_index >= CELL_PAD_MAX_PORT_NUM)
{
rsx_log.fatal("The native overlay cannot handle more than 7 pads! Current number of pads: %d", pad_index + 1);
continue;
}
if (pad_index > 0 && g_cfg.io.lock_overlay_input_to_player_one)
{
continue;
}
if (!pad)
{
rsx_log.fatal("Pad %d is nullptr", pad_index);
continue;
}
if (!(pad->m_port_status & CELL_PAD_STATUS_CONNECTED))
{
continue;
}
if (pad->m_pad_handler == pad_handler::keyboard)
{
m_keyboard_pad_handler_active = true;
}
if (ignore_gamepad_input)
{
if (m_keyboard_pad_handler_active)
{
break;
}
continue;
}
if (pad->ldd)
{
// LDD pads get passed input data from the game itself.
// NOTE: Rock Band 3 doesn't seem to care about the len. It's always 0.
//if (pad->ldd_data.len > CELL_PAD_BTN_OFFSET_DIGITAL1)
{
const u16 digital1 = pad->ldd_data.button[CELL_PAD_BTN_OFFSET_DIGITAL1];
handle_button_press(pad_button::dpad_left, !!(digital1 & CELL_PAD_CTRL_LEFT), pad_index);
handle_button_press(pad_button::dpad_right, !!(digital1 & CELL_PAD_CTRL_RIGHT), pad_index);
handle_button_press(pad_button::dpad_down, !!(digital1 & CELL_PAD_CTRL_DOWN), pad_index);
handle_button_press(pad_button::dpad_up, !!(digital1 & CELL_PAD_CTRL_UP), pad_index);
handle_button_press(pad_button::L3, !!(digital1 & CELL_PAD_CTRL_L3), pad_index);
handle_button_press(pad_button::R3, !!(digital1 & CELL_PAD_CTRL_R3), pad_index);
handle_button_press(pad_button::select, !!(digital1 & CELL_PAD_CTRL_SELECT), pad_index);
handle_button_press(pad_button::start, !!(digital1 & CELL_PAD_CTRL_START), pad_index);
}
//if (pad->ldd_data.len > CELL_PAD_BTN_OFFSET_DIGITAL2)
{
const u16 digital2 = pad->ldd_data.button[CELL_PAD_BTN_OFFSET_DIGITAL2];
handle_button_press(pad_button::triangle, !!(digital2 & CELL_PAD_CTRL_TRIANGLE), pad_index);
handle_button_press(circle_button, !!(digital2 & CELL_PAD_CTRL_CIRCLE), pad_index);
handle_button_press(pad_button::square, !!(digital2 & CELL_PAD_CTRL_SQUARE), pad_index);
handle_button_press(cross_button, !!(digital2 & CELL_PAD_CTRL_CROSS), pad_index);
handle_button_press(pad_button::L1, !!(digital2 & CELL_PAD_CTRL_L1), pad_index);
handle_button_press(pad_button::R1, !!(digital2 & CELL_PAD_CTRL_R1), pad_index);
handle_button_press(pad_button::L2, !!(digital2 & CELL_PAD_CTRL_L2), pad_index);
handle_button_press(pad_button::R2, !!(digital2 & CELL_PAD_CTRL_R2), pad_index);
handle_button_press(pad_button::ps, !!(digital2 & CELL_PAD_CTRL_PS), pad_index);
}
const auto handle_ldd_stick_input = [&](s32 offset, pad_button id_small, pad_button id_large)
{
//if (pad->ldd_data.len <= offset) return;
constexpr u16 threshold = 20; // Let's be careful and use some threshold here
const u16 value = pad->ldd_data.button[offset];
if (value <= (128 - threshold))
{
// Release other direction on the same axis first
handle_button_press(id_large, false, pad_index);
handle_button_press(id_small, true, pad_index);
}
else if (value > (128 + threshold))
{
// Release other direction on the same axis first
handle_button_press(id_small, false, pad_index);
handle_button_press(id_large, true, pad_index);
}
else
{
// Release both directions on the same axis
handle_button_press(id_small, false, pad_index);
handle_button_press(id_large, false, pad_index);
}
};
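// Illustration (not original code): with the 20-unit dead zone above, a raw
// axis value of 100 (<= 108) presses the "small" direction, 150 (> 148)
// presses the "large" one, and anything in 109..148 releases both.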
handle_ldd_stick_input(CELL_PAD_BTN_OFFSET_ANALOG_RIGHT_X, pad_button::rs_left, pad_button::rs_right);
handle_ldd_stick_input(CELL_PAD_BTN_OFFSET_ANALOG_RIGHT_Y, pad_button::rs_down, pad_button::rs_up);
handle_ldd_stick_input(CELL_PAD_BTN_OFFSET_ANALOG_LEFT_X, pad_button::ls_left, pad_button::ls_right);
handle_ldd_stick_input(CELL_PAD_BTN_OFFSET_ANALOG_LEFT_Y, pad_button::ls_down, pad_button::ls_up);
continue;
}
for (const Button& button : pad->m_buttons)
{
pad_button button_id = pad_button::pad_button_max_enum;
if (button.m_offset == CELL_PAD_BTN_OFFSET_DIGITAL1)
{
switch (button.m_outKeyCode)
{
case CELL_PAD_CTRL_LEFT:
button_id = pad_button::dpad_left;
break;
case CELL_PAD_CTRL_RIGHT:
button_id = pad_button::dpad_right;
break;
case CELL_PAD_CTRL_DOWN:
button_id = pad_button::dpad_down;
break;
case CELL_PAD_CTRL_UP:
button_id = pad_button::dpad_up;
break;
case CELL_PAD_CTRL_L3:
button_id = pad_button::L3;
break;
case CELL_PAD_CTRL_R3:
button_id = pad_button::R3;
break;
case CELL_PAD_CTRL_SELECT:
button_id = pad_button::select;
break;
case CELL_PAD_CTRL_START:
button_id = pad_button::start;
break;
default:
break;
}
}
else if (button.m_offset == CELL_PAD_BTN_OFFSET_DIGITAL2)
{
switch (button.m_outKeyCode)
{
case CELL_PAD_CTRL_TRIANGLE:
button_id = pad_button::triangle;
break;
case CELL_PAD_CTRL_CIRCLE:
button_id = circle_button;
break;
case CELL_PAD_CTRL_SQUARE:
button_id = pad_button::square;
break;
case CELL_PAD_CTRL_CROSS:
button_id = cross_button;
break;
case CELL_PAD_CTRL_L1:
button_id = pad_button::L1;
break;
case CELL_PAD_CTRL_R1:
button_id = pad_button::R1;
break;
case CELL_PAD_CTRL_L2:
button_id = pad_button::L2;
break;
case CELL_PAD_CTRL_R2:
button_id = pad_button::R2;
break;
case CELL_PAD_CTRL_PS:
button_id = pad_button::ps;
break;
default:
break;
}
}
handle_button_press(button_id, button.m_pressed, pad_index);
if (m_stop_input_loop)
break;
}
for (const AnalogStick& stick : pad->m_sticks)
{
pad_button button_id = pad_button::pad_button_max_enum;
pad_button release_id = pad_button::pad_button_max_enum;
// Let's say sticks are only pressed if they are almost completely tilted. Otherwise navigation feels really wacky.
const bool pressed = stick.m_value < 30 || stick.m_value > 225;
switch (stick.m_offset)
{
case CELL_PAD_BTN_OFFSET_ANALOG_LEFT_X:
button_id = (stick.m_value <= 128) ? pad_button::ls_left : pad_button::ls_right;
release_id = (stick.m_value > 128) ? pad_button::ls_left : pad_button::ls_right;
break;
case CELL_PAD_BTN_OFFSET_ANALOG_LEFT_Y:
button_id = (stick.m_value <= 128) ? pad_button::ls_up : pad_button::ls_down;
release_id = (stick.m_value > 128) ? pad_button::ls_up : pad_button::ls_down;
break;
case CELL_PAD_BTN_OFFSET_ANALOG_RIGHT_X:
button_id = (stick.m_value <= 128) ? pad_button::rs_left : pad_button::rs_right;
release_id = (stick.m_value > 128) ? pad_button::rs_left : pad_button::rs_right;
break;
case CELL_PAD_BTN_OFFSET_ANALOG_RIGHT_Y:
button_id = (stick.m_value <= 128) ? pad_button::rs_up : pad_button::rs_down;
release_id = (stick.m_value > 128) ? pad_button::rs_up : pad_button::rs_down;
break;
default:
break;
}
// Release other direction on the same axis first
handle_button_press(release_id, false, pad_index);
// Handle currently pressed stick direction
handle_button_press(button_id, pressed, pad_index);
if (m_stop_input_loop)
break;
}
}
refresh();
}
// Remove keyboard consumer. We don't need it anymore.
{
auto& kb_handler = g_fxo->get<KeyboardHandlerBase>();
std::lock_guard<std::mutex> lock(kb_handler.m_mutex);
kb_handler.RemoveConsumer(keyboard_consumer::identifier::overlays);
}
// Disable pad interception since this user interface has to be interactive.
// Non-interactive user interfaces handle this in close in order to prevent a g_pad_mutex deadlock.
if (m_stop_pad_interception)
{
input::SetIntercepted(false);
}
return !m_stop_input_loop
? selection_code::interrupted
: selection_code::ok;
}
void user_interface::close(bool use_callback, bool stop_pad_interception)
{
// Force unload
m_stop_pad_interception.release(stop_pad_interception);
m_stop_input_loop.release(true);
while (u32 b = thread_bits)
{
if (b == g_thread_bit)
{
// Don't wait for its own bit
break;
}
thread_bits.wait(b);
}
// Only disable pad interception if this user interface is not interactive.
// Interactive user interfaces handle this in run_input_loop in order to prevent a g_pad_mutex deadlock.
if (!m_interactive && m_stop_pad_interception)
{
input::SetIntercepted(false);
}
if (on_close && use_callback)
{
on_close(return_code);
}
// NOTE: Object removal should be the last step
if (auto& manager = g_fxo->get<display_manager>(); g_fxo->is_init<display_manager>())
{
manager.remove(uid);
}
}
void overlay::refresh() const
{
if (!visible)
{
return;
}
if (auto rsxthr = rsx::get_current_renderer(); rsxthr &&
(min_refresh_duration_us + rsxthr->last_host_flip_timestamp) < get_system_time())
{
rsxthr->async_flip_requested |= rsx::thread::flip_request::native_ui;
}
}
} // namespace overlays
} // namespace rsx
| 16,516 | C++ | .cpp | 432 | 31.87963 | 143 | 0.633308 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,419 | overlay_list_view.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_list_view.cpp |
#include "stdafx.h"
#include "overlay_list_view.hpp"
#include "Emu/system_config.h"
namespace rsx
{
namespace overlays
{
list_view::list_view(u16 width, u16 height, bool use_separators, bool can_deny)
: m_use_separators(use_separators)
{
w = width;
h = height;
m_scroll_indicator_top = std::make_unique<image_view>(width, 5);
m_scroll_indicator_bottom = std::make_unique<image_view>(width, 5);
m_accept_btn = std::make_unique<image_button>(120, 20);
m_cancel_btn = std::make_unique<image_button>(120, 20);
m_highlight_box = std::make_unique<overlay_element>(width, 0);
m_scroll_indicator_top->set_size(width, 40);
m_scroll_indicator_bottom->set_size(width, 40);
m_accept_btn->set_size(120, 30);
m_cancel_btn->set_size(120, 30);
m_scroll_indicator_top->set_image_resource(resource_config::standard_image_resource::fade_top);
m_scroll_indicator_bottom->set_image_resource(resource_config::standard_image_resource::fade_bottom);
if (g_cfg.sys.enter_button_assignment == enter_button_assign::circle)
{
m_accept_btn->set_image_resource(resource_config::standard_image_resource::circle);
m_cancel_btn->set_image_resource(resource_config::standard_image_resource::cross);
}
else
{
m_accept_btn->set_image_resource(resource_config::standard_image_resource::cross);
m_cancel_btn->set_image_resource(resource_config::standard_image_resource::circle);
}
m_scroll_indicator_bottom->set_pos(0, height - 40);
m_accept_btn->set_pos(30, height + 20);
if (can_deny)
{
m_deny_btn = std::make_unique<image_button>(120, 20);
m_deny_btn->set_size(120, 30);
m_deny_btn->set_image_resource(resource_config::standard_image_resource::triangle);
m_deny_btn->set_pos(180, height + 20);
m_deny_btn->set_text(localized_string_id::RSX_OVERLAYS_LIST_DENY);
m_deny_btn->set_font("Arial", 16);
m_cancel_btn->set_pos(330, height + 20);
}
else
{
m_cancel_btn->set_pos(180, height + 20);
}
m_accept_btn->set_text(localized_string_id::RSX_OVERLAYS_LIST_SELECT);
m_cancel_btn->set_text(localized_string_id::RSX_OVERLAYS_LIST_CANCEL);
m_accept_btn->set_font("Arial", 16);
m_cancel_btn->set_font("Arial", 16);
auto_resize = false;
back_color = {0.15f, 0.15f, 0.15f, 0.8f};
m_highlight_box->back_color = {.5f, .5f, .8f, 0.2f};
m_highlight_box->pulse_effect_enabled = true;
m_scroll_indicator_top->fore_color.a = 0.f;
m_scroll_indicator_bottom->fore_color.a = 0.f;
}
void list_view::update_selection()
{
if (m_selected_entry < 0)
{
return; // Ideally unreachable but it should still be possible to recover by user interaction.
}
const usz current_index = static_cast<usz>(m_selected_entry) * (m_use_separators ? 2 : 1);
if (m_items.size() <= current_index)
{
return; // Ideally unreachable but it should still be possible to recover by user interaction.
}
auto current_element = m_items[current_index].get();
// Calculate bounds
auto min_y = current_element->y - y;
auto max_y = current_element->y + current_element->h + pack_padding + 2 - y;
if (min_y < scroll_offset_value)
{
scroll_offset_value = min_y;
}
else if (max_y > (h + scroll_offset_value))
{
scroll_offset_value = max_y - h - 2;
}
if ((scroll_offset_value + h + 2) >= m_elements_height)
m_scroll_indicator_bottom->fore_color.a = 0.f;
else
m_scroll_indicator_bottom->fore_color.a = 0.5f;
if (scroll_offset_value == 0)
m_scroll_indicator_top->fore_color.a = 0.f;
else
m_scroll_indicator_top->fore_color.a = 0.5f;
m_highlight_box->set_pos(current_element->x, current_element->y);
m_highlight_box->h = current_element->h + pack_padding;
m_highlight_box->y -= scroll_offset_value;
m_highlight_box->refresh();
m_scroll_indicator_top->refresh();
m_scroll_indicator_bottom->refresh();
refresh();
}
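// Standalone sketch (not original code) of the scroll clamp above, omitting
// the small 2 px padding terms the original folds in: the offset only changes
// when the selected entry's bounds leave the visible window, which keeps the
// selection in view without recentering the list.
static int example_clamp_scroll(int offset, int entry_min_y, int entry_max_y, int view_h)
{
if (entry_min_y < offset) return entry_min_y; // entry above the window
if (entry_max_y > offset + view_h) return entry_max_y - view_h; // below it
return offset; // already visible: keep the offset unchanged
}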
void list_view::select_entry(s32 entry)
{
const s32 max_entry = m_elements_count - 1;
// Reset the pulse to slightly below 1, rising on each user interaction
m_highlight_box->set_sinus_offset(1.6f);
if (m_selected_entry != entry)
{
m_selected_entry = std::max(0, std::min(entry, max_entry));
update_selection();
}
else
{
refresh();
}
}
void list_view::select_next(u16 count)
{
select_entry(m_selected_entry + count);
}
void list_view::select_previous(u16 count)
{
select_entry(m_selected_entry - count);
}
void list_view::add_entry(std::unique_ptr<overlay_element>& entry)
{
// Add entry view
add_element(entry);
m_elements_count++;
// Add separator
if (m_use_separators)
{
auto separator = std::make_unique<overlay_element>();
separator->back_color = fore_color;
separator->w = w;
separator->h = 2;
add_element(separator);
}
if (m_selected_entry < 0)
m_selected_entry = 0;
m_elements_height = advance_pos;
update_selection();
}
int list_view::get_selected_index() const
{
return m_selected_entry;
}
void list_view::set_cancel_only(bool cancel_only)
{
if (cancel_only)
m_cancel_btn->set_pos(x + 30, y + h + 20);
else if (m_deny_btn)
m_cancel_btn->set_pos(x + 330, y + h + 20);
else
m_cancel_btn->set_pos(x + 180, y + h + 20);
m_cancel_only = cancel_only;
is_compiled = false;
}
bool list_view::get_cancel_only() const
{
return m_cancel_only;
}
void list_view::translate(s16 _x, s16 _y)
{
layout_container::translate(_x, _y);
m_scroll_indicator_top->translate(_x, _y);
m_scroll_indicator_bottom->translate(_x, _y);
m_accept_btn->translate(_x, _y);
m_cancel_btn->translate(_x, _y);
if (m_deny_btn)
{
m_deny_btn->translate(_x, _y);
}
}
compiled_resource& list_view::get_compiled()
{
if (!is_compiled)
{
auto& compiled = vertical_layout::get_compiled();
compiled.add(m_highlight_box->get_compiled());
compiled.add(m_scroll_indicator_top->get_compiled());
compiled.add(m_scroll_indicator_bottom->get_compiled());
compiled.add(m_cancel_btn->get_compiled());
if (!m_cancel_only)
{
compiled.add(m_accept_btn->get_compiled());
if (m_deny_btn)
{
compiled.add(m_deny_btn->get_compiled());
}
}
compiled_resources = compiled;
}
return compiled_resources;
}
} // namespace overlays
} // namespace rsx
| 6,501 | C++ | .cpp | 195 | 29.14359 | 104 | 0.656609 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,420 | overlay_message.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_message.cpp |
#include "stdafx.h"
#include "overlay_message.h"
#include "Emu/RSX/Common/time.hpp"
namespace rsx
{
namespace overlays
{
static u64 get_expiration_time(u64 duration)
{
if (duration == umax)
{
return duration;
}
return get_system_time() + duration;
}
template <typename T>
message_item::message_item(const T& msg_id, u64 expiration, std::shared_ptr<atomic_t<u32>> refs, std::shared_ptr<overlay_element> icon)
{
m_visible_duration = expiration;
m_refs = std::move(refs);
m_text.set_font("Arial", 14);
m_text.set_text(msg_id);
m_text.set_padding(4, 8, 4, 8);
m_text.auto_resize();
m_text.back_color.a = 0.f;
m_fade_in_animation.current = color4f(1.f, 1.f, 1.f, 0.f);
m_fade_in_animation.end = color4f(1.0f);
m_fade_in_animation.duration_sec = 1.f;
m_fade_in_animation.active = true;
m_fade_out_animation.current = color4f(1.f);
m_fade_out_animation.end = color4f(1.f, 1.f, 1.f, 0.f);
m_fade_out_animation.duration_sec = 1.f;
m_fade_out_animation.active = false;
back_color = color4f(0.25f, 0.25f, 0.25f, 0.85f);
if (icon)
{
m_icon = icon;
m_icon->set_pos(m_text.x + m_text.w + 8, m_text.y);
set_size(m_margin + m_text.w + m_icon->w + m_margin, m_margin + std::max(m_text.h, m_icon->h) + m_margin);
}
else
{
set_size(m_text.w + m_margin + m_margin, m_text.h + m_margin + m_margin);
}
}
template message_item::message_item(const std::string& msg_id, u64, std::shared_ptr<atomic_t<u32>>, std::shared_ptr<overlay_element>);
template message_item::message_item(const localized_string_id& msg_id, u64, std::shared_ptr<atomic_t<u32>>, std::shared_ptr<overlay_element>);
void message_item::reset_expiration()
{
m_expiration_time = get_expiration_time(m_visible_duration);
}
u64 message_item::get_expiration() const
{
// If reference counting is enabled and reached 0, consider it expired
return m_refs && *m_refs == 0 ? 0 : m_expiration_time;
}
void message_item::ensure_expired()
{
// If reference counting is enabled and reached 0, consider it expired
if (m_refs)
{
*m_refs = 0;
}
}
bool message_item::text_matches(const std::u32string& text) const
{
return m_text.text == text;
}
void message_item::set_pos(s16 _x, s16 _y)
{
rounded_rect::set_pos(_x, _y);
m_text.set_pos(_x + m_margin, y + m_margin);
if (m_icon)
{
m_icon->set_pos(m_icon->x, m_text.y);
}
}
compiled_resource& message_item::get_compiled()
{
if (!m_processed)
{
compiled_resources = {};
return compiled_resources;
}
// Disable caching
is_compiled = false;
compiled_resources = rounded_rect::get_compiled();
compiled_resources.add(m_text.get_compiled());
if (m_icon)
{
compiled_resources.add(m_icon->get_compiled());
}
auto& current_animation = m_fade_in_animation.active
? m_fade_in_animation
: m_fade_out_animation;
current_animation.apply(compiled_resources);
return compiled_resources;
}
void message_item::update(usz index, u64 timestamp_us, s16 x_offset, s16 y_offset)
{
if (m_cur_pos != index)
{
m_cur_pos = index;
set_pos(x_offset, y_offset);
}
if (!m_processed)
{
m_expiration_time = get_expiration_time(m_visible_duration);
}
if (m_fade_in_animation.active)
{
// We are fading in.
m_fade_in_animation.update(timestamp_us);
}
else if (timestamp_us + u64(m_fade_out_animation.duration_sec * 1'000'000) > get_expiration())
{
// We are fading out.
// Only activate the animation if the message hasn't expired yet (prevents glitches afterwards).
if (timestamp_us <= get_expiration())
{
m_fade_out_animation.active = true;
}
m_fade_out_animation.update(timestamp_us);
}
else if (m_fade_out_animation.active)
{
// We are fading out, but the expiration was extended.
// Reset the fade in animation to the state of the fade out animation to prevent opacity pop.
const f32 fade_out_progress = static_cast<f32>(m_fade_out_animation.get_remaining_duration_us(timestamp_us)) / static_cast<f32>(m_fade_out_animation.get_total_duration_us());
const u64 fade_in_us_done = u64(fade_out_progress * m_fade_in_animation.get_total_duration_us());
m_fade_in_animation.reset(timestamp_us - fade_in_us_done);
m_fade_in_animation.active = true;
m_fade_in_animation.update(timestamp_us);
// Reset the fade out animation.
m_fade_out_animation.reset();
}
m_processed = true;
}
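// Worked example with illustrative values: in the hand-off branch above, a
// 1 s fade-out with 0.25 s remaining gives fade_out_progress = 0.25, so the
// fade-in is reset as if 0.25 s of it had already elapsed; the opacity curve
// then resumes from the same value instead of jumping discontinuously.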
void message::update_queue(std::deque<message_item>& vis_set, std::deque<message_item>& ready_set, message_pin_location origin)
{
const u64 cur_time = get_system_time();
for (auto it = vis_set.begin(); it != vis_set.end();)
{
if (it->get_expiration() < cur_time)
{
// Ensure the reference counter is updated on timeout
it->ensure_expired();
it = vis_set.erase(it);
}
else
{
it++;
}
}
while (vis_set.size() < max_visible_items && !ready_set.empty())
{
vis_set.emplace_back(std::move(ready_set.front()));
ready_set.pop_front();
}
if (vis_set.empty())
{
return;
}
// Render reversed list. Oldest entries are furthest from the border
constexpr u16 spacing = 4;
s16 x_offset = 10;
s16 y_offset = 8;
usz index = 0;
for (auto it = vis_set.rbegin(); it != vis_set.rend(); ++it, ++index)
{
switch (origin)
{
case message_pin_location::bottom_right:
y_offset += (spacing + it->h);
it->update(index, cur_time, virtual_width - x_offset - it->w, virtual_height - y_offset);
break;
case message_pin_location::bottom_left:
y_offset += (spacing + it->h);
it->update(index, cur_time, x_offset, virtual_height - y_offset);
break;
case message_pin_location::top_right:
it->update(index, cur_time, virtual_width - x_offset - it->w, y_offset);
y_offset += (spacing + it->h);
break;
case message_pin_location::top_left:
it->update(index, cur_time, x_offset, y_offset);
y_offset += (spacing + it->h);
break;
}
}
}
void message::update(u64 /*timestamp_us*/)
{
if (!visible)
{
return;
}
std::lock_guard lock(m_mutex_queue);
update_queue(m_visible_items_bottom_right, m_ready_queue_bottom_right, message_pin_location::bottom_right);
update_queue(m_visible_items_bottom_left, m_ready_queue_bottom_left, message_pin_location::bottom_left);
update_queue(m_visible_items_top_right, m_ready_queue_top_right, message_pin_location::top_right);
update_queue(m_visible_items_top_left, m_ready_queue_top_left, message_pin_location::top_left);
visible = !m_visible_items_bottom_right.empty() || !m_visible_items_bottom_left.empty() ||
!m_visible_items_top_right.empty() || !m_visible_items_top_left.empty();
}
compiled_resource message::get_compiled()
{
if (!visible)
{
return {};
}
std::lock_guard lock(m_mutex_queue);
compiled_resource cr{};
for (auto& item : m_visible_items_bottom_right)
{
cr.add(item.get_compiled());
}
for (auto& item : m_visible_items_bottom_left)
{
cr.add(item.get_compiled());
}
for (auto& item : m_visible_items_top_right)
{
cr.add(item.get_compiled());
}
for (auto& item : m_visible_items_top_left)
{
cr.add(item.get_compiled());
}
return cr;
}
bool message::message_exists(message_pin_location location, localized_string_id id, bool allow_refresh)
{
return message_exists(location, get_localized_u32string(id), allow_refresh);
}
bool message::message_exists(message_pin_location location, const std::string& msg, bool allow_refresh)
{
return message_exists(location, utf8_to_u32string(msg), allow_refresh);
}
bool message::message_exists(message_pin_location location, const std::u32string& msg, bool allow_refresh)
{
auto check_list = [&](std::deque<message_item>& list)
{
return std::any_of(list.begin(), list.end(), [&](message_item& item)
{
if (item.text_matches(msg))
{
if (allow_refresh)
{
item.reset_expiration();
}
return true;
}
return false;
});
};
switch (location)
{
case message_pin_location::bottom_right:
return check_list(m_ready_queue_bottom_right) || check_list(m_visible_items_bottom_right);
case message_pin_location::bottom_left:
return check_list(m_ready_queue_bottom_left) || check_list(m_visible_items_bottom_left);
case message_pin_location::top_right:
return check_list(m_ready_queue_top_right) || check_list(m_visible_items_top_right);
case message_pin_location::top_left:
return check_list(m_ready_queue_top_left) || check_list(m_visible_items_top_left);
}
return false;
}
void refresh_message_queue()
{
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
if (auto msg_overlay = manager->get<rsx::overlays::message>())
{
msg_overlay->refresh();
}
}
}
} // namespace overlays
} // namespace rsx
| 9,098 | C++ | .cpp | 281 | 27.975089 | 178 | 0.659706 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,421 | overlay_media_list_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_media_list_dialog.cpp |
#include "stdafx.h"
#include "overlays.h"
#include "overlay_manager.h"
#include "overlay_media_list_dialog.h"
#include "Emu/Cell/Modules/cellMusic.h"
#include "Emu/System.h"
#include "Emu/VFS.h"
#include "Utilities/StrUtil.h"
#include "Utilities/Thread.h"
namespace rsx
{
namespace overlays
{
media_list_dialog::media_list_entry::media_list_entry(const media_list_dialog::media_entry& entry)
{
std::unique_ptr<overlay_element> image = std::make_unique<image_view>();
image->set_size(160, 110);
image->set_padding(36, 36, 11, 11); // Square image, 88x88
switch (entry.type)
{
case media_list_dialog::media_type::audio:
{
// TODO: use thumbnail or proper icon
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::new_entry);
break;
}
case media_list_dialog::media_type::video:
{
// TODO: use thumbnail or proper icon
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::new_entry);
break;
}
case media_list_dialog::media_type::photo:
{
if (fs::exists(entry.info.path))
{
// Fit the new image into the available space
if (entry.info.width > 0 && entry.info.height > 0)
{
const u16 target_width = image->w - (image->padding_left + image->padding_right);
const u16 target_height = image->h - (image->padding_top + image->padding_bottom);
const f32 target_ratio = target_width / static_cast<f32>(target_height);
const f32 image_ratio = entry.info.width / static_cast<f32>(entry.info.height);
const f32 convert_ratio = image_ratio / target_ratio;
if (convert_ratio > 1.0f)
{
const u16 new_padding = static_cast<u16>(target_height - target_height / convert_ratio) / 2;
image->set_padding(image->padding_left, image->padding_right, new_padding + image->padding_top, new_padding + image->padding_bottom);
}
else if (convert_ratio < 1.0f)
{
const u16 new_padding = static_cast<u16>(target_width - target_width * convert_ratio) / 2;
image->set_padding(image->padding_left + new_padding, image->padding_right + new_padding, image->padding_top, image->padding_bottom);
}
}
icon_data = std::make_unique<image_info>(entry.info.path.c_str());
static_cast<image_view*>(image.get())->set_raw_image(icon_data.get());
}
else
{
// Fallback
// TODO: use proper icon
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::new_entry);
}
break;
}
case media_list_dialog::media_type::directory:
{
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::save);
break;
}
case media_list_dialog::media_type::invalid:
fmt::throw_exception("Unexpected media type");
}
char title[384]{}; // CELL_SEARCH_TITLE_LEN_MAX
char artist[384]{}; // CELL_SEARCH_TITLE_LEN_MAX
if (entry.type == media_type::directory)
{
strcpy_trunc(title, entry.name);
}
else
{
utils::parse_metadata(title, entry.info, "title", entry.name.substr(0, entry.name.find_last_of('.')), 384); // CELL_SEARCH_TITLE_LEN_MAX
utils::parse_metadata(artist, entry.info, "artist", "Unknown Artist", 384); // CELL_SEARCH_TITLE_LEN_MAX
}
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> header_text = std::make_unique<label>(title);
std::unique_ptr<overlay_element> subtext = std::make_unique<label>(artist);
padding->set_size(1, 1);
header_text->set_size(800, 40);
header_text->set_font("Arial", 16);
header_text->set_wrap_text(true);
subtext->set_size(800, 0);
subtext->set_font("Arial", 14);
subtext->set_wrap_text(true);
static_cast<label*>(subtext.get())->auto_resize(true);
// Make back color transparent for text
header_text->back_color.a = 0.f;
subtext->back_color.a = 0.f;
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
static_cast<vertical_layout*>(text_stack.get())->add_element(header_text);
static_cast<vertical_layout*>(text_stack.get())->add_element(subtext);
if (text_stack->h > image->h)
{
std::unique_ptr<overlay_element> padding2 = std::make_unique<spacer>();
padding2->set_size(1, 5);
static_cast<vertical_layout*>(text_stack.get())->add_element(padding2);
}
// Pack
pack_padding = 15;
add_element(image);
add_element(text_stack);
}
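// Self-contained sketch (not original code) of the aspect-ratio fit above;
// names are hypothetical. An image proportionally wider than the target box
// gains top/bottom padding (letterbox), a narrower one gains left/right
// padding (pillarbox).
struct example_fit_padding { int horizontal; int vertical; };
static example_fit_padding example_fit(float img_w, float img_h, float box_w, float box_h)
{
const float ratio = (img_w / img_h) / (box_w / box_h);
if (ratio > 1.0f) return { 0, static_cast<int>(box_h - box_h / ratio) / 2 };
if (ratio < 1.0f) return { static_cast<int>(box_w - box_w * ratio) / 2, 0 };
return { 0, 0 }; // aspect ratios match: no extra padding needed
}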
media_list_dialog::media_list_dialog()
{
m_dim_background = std::make_unique<overlay_element>();
m_dim_background->set_size(virtual_width, virtual_height);
m_dim_background->back_color.a = 0.5f;
m_description = std::make_unique<label>();
m_description->set_font("Arial", 20);
m_description->set_pos(20, 37);
m_description->set_text("Select media"); // Fallback. I don't think this will ever be used, so I won't localize it.
m_description->auto_resize();
m_description->back_color.a = 0.f;
}
void media_list_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
bool play_cursor_sound = true;
switch (button_press)
{
case pad_button::cross:
if (m_no_media_text)
break;
return_code = m_list->get_selected_index();
m_stop_input_loop = true;
play_cursor_sound = false;
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
break;
case pad_button::circle:
return_code = selection_code::canceled;
m_stop_input_loop = true;
play_cursor_sound = false;
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
break;
case pad_button::dpad_up:
m_list->select_previous();
break;
case pad_button::dpad_down:
m_list->select_next();
break;
case pad_button::L1:
m_list->select_previous(10);
break;
case pad_button::R1:
m_list->select_next(10);
break;
default:
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
if (play_cursor_sound && (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default))
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
}
compiled_resource media_list_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background->get_compiled());
result.add(m_list->get_compiled());
result.add(m_description->get_compiled());
if (m_no_media_text)
result.add(m_no_media_text->get_compiled());
return result;
}
s32 media_list_dialog::show(media_entry* root, media_entry& result, const std::string& title, u32 focused, bool enable_overlay)
{
auto ref = g_fxo->get<display_manager>().get(uid);
m_media = root;
result = {};
if (enable_overlay)
{
m_dim_background->back_color.a = 0.9f;
}
else
{
m_dim_background->back_color.a = 0.5f;
}
while (thread_ctrl::state() != thread_state::aborting && return_code >= 0 && m_media && m_media->type == media_list_dialog::media_type::directory)
{
reload(title, focused);
m_stop_input_loop = false;
if (const auto error = run_input_loop())
{
if (error != selection_code::canceled)
{
rsx_log.error("Media list dialog input loop exited with error code=%d", error);
}
return error;
}
if (return_code >= 0)
{
focused = 0;
ensure(static_cast<size_t>(return_code) < m_media->children.size());
m_media = &m_media->children[return_code];
rsx_log.notice("Media dialog: selected entry: %d ('%s')", return_code, m_media->path);
continue;
}
if (return_code == user_interface::selection_code::canceled)
{
if (m_media == root)
{
rsx_log.notice("Media list dialog canceled");
break;
}
focused = m_media->index;
m_media = m_media->parent;
return_code = 0;
rsx_log.notice("Media list dialog moving to parent directory (focused=%d)", focused);
continue;
}
rsx_log.error("Left media list dialog with error: %d", return_code);
break;
}
m_interactive = false; // KLUDGE: Set interactive to false in order to stop pad interaction properly in close.
close(false, true);
if (return_code >= 0 && m_media && m_media->type != media_list_dialog::media_type::directory)
{
result = *m_media;
rsx_log.notice("Left media list dialog: return_code=%d, type=%d, path=%s", return_code, static_cast<s32>(result.type), result.info.path);
}
return return_code;
}
void media_list_dialog::reload(const std::string& title, u32 focused)
{
ensure(m_media);
rsx_log.notice("Media dialog: showing entry '%s' ('%s')", m_media->name, m_media->path);
if (m_list)
{
status_flags |= status_bits::invalidate_image_cache;
}
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540);
m_list->set_pos(20, 85);
for (const media_entry& child : m_media->children)
{
std::unique_ptr<overlay_element> entry = std::make_unique<media_list_entry>(child);
m_list->add_entry(entry);
}
if (m_list->m_items.empty())
{
m_no_media_text = std::make_unique<label>(get_localized_string(localized_string_id::RSX_OVERLAYS_MEDIA_DIALOG_EMPTY));
m_no_media_text->set_font("Arial", 20);
m_no_media_text->align_text(overlay_element::text_align::center);
m_no_media_text->set_pos(m_list->x, m_list->y + m_list->h / 2);
m_no_media_text->set_size(m_list->w, 30);
m_no_media_text->back_color.a = 0;
m_list->set_cancel_only(true);
}
else
{
// Only select an entry if there are entries available
m_list->select_entry(focused);
}
m_description->set_text(title);
m_description->auto_resize();
visible = true;
}
struct media_list_dialog_thread
{
static constexpr auto thread_name = "MediaList Thread"sv;
};
void parse_media_recursive(u32 depth, const std::string& media_path, const std::string& name, media_list_dialog::media_type type, media_list_dialog::media_entry& current_entry)
{
if (depth++ > music_selection_context::max_depth)
{
return;
}
if (fs::is_dir(media_path))
{
for (auto&& dir_entry : fs::dir{media_path})
{
if (dir_entry.name == "." || dir_entry.name == "..")
{
continue;
}
const std::string unescaped_name = vfs::unescape(dir_entry.name);
media_list_dialog::media_entry new_entry{};
parse_media_recursive(depth, media_path + "/" + dir_entry.name, unescaped_name, type, new_entry);
if (new_entry.type != media_list_dialog::media_type::invalid)
{
new_entry.parent = &current_entry;
new_entry.index = ::narrow<u32>(current_entry.children.size());
current_entry.children.emplace_back(std::move(new_entry));
}
}
// Only keep directories that contain valid entries
if (current_entry.children.empty())
{
rsx_log.notice("parse_media_recursive: No matches in directory '%s'", media_path);
}
else
{
rsx_log.notice("parse_media_recursive: Found %d matches in directory '%s'", current_entry.children.size(), media_path);
current_entry.type = media_list_dialog::media_type::directory;
current_entry.info.path = media_path;
}
}
else
{
// Try to peek into the file
const s32 av_media_type = type == media_list_dialog::media_type::photo ? -1
: type == media_list_dialog::media_type::video ? 0 /*AVMEDIA_TYPE_VIDEO*/
: 1 /*AVMEDIA_TYPE_AUDIO*/;
auto [success, info] = utils::get_media_info(media_path, av_media_type);
if (success)
{
current_entry.type = type;
current_entry.info = std::move(info);
rsx_log.notice("parse_media_recursive: Found media '%s'", media_path);
}
}
if (current_entry.type != media_list_dialog::media_type::invalid)
{
current_entry.path = media_path;
current_entry.name = name;
}
}
error_code show_media_list_dialog(media_list_dialog::media_type type, const std::string& path, const std::string& title, std::function<void(s32 status, utils::media_info info)> on_finished)
{
rsx_log.todo("show_media_list_dialog(type=%d, path='%s', title='%s', on_finished=%d)", static_cast<s32>(type), path, title, !!on_finished);
if (!on_finished)
{
return CELL_CANCEL;
}
g_fxo->get<named_thread<media_list_dialog_thread>>()([=]()
{
media_list_dialog::media_entry root_media_entry{};
root_media_entry.type = media_list_dialog::media_type::directory;
if (fs::is_dir(path))
{
parse_media_recursive(0, path, title, type, root_media_entry);
}
else
{
rsx_log.error("Media list: Failed to open path: '%s'", path);
}
media_list_dialog::media_entry media{};
s32 result = 0;
u32 focused = 0;
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
result = manager->create<rsx::overlays::media_list_dialog>()->show(&root_media_entry, media, title, focused, true);
}
else
{
result = user_interface::selection_code::canceled;
rsx_log.error("Media selection is only possible when the native user interface is enabled in the settings. The action will be canceled.");
}
if (result >= 0 && media.type == type)
{
on_finished(CELL_OK, media.info);
}
else
{
on_finished(result, {});
}
});
return CELL_OK;
}
} // namespace overlays
} // namespace rsx
| 13,851 | C++ | .cpp | 378 | 31.775132 | 191 | 0.655882 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,422 | overlay_friends_list_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/FriendsList/overlay_friends_list_dialog.cpp |
#include "stdafx.h"
#include "../overlay_manager.h"
#include "overlay_friends_list_dialog.h"
#include "Emu/System.h"
#include "Emu/NP/rpcn_config.h"
#include "Emu/vfs_config.h"
namespace rsx
{
namespace overlays
{
void friend_callback(void* param, rpcn::NotificationType ntype, const std::string& username, bool status)
{
auto* dlg = static_cast<friends_list_dialog*>(param);
dlg->callback_handler(ntype, username, status);
}
friends_list_dialog::friends_list_entry::friends_list_entry(friends_list_dialog_page page, const std::string& username, const rpcn::friend_online_data& data)
{
std::unique_ptr<overlay_element> image = std::make_unique<image_view>();
image->set_size(160, 110);
image->set_padding(36, 36, 11, 11); // Square image, 88x88
std::string avatar_path = g_cfg_vfs.get_dev_flash() + "vsh/resource/explore/user/";
std::string text;
switch (page)
{
case friends_list_dialog_page::friends:
{
avatar_path += data.online ? "013.png" : "009.png";
text = get_localized_string(data.online ? localized_string_id::HOME_MENU_FRIENDS_STATUS_ONLINE : localized_string_id::HOME_MENU_FRIENDS_STATUS_OFFLINE);
if (data.online)
{
if (!data.pr_title.empty())
{
fmt::append(text, " - %s", data.pr_title);
}
if (!data.pr_status.empty())
{
fmt::append(text, " - %s", data.pr_status);
}
}
break;
}
case friends_list_dialog_page::invites:
{
// We use "online" to show whether an invite was sent or received
avatar_path += data.online ? "012.png" : "011.png";
text = get_localized_string(data.online ? localized_string_id::HOME_MENU_FRIENDS_REQUEST_RECEIVED : localized_string_id::HOME_MENU_FRIENDS_REQUEST_SENT);
break;
}
case friends_list_dialog_page::blocked:
{
avatar_path += "010.png";
text = get_localized_string(localized_string_id::HOME_MENU_FRIENDS_STATUS_BLOCKED);
break;
}
}
if (fs::exists(avatar_path))
{
icon_data = std::make_unique<image_info>(avatar_path.c_str());
static_cast<image_view*>(image.get())->set_raw_image(icon_data.get());
}
else
{
// Fallback
// TODO: use proper icon
static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::square);
}
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> header_text = std::make_unique<label>(username);
std::unique_ptr<overlay_element> subtext = std::make_unique<label>(text);
padding->set_size(1, 1);
header_text->set_size(800, 40);
header_text->set_font("Arial", 16);
header_text->set_wrap_text(true);
subtext->set_size(800, 0);
subtext->set_font("Arial", 14);
subtext->set_wrap_text(true);
static_cast<label*>(subtext.get())->auto_resize(true);
// Make back color transparent for text
header_text->back_color.a = 0.f;
subtext->back_color.a = 0.f;
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
static_cast<vertical_layout*>(text_stack.get())->add_element(header_text);
static_cast<vertical_layout*>(text_stack.get())->add_element(subtext);
if (text_stack->h > image->h)
{
std::unique_ptr<overlay_element> padding2 = std::make_unique<spacer>();
padding2->set_size(1, 5);
static_cast<vertical_layout*>(text_stack.get())->add_element(padding2);
}
// Pack
this->pack_padding = 15;
add_element(image);
add_element(text_stack);
}
friends_list_dialog::friends_list_dialog()
: m_page_btn(120, 30)
, m_extra_btn(120, 30)
{
m_dim_background = std::make_unique<overlay_element>();
m_dim_background->set_size(virtual_width, virtual_height);
m_dim_background->back_color.a = 0.5f;
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540);
m_list->set_pos(20, 85);
m_message_box = std::make_shared<home_menu_message_box>(20, 85, virtual_width - 2 * 20, 540);
m_description = std::make_unique<label>();
m_description->set_font("Arial", 20);
m_description->set_pos(20, 37);
m_description->set_text("Select user"); // Fallback. I don't think this will ever be used, so I won't localize it.
m_description->auto_resize();
m_description->back_color.a = 0.f;
fade_animation.duration_sec = 0.15f;
return_code = selection_code::canceled;
m_extra_btn.set_image_resource(resource_config::standard_image_resource::triangle);
m_extra_btn.set_pos(330, m_list->y + m_list->h + 20);
m_extra_btn.set_text("");
m_extra_btn.set_font("Arial", 16);
m_page_btn.set_image_resource(resource_config::standard_image_resource::square);
m_page_btn.set_pos(m_list->x + m_list->w - (30 + 120), m_list->y + m_list->h + 20);
m_page_btn.set_text(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_NEXT_LIST));
m_page_btn.set_font("Arial", 16);
}
void friends_list_dialog::update(u64 timestamp_us)
{
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
}
void friends_list_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (fade_animation.active) return;
if (m_message_box && m_message_box->visible())
{
const page_navigation navigation = m_message_box->handle_button_press(button_press);
if (navigation != page_navigation::stay)
{
m_message_box->hide();
refresh();
}
return;
}
std::lock_guard lock(m_list_mutex);
bool close_dialog = false;
switch (button_press)
{
case pad_button::cross:
case pad_button::triangle:
{
if (!m_list || m_list->m_items.empty())
break;
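// Triangle is only used on the invites page (to reject a received request); ignore it elsewhere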
if (button_press == pad_button::triangle && m_current_page != friends_list_dialog_page::invites)
break;
const usz index = static_cast<usz>(m_list->get_selected_index());
switch (m_current_page)
{
case friends_list_dialog_page::friends:
{
// Get selected user
usz user_index = 0;
std::string selected_username;
for (const auto& [username, data] : m_friend_data.friends)
{
if (data.online && user_index++ == index)
{
selected_username = username;
break;
}
}
for (const auto& [username, data] : m_friend_data.friends)
{
if (!selected_username.empty())
break;
if (!data.online && user_index++ == index)
{
selected_username = username;
break;
}
}
if (!selected_username.empty() && m_message_box && !m_message_box->visible())
{
m_message_box->show(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_REMOVE_USER_MSG, selected_username.c_str()), [this, selected_username]()
{
m_rpcn->remove_friend(selected_username);
});
refresh();
}
break;
}
case friends_list_dialog_page::invites:
{
// Get selected user
usz user_index = 0;
std::string selected_username;
for (const std::string& username : m_friend_data.requests_received)
{
if (user_index == index)
{
selected_username = username;
break;
}
user_index++;
}
for (const std::string& username : m_friend_data.requests_sent)
{
if (!selected_username.empty())
break;
if (user_index == index)
{
selected_username = username;
break;
}
user_index++;
}
if (!selected_username.empty() && m_message_box && !m_message_box->visible())
{
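// If the search stopped inside the received list, the selection is an incoming request; otherwise it is one we sent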
if (user_index < m_friend_data.requests_received.size())
{
if (button_press == pad_button::triangle)
{
m_message_box->show(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_REJECT_REQUEST_MSG, selected_username.c_str()), [this, selected_username]()
{
m_rpcn->remove_friend(selected_username);
});
}
else
{
m_message_box->show(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_ACCEPT_REQUEST_MSG, selected_username.c_str()), [this, selected_username]()
{
m_rpcn->add_friend(selected_username);
});
}
}
else
{
m_message_box->show(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_CANCEL_REQUEST_MSG, selected_username.c_str()), [this, selected_username]()
{
m_rpcn->remove_friend(selected_username);
});
}
refresh();
}
break;
}
case friends_list_dialog_page::blocked:
{
// Get selected user
usz user_index = 0;
std::string selected_username;
for (const std::string& username : m_friend_data.blocked)
{
if (user_index++ == index)
{
selected_username = username;
break;
}
}
if (!selected_username.empty() && m_message_box && !m_message_box->visible())
{
m_message_box->show(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_UNBLOCK_USER_MSG, selected_username.c_str()), []()
{
// TODO
});
refresh();
}
break;
}
}
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
return;
}
case pad_button::circle:
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
close_dialog = true;
break;
case pad_button::square:
switch (m_current_page)
{
case friends_list_dialog_page::friends: m_current_page = friends_list_dialog_page::invites; break;
case friends_list_dialog_page::invites: m_current_page = friends_list_dialog_page::blocked; break;
case friends_list_dialog_page::blocked: m_current_page = friends_list_dialog_page::friends; break;
}
m_list_dirty = true;
break;
case pad_button::dpad_up:
case pad_button::ls_up:
if (m_list)
m_list->select_previous();
break;
case pad_button::dpad_down:
case pad_button::ls_down:
if (m_list)
m_list->select_next();
break;
case pad_button::L1:
if (m_list)
m_list->select_previous(10);
break;
case pad_button::R1:
if (m_list)
m_list->select_next(10);
break;
default:
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
if (close_dialog)
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
fade_animation.on_finish = [this]
{
close(true, true);
};
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
else if (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
}
compiled_resource friends_list_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background->get_compiled());
{
std::lock_guard lock(m_list_mutex);
if (m_list_dirty.exchange(false))
{
if (m_current_page != m_last_page)
{
localized_string_id title_id = localized_string_id::INVALID;
switch (m_current_page)
{
case friends_list_dialog_page::friends: title_id = localized_string_id::HOME_MENU_FRIENDS; break;
case friends_list_dialog_page::invites: title_id = localized_string_id::HOME_MENU_FRIENDS_REQUESTS; break;
case friends_list_dialog_page::blocked: title_id = localized_string_id::HOME_MENU_FRIENDS_BLOCKED; break;
}
m_description->set_text(get_localized_string(title_id));
m_description->auto_resize();
}
reload();
m_last_page.store(m_current_page);
}
if (m_message_box && m_message_box->visible())
{
result.add(m_message_box->get_compiled());
}
else
{
if (m_list)
{
result.add(m_list->get_compiled());
if (!m_list->m_items.empty() && m_current_page == friends_list_dialog_page::invites)
{
// Get selected user
const usz index = static_cast<usz>(m_list->get_selected_index());
if (index < m_friend_data.requests_received.size())
{
m_extra_btn.set_text(get_localized_string(localized_string_id::HOME_MENU_FRIENDS_REJECT_REQUEST));
result.add(m_extra_btn.get_compiled());
}
}
}
result.add(m_page_btn.get_compiled());
}
}
result.add(m_description->get_compiled());
fade_animation.apply(result);
return result;
}
void friends_list_dialog::callback_handler(rpcn::NotificationType ntype, const std::string& /*username*/, bool /*status*/)
{
switch (ntype)
{
case rpcn::NotificationType::FriendNew: // Add a friend to the friend list (either we accepted a friend request or a friend accepted ours)
case rpcn::NotificationType::FriendStatus: // Set status of friend to Offline or Online
case rpcn::NotificationType::FriendQuery: // Other user sent a friend request
case rpcn::NotificationType::FriendLost: // Remove a friend from the friend list (either side removed the other)
case rpcn::NotificationType::FriendPresenceChanged:
{
m_list_dirty = true;
break;
}
default:
{
rsx_log.fatal("An unhandled notification type was received by the RPCN friends dialog callback!");
break;
}
}
}
void friends_list_dialog::reload()
{
std::vector<std::unique_ptr<overlay_element>> entries;
std::string selected_user;
s32 selected_index = 0;
// Get selected user name
if (m_list && m_current_page == m_last_page)
{
const s32 old_index = m_list->get_selected_index();
s32 i = 0;
switch (m_current_page)
{
case friends_list_dialog_page::friends:
{
for (const auto& [username, data] : m_friend_data.friends)
{
if (i++ == old_index)
{
selected_user = username;
break;
}
}
break;
}
case friends_list_dialog_page::invites:
{
for (const std::string& username : m_friend_data.requests_received)
{
if (i++ == old_index)
{
selected_user = username;
break;
}
}
for (const std::string& username : m_friend_data.requests_sent)
{
if (!selected_user.empty())
break;
if (i++ == old_index)
{
selected_user = username;
break;
}
}
break;
}
case friends_list_dialog_page::blocked:
{
for (const std::string& username : m_friend_data.blocked)
{
if (i++ == old_index)
{
selected_user = username;
break;
}
}
break;
}
}
}
if (auto res = m_rpcn->wait_for_connection(); res != rpcn::rpcn_state::failure_no_failure)
{
rsx_log.error("Failed to connect to RPCN: %s", rpcn::rpcn_state_to_string(res));
status_flags |= status_bits::invalidate_image_cache;
m_list.reset();
return;
}
if (auto res = m_rpcn->wait_for_authentified(); res != rpcn::rpcn_state::failure_no_failure)
{
rsx_log.error("Failed to authentify to RPCN: %s", rpcn::rpcn_state_to_string(res));
status_flags |= status_bits::invalidate_image_cache;
m_list.reset();
return;
}
// Get friends, setup callback and setup comboboxes
m_rpcn->get_friends(m_friend_data);
switch (m_current_page)
{
case friends_list_dialog_page::friends:
{
// Sort users by online status
std::vector<std::pair<const std::string&, const rpcn::friend_online_data&>> friends_online;
std::vector<std::pair<const std::string&, const rpcn::friend_online_data&>> friends_offline;
for (const auto& [username, data] : m_friend_data.friends)
{
if (data.online)
{
friends_online.push_back({ username, data });
}
else
{
friends_offline.push_back({ username, data });
}
}
// Add users and try to find the old selected user again
for (const auto& [username, data] : friends_online)
{
if (username == selected_user)
{
selected_index = ::size32(entries);
}
std::unique_ptr<overlay_element> entry = std::make_unique<friends_list_entry>(m_current_page, username, data);
entries.emplace_back(std::move(entry));
}
for (const auto& [username, data] : friends_offline)
{
if (username == selected_user)
{
selected_index = ::size32(entries);
}
std::unique_ptr<overlay_element> entry = std::make_unique<friends_list_entry>(m_current_page, username, data);
entries.emplace_back(std::move(entry));
}
break;
}
case friends_list_dialog_page::invites:
{
for (const std::string& username : m_friend_data.requests_received)
{
if (username == selected_user)
{
selected_index = ::size32(entries);
}
std::unique_ptr<overlay_element> entry = std::make_unique<friends_list_entry>(m_current_page, username, rpcn::friend_online_data(true, 0));
entries.emplace_back(std::move(entry));
}
for (const std::string& username : m_friend_data.requests_sent)
{
if (username == selected_user)
{
selected_index = ::size32(entries);
}
std::unique_ptr<overlay_element> entry = std::make_unique<friends_list_entry>(m_current_page, username, rpcn::friend_online_data(false, 0));
entries.emplace_back(std::move(entry));
}
break;
}
case friends_list_dialog_page::blocked:
{
for (const std::string& username : m_friend_data.blocked)
{
if (username == selected_user)
{
selected_index = ::size32(entries);
}
std::unique_ptr<overlay_element> entry = std::make_unique<friends_list_entry>(m_current_page, username, rpcn::friend_online_data(false, 0));
entries.emplace_back(std::move(entry));
}
break;
}
}
// Recreate list
if (m_list)
{
status_flags |= status_bits::invalidate_image_cache;
}
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540);
m_list->set_pos(20, 85);
for (auto& entry : entries)
{
m_list->add_entry(entry);
}
if (!m_list->m_items.empty())
{
// Only select an entry if there are entries available
m_list->select_entry(selected_index);
}
}
error_code friends_list_dialog::show(bool enable_overlay, std::function<void(s32 status)> on_close)
{
visible = false;
if (enable_overlay)
{
m_dim_background->back_color.a = 0.9f;
}
else
{
m_dim_background->back_color.a = 0.5f;
}
g_cfg_rpcn.load(); // Ensure the config is loaded even if RPCN is not running (e.g. in simulated mode)
m_rpcn = rpcn::rpcn_client::get_instance();
m_rpcn->register_friend_cb(friend_callback, this);
m_description->set_text(get_localized_string(localized_string_id::HOME_MENU_FRIENDS));
m_description->auto_resize();
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
this->on_close = std::move(on_close);
visible = true;
const auto notify = std::make_shared<atomic_t<u32>>(0);
auto& overlayman = g_fxo->get<display_manager>();
overlayman.attach_thread_input(
uid, "Friends list dialog",
[notify]() { *notify = true; notify->notify_one(); }
);
while (!Emu.IsStopped() && !*notify)
{
notify->wait(0, atomic_wait_timeout{1'000'000});
}
return CELL_OK;
}
} // namespace overlays
} // namespace rsx
| 19,499 | C++ | .cpp | 600 | 27.185 | 163 | 0.641392 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,423 | shader_loading_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Shaders/shader_loading_dialog.cpp |
#include "stdafx.h"
#include "shader_loading_dialog.h"
#include "Emu/System.h"
#include "Emu/Cell/Modules/cellMsgDialog.h"
#include "util/asm.hpp"
namespace rsx
{
void shader_loading_dialog::create(const std::string& msg, const std::string& title)
{
dlg = Emu.GetCallbacks().get_msg_dialog();
if (dlg)
{
dlg->type.se_normal = true;
dlg->type.bg_invisible = true;
dlg->type.progress_bar_count = 2;
dlg->ProgressBarSetTaskbarIndex(-1); // -1 to combine all progressbars in the taskbar progress
dlg->on_close = [](s32 /*status*/)
{
Emu.CallFromMainThread([]()
{
rsx_log.notice("Aborted shader loading dialog");
Emu.Kill(false);
});
};
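// ref_cnt counts pending main-thread callbacks; the loop below spins until the dialog has actually been created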
ref_cnt++;
Emu.CallFromMainThread([&]()
{
dlg->Create(msg, title);
ref_cnt--;
});
}
while (ref_cnt.load() && !Emu.IsStopped())
{
utils::pause();
}
}
void shader_loading_dialog::update_msg(u32 index, std::string msg)
{
if (!dlg)
{
return;
}
ref_cnt++;
Emu.CallFromMainThread([&, index, message = std::move(msg)]()
{
dlg->ProgressBarSetMsg(index, message);
ref_cnt--;
});
}
void shader_loading_dialog::inc_value(u32 index, u32 value)
{
if (!dlg)
{
return;
}
ref_cnt++;
Emu.CallFromMainThread([&, index, value]()
{
dlg->ProgressBarInc(index, value);
ref_cnt--;
});
}
void shader_loading_dialog::set_value(u32 index, u32 value)
{
if (!dlg)
{
return;
}
ref_cnt++;
Emu.CallFromMainThread([&, index, value]()
{
dlg->ProgressBarSetValue(index, value);
ref_cnt--;
});
}
void shader_loading_dialog::set_limit(u32 index, u32 limit)
{
if (!dlg)
{
return;
}
ref_cnt++;
Emu.CallFromMainThread([&, index, limit]()
{
dlg->ProgressBarSetLimit(index, limit);
ref_cnt--;
});
}
void shader_loading_dialog::refresh()
{
}
void shader_loading_dialog::close()
{
while (ref_cnt.load() && !Emu.IsStopped())
{
utils::pause();
}
}
}
| 1,958 | C++ | .cpp | 99 | 16.545455 | 97 | 0.640217 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

5,424 | shader_loading_dialog_native.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Shaders/shader_loading_dialog_native.cpp |
#include "stdafx.h"
#include "shader_loading_dialog_native.h"
#include "../overlay_manager.h"
#include "../overlay_message_dialog.h"
#include "../../GSRender.h"
#include "Emu/Cell/ErrorCodes.h"
namespace rsx
{
shader_loading_dialog_native::shader_loading_dialog_native(GSRender* ptr)
: owner(ptr)
{
}
void shader_loading_dialog_native::create(const std::string& msg, const std::string&/* title*/)
{
MsgDialogType type = {};
type.se_mute_on = true;
type.disable_cancel = true;
type.progress_bar_count = 2;
dlg = g_fxo->get<rsx::overlays::display_manager>().create<rsx::overlays::message_dialog>(true);
dlg->progress_bar_set_taskbar_index(-1);
dlg->show(false, msg, type, msg_dialog_source::shader_loading, [](s32 status)
{
if (status != CELL_OK)
{
rsx_log.notice("Aborted shader loading dialog");
Emu.Kill(false);
}
});
}
void shader_loading_dialog_native::update_msg(u32 index, std::string msg)
{
dlg->progress_bar_set_message(index, std::move(msg));
owner->flip({});
}
void shader_loading_dialog_native::inc_value(u32 index, u32 value)
{
dlg->progress_bar_increment(index, static_cast<f32>(value));
owner->flip({});
}
void shader_loading_dialog_native::set_value(u32 index, u32 value)
{
dlg->progress_bar_set_value(index, static_cast<f32>(value));
owner->flip({});
}
void shader_loading_dialog_native::set_limit(u32 index, u32 limit)
{
dlg->progress_bar_set_limit(index, limit);
owner->flip({});
}
void shader_loading_dialog_native::refresh()
{
dlg->refresh();
}
void shader_loading_dialog_native::close()
{
dlg->return_code = CELL_OK;
dlg->close(false, false);
}
}
| 1,663 | C++ | .cpp | 59 | 25.627119 | 97 | 0.699687 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

5,425 | overlay_home_menu_page.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_page.cpp |
#include "stdafx.h"
#include "overlay_home_menu_page.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
namespace rsx
{
namespace overlays
{
home_menu_page::home_menu_page(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent, const std::string& title)
: list_view(width, height, use_separators)
, parent(parent)
, title(title)
, m_save_btn(120, 30)
, m_discard_btn(120, 30)
{
if (parent)
{
m_message_box = parent->m_message_box;
m_config_changed = parent->m_config_changed;
}
m_save_btn.set_image_resource(resource_config::standard_image_resource::square);
m_discard_btn.set_image_resource(resource_config::standard_image_resource::triangle);
m_save_btn.set_pos(width - 2 * (30 + 120), height + 20);
m_discard_btn.set_pos(width - (30 + 120), height + 20);
m_save_btn.set_text(localized_string_id::HOME_MENU_SETTINGS_SAVE_BUTTON);
m_discard_btn.set_text(localized_string_id::HOME_MENU_SETTINGS_DISCARD_BUTTON);
m_save_btn.set_font("Arial", 16);
m_discard_btn.set_font("Arial", 16);
set_pos(x, y);
}
void home_menu_page::set_current_page(home_menu_page* page)
{
if (page)
{
is_current_page = false;
page->is_current_page = true;
rsx_log.notice("Home menu: changing current page from '%s' to '%s'", title, page->title);
}
}
home_menu_page* home_menu_page::get_current_page(bool include_this)
{
if (is_current_page)
{
if (include_this)
{
return this;
}
}
else
{
for (auto& page : m_pages)
{
if (page)
{
if (home_menu_page* p = page->get_current_page(true))
{
return p;
}
}
}
}
return nullptr;
}
void home_menu_page::add_page(std::shared_ptr<home_menu_page> page)
{
ensure(page);
std::unique_ptr<overlay_element> elem = std::make_unique<home_menu_entry>(page->title);
m_pages.push_back(page);
add_item(elem, [this, page](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected '%s' in '%s'", page->title, title);
set_current_page(page.get());
return page_navigation::next;
});
}
void home_menu_page::add_item(std::unique_ptr<overlay_element>& element, std::function<page_navigation(pad_button)> callback)
{
m_callbacks.push_back(std::move(callback));
m_entries.push_back(std::move(element));
}
void home_menu_page::apply_layout(bool center_vertically)
{
// Center vertically if necessary
if (center_vertically)
{
usz total_height = 0;
for (auto& entry : m_entries)
{
total_height += entry->h;
}
if (total_height < h)
{
advance_pos = (h - ::narrow<u16>(total_height)) / 2;
}
}
for (auto& entry : m_entries)
{
add_entry(entry);
}
}
void home_menu_page::show_dialog(const std::string& text, std::function<void()> on_accept, std::function<void()> on_cancel)
{
if (m_message_box && !m_message_box->visible())
{
rsx_log.notice("home_menu_page::show_dialog: page='%s', text='%s'", title, text);
m_message_box->show(text, std::move(on_accept), std::move(on_cancel));
refresh();
}
}
page_navigation home_menu_page::handle_button_press(pad_button button_press, bool is_auto_repeat, u64 auto_repeat_interval_ms)
{
if (m_message_box && m_message_box->visible())
{
const page_navigation navigation = m_message_box->handle_button_press(button_press);
if (navigation != page_navigation::stay)
{
m_message_box->hide();
refresh();
}
return navigation;
}
if (home_menu_page* page = get_current_page(false))
{
return page->handle_button_press(button_press, is_auto_repeat, auto_repeat_interval_ms);
}
switch (button_press)
{
case pad_button::dpad_left:
case pad_button::dpad_right:
case pad_button::ls_left:
case pad_button::ls_right:
case pad_button::cross:
{
if (const usz index = static_cast<usz>(get_selected_index()); index < m_callbacks.size())
{
if (const std::function<page_navigation(pad_button)>& func = ::at32(m_callbacks, index))
{
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
if (!is_auto_repeat || auto_repeat_interval_ms >= user_interface::m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
}
return func(button_press);
}
}
break;
}
case pad_button::circle:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
if (parent)
{
set_current_page(parent);
return page_navigation::back;
}
return page_navigation::exit;
}
case pad_button::triangle:
{
if (m_config_changed && *m_config_changed)
{
show_dialog(get_localized_string(localized_string_id::HOME_MENU_SETTINGS_DISCARD), [this]()
{
rsx_log.notice("home_menu_page: discarding settings...");
if (m_config_changed && *m_config_changed)
{
g_cfg.from_string(g_backup_cfg.to_string());
Emu.GetCallbacks().update_emu_settings();
*m_config_changed = false;
}
});
}
break;
}
case pad_button::square:
{
if (m_config_changed && *m_config_changed)
{
show_dialog(get_localized_string(localized_string_id::HOME_MENU_SETTINGS_SAVE), [this]()
{
rsx_log.notice("home_menu_page: saving settings...");
Emu.GetCallbacks().save_emu_settings();
if (m_config_changed)
{
*m_config_changed = false;
}
});
}
break;
}
case pad_button::dpad_up:
case pad_button::ls_up:
{
select_previous();
break;
}
case pad_button::dpad_down:
case pad_button::ls_down:
{
select_next();
break;
}
case pad_button::L1:
{
select_previous(10);
break;
}
case pad_button::R1:
{
select_next(10);
break;
}
default:
{
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
if (!is_auto_repeat || auto_repeat_interval_ms >= user_interface::m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
return page_navigation::stay;
}
void home_menu_page::translate(s16 _x, s16 _y)
{
list_view::translate(_x, _y);
m_save_btn.translate(_x, _y);
m_discard_btn.translate(_x, _y);
}
compiled_resource& home_menu_page::get_compiled()
{
if (!is_compiled || (m_message_box && !m_message_box->is_compiled))
{
is_compiled = false;
if (home_menu_page* page = get_current_page(false))
{
compiled_resources = page->get_compiled();
}
else
{
compiled_resources = list_view::get_compiled();
if (m_message_box && m_message_box->visible())
{
compiled_resources.add(m_message_box->get_compiled());
}
else if (m_config_changed && *m_config_changed)
{
compiled_resources.add(m_save_btn.get_compiled());
compiled_resources.add(m_discard_btn.get_compiled());
}
}
is_compiled = true;
}
return compiled_resources;
}
}
}
| 7,345 | C++ | .cpp | 260 | 23.561538 | 140 | 0.63657 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,426 | overlay_home_menu_main_menu.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_main_menu.cpp |
#include "stdafx.h"
#include "overlay_home_menu_main_menu.h"
#include "overlay_home_menu_components.h"
#include "Emu/RSX/Overlays/FriendsList/overlay_friends_list_dialog.h"
#include "Emu/RSX/Overlays/overlay_manager.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
extern atomic_t<bool> g_user_asked_for_recording;
extern atomic_t<bool> g_user_asked_for_screenshot;
extern bool boot_last_savestate(bool testing);
namespace rsx
{
namespace overlays
{
home_menu_main_menu::home_menu_main_menu(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_TITLE))
{
is_current_page = true;
m_message_box = std::make_shared<home_menu_message_box>(x, y, width, height);
m_config_changed = std::make_shared<bool>(g_backup_cfg.to_string() != g_cfg.to_string());
std::unique_ptr<overlay_element> resume = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_RESUME));
add_item(resume, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected resume in home menu");
return page_navigation::exit;
});
add_page(std::make_shared<home_menu_settings>(x, y, width, height, use_separators, this));
std::unique_ptr<overlay_element> friends = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_FRIENDS));
add_item(friends, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected friends in home menu");
Emu.CallFromMainThread([]()
{
if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>())
{
const error_code result = manager->create<rsx::overlays::friends_list_dialog>()->show(true, [](s32 status)
{
rsx_log.notice("Closing friends list with status %d", status);
});
(result ? rsx_log.error : rsx_log.notice)("Opened friends list with result %d", s32{result});
}
});
return page_navigation::stay;
});
std::unique_ptr<overlay_element> screenshot = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_SCREENSHOT));
add_item(screenshot, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected screenshot in home menu");
g_user_asked_for_screenshot = true;
return page_navigation::exit;
});
std::unique_ptr<overlay_element> recording = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_RECORDING));
add_item(recording, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected recording in home menu");
g_user_asked_for_recording = true;
return page_navigation::exit;
});
const bool suspend_mode = g_cfg.savestate.suspend_emu.get();
std::unique_ptr<overlay_element> save_state = std::make_unique<home_menu_entry>(get_localized_string(suspend_mode ? localized_string_id::HOME_MENU_SAVESTATE_AND_EXIT : localized_string_id::HOME_MENU_SAVESTATE));
add_item(save_state, [suspend_mode](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected savestate in home menu");
Emu.CallFromMainThread([suspend_mode]()
{
if (!suspend_mode)
{
Emu.after_kill_callback = []()
{
Emu.Restart();
};
}
Emu.Kill(false, true);
});
return page_navigation::exit;
});
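// Only offer reloading a savestate when suspend mode is off and a savestate exists (boot_last_savestate(true) only tests for one)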
if (!suspend_mode && boot_last_savestate(true))
{
std::unique_ptr<overlay_element> reload_state = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_RELOAD_SAVESTATE));
add_item(reload_state, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected reload savestate in home menu");
Emu.CallFromMainThread([]()
{
boot_last_savestate(false);
});
return page_navigation::exit;
});
}
std::unique_ptr<overlay_element> restart = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_RESTART));
add_item(restart, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected restart in home menu");
Emu.CallFromMainThread([]()
{
Emu.Restart(false);
});
return page_navigation::exit;
});
std::unique_ptr<overlay_element> exit_game = std::make_unique<home_menu_entry>(get_localized_string(localized_string_id::HOME_MENU_EXIT_GAME));
add_item(exit_game, [](pad_button btn) -> page_navigation
{
if (btn != pad_button::cross) return page_navigation::stay;
rsx_log.notice("User selected exit game in home menu");
Emu.CallFromMainThread([]
{
Emu.GracefulShutdown(false, true);
});
return page_navigation::stay;
});
apply_layout();
}
}
}
| 5,252 | C++ | .cpp | 121 | 38.727273 | 214 | 0.69267 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,427 | overlay_home_menu_settings.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_settings.cpp |
#include "stdafx.h"
#include "overlay_home_menu_settings.h"
#include "overlay_home_menu_components.h"
#include "Emu/system_config.h"
namespace rsx
{
namespace overlays
{
home_menu_settings::home_menu_settings(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS))
{
add_page(std::make_shared<home_menu_settings_audio>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_video>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_input>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_advanced>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_overlays>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_performance_overlay>(x, y, width, height, use_separators, this));
add_page(std::make_shared<home_menu_settings_debug>(x, y, width, height, use_separators, this));
apply_layout();
}
home_menu_settings_audio::home_menu_settings_audio(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_AUDIO))
{
add_signed_slider(&g_cfg.audio.volume, localized_string_id::HOME_MENU_SETTINGS_AUDIO_MASTER_VOLUME, " %", 1);
add_dropdown(&g_cfg.audio.renderer, localized_string_id::HOME_MENU_SETTINGS_AUDIO_BACKEND);
add_checkbox(&g_cfg.audio.enable_buffering, localized_string_id::HOME_MENU_SETTINGS_AUDIO_BUFFERING);
add_signed_slider(&g_cfg.audio.desired_buffer_duration, localized_string_id::HOME_MENU_SETTINGS_AUDIO_BUFFER_DURATION, " ms", 1);
add_checkbox(&g_cfg.audio.enable_time_stretching, localized_string_id::HOME_MENU_SETTINGS_AUDIO_TIME_STRETCHING);
add_signed_slider(&g_cfg.audio.time_stretching_threshold, localized_string_id::HOME_MENU_SETTINGS_AUDIO_TIME_STRETCHING_THRESHOLD, " %", 1);
apply_layout();
}
home_menu_settings_video::home_menu_settings_video(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_VIDEO))
{
add_dropdown(&g_cfg.video.frame_limit, localized_string_id::HOME_MENU_SETTINGS_VIDEO_FRAME_LIMIT);
add_unsigned_slider(&g_cfg.video.anisotropic_level_override, localized_string_id::HOME_MENU_SETTINGS_VIDEO_ANISOTROPIC_OVERRIDE, "x", 2, {{0, "Auto"}}, {14});
add_dropdown(&g_cfg.video.output_scaling, localized_string_id::HOME_MENU_SETTINGS_VIDEO_OUTPUT_SCALING);
if (g_cfg.video.renderer == video_renderer::vulkan && g_cfg.video.output_scaling == output_scaling_mode::fsr)
{
add_unsigned_slider(&g_cfg.video.vk.rcas_sharpening_intensity, localized_string_id::HOME_MENU_SETTINGS_VIDEO_RCAS_SHARPENING, " %", 1);
}
add_checkbox(&g_cfg.video.stretch_to_display_area, localized_string_id::HOME_MENU_SETTINGS_VIDEO_STRETCH_TO_DISPLAY);
apply_layout();
}
home_menu_settings_advanced::home_menu_settings_advanced(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_ADVANCED))
{
add_signed_slider(&g_cfg.core.preferred_spu_threads, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_PREFERRED_SPU_THREADS, "", 1);
add_unsigned_slider(&g_cfg.core.max_cpu_preempt_count_per_frame, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_MAX_CPU_PREEMPTIONS, "", 1);
add_checkbox(&g_cfg.core.rsx_accurate_res_access, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_ACCURATE_RSX_RESERVATION_ACCESS);
add_dropdown(&g_cfg.core.sleep_timers_accuracy, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_SLEEP_TIMERS_ACCURACY);
add_signed_slider(&g_cfg.core.max_spurs_threads, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_MAX_SPURS_THREADS, "", 1);
add_unsigned_slider(&g_cfg.video.driver_wakeup_delay, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_DRIVER_WAKE_UP_DELAY, " µs", 20, {}, {}, g_cfg.video.driver_wakeup_delay.min, 800);
add_signed_slider(&g_cfg.video.vblank_rate, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_VBLANK_FREQUENCY, " Hz", 30);
add_checkbox(&g_cfg.video.vblank_ntsc, localized_string_id::HOME_MENU_SETTINGS_ADVANCED_VBLANK_NTSC);
apply_layout();
}
home_menu_settings_input::home_menu_settings_input(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_INPUT))
{
add_checkbox(&g_cfg.io.background_input_enabled, localized_string_id::HOME_MENU_SETTINGS_INPUT_BACKGROUND_INPUT);
add_checkbox(&g_cfg.io.keep_pads_connected, localized_string_id::HOME_MENU_SETTINGS_INPUT_KEEP_PADS_CONNECTED);
add_checkbox(&g_cfg.io.show_move_cursor, localized_string_id::HOME_MENU_SETTINGS_INPUT_SHOW_PS_MOVE_CURSOR);
if (g_cfg.io.camera == camera_handler::qt)
{
add_dropdown(&g_cfg.io.camera_flip_option, localized_string_id::HOME_MENU_SETTINGS_INPUT_CAMERA_FLIP);
}
add_dropdown(&g_cfg.io.pad_mode, localized_string_id::HOME_MENU_SETTINGS_INPUT_PAD_MODE);
add_unsigned_slider(&g_cfg.io.pad_sleep, localized_string_id::HOME_MENU_SETTINGS_INPUT_PAD_SLEEP, " µs", 100);
apply_layout();
}
home_menu_settings_overlays::home_menu_settings_overlays(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_OVERLAYS))
{
add_checkbox(&g_cfg.misc.show_trophy_popups, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_TROPHY_POPUPS);
add_checkbox(&g_cfg.misc.show_rpcn_popups, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_RPCN_POPUPS);
add_checkbox(&g_cfg.misc.show_shader_compilation_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_SHADER_COMPILATION_HINT);
add_checkbox(&g_cfg.misc.show_ppu_compilation_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_PPU_COMPILATION_HINT);
add_checkbox(&g_cfg.misc.show_autosave_autoload_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_AUTO_SAVE_LOAD_HINT);
add_checkbox(&g_cfg.misc.show_pressure_intensity_toggle_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_PRESSURE_INTENSITY_TOGGLE_HINT);
add_checkbox(&g_cfg.misc.show_analog_limiter_toggle_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_ANALOG_LIMITER_TOGGLE_HINT);
add_checkbox(&g_cfg.misc.show_mouse_and_keyboard_toggle_hint, localized_string_id::HOME_MENU_SETTINGS_OVERLAYS_SHOW_MOUSE_AND_KB_TOGGLE_HINT);
apply_layout();
}
home_menu_settings_performance_overlay::home_menu_settings_performance_overlay(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY))
{
add_checkbox(&g_cfg.video.perf_overlay.perf_overlay_enabled, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_ENABLE);
add_checkbox(&g_cfg.video.perf_overlay.framerate_graph_enabled, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_ENABLE_FRAMERATE_GRAPH);
add_checkbox(&g_cfg.video.perf_overlay.frametime_graph_enabled, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_ENABLE_FRAMETIME_GRAPH);
add_dropdown(&g_cfg.video.perf_overlay.level, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_DETAIL_LEVEL);
add_dropdown(&g_cfg.video.perf_overlay.framerate_graph_detail_level, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_FRAMERATE_DETAIL_LEVEL);
add_dropdown(&g_cfg.video.perf_overlay.frametime_graph_detail_level, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_FRAMETIME_DETAIL_LEVEL);
add_unsigned_slider(&g_cfg.video.perf_overlay.framerate_datapoint_count, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_FRAMERATE_DATAPOINT_COUNT, "", 1);
add_unsigned_slider(&g_cfg.video.perf_overlay.frametime_datapoint_count, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_FRAMETIME_DATAPOINT_COUNT, "", 1);
add_unsigned_slider(&g_cfg.video.perf_overlay.update_interval, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_UPDATE_INTERVAL, " ms", 1);
add_dropdown(&g_cfg.video.perf_overlay.position, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_POSITION);
add_checkbox(&g_cfg.video.perf_overlay.center_x, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_CENTER_X);
add_checkbox(&g_cfg.video.perf_overlay.center_y, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_CENTER_Y);
add_unsigned_slider(&g_cfg.video.perf_overlay.margin_x, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_MARGIN_X, " px", 1);
add_unsigned_slider(&g_cfg.video.perf_overlay.margin_y, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_MARGIN_Y, " px", 1);
add_unsigned_slider(&g_cfg.video.perf_overlay.font_size, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_FONT_SIZE, " px", 1);
add_unsigned_slider(&g_cfg.video.perf_overlay.opacity, localized_string_id::HOME_MENU_SETTINGS_PERFORMANCE_OVERLAY_OPACITY, " %", 1);
apply_layout();
}
home_menu_settings_debug::home_menu_settings_debug(s16 x, s16 y, u16 width, u16 height, bool use_separators, home_menu_page* parent)
: home_menu_settings_page(x, y, width, height, use_separators, parent, get_localized_string(localized_string_id::HOME_MENU_SETTINGS_DEBUG))
{
add_checkbox(&g_cfg.video.overlay, localized_string_id::HOME_MENU_SETTINGS_DEBUG_OVERLAY);
add_checkbox(&g_cfg.io.debug_overlay, localized_string_id::HOME_MENU_SETTINGS_DEBUG_INPUT_OVERLAY);
add_checkbox(&g_cfg.video.disable_video_output, localized_string_id::HOME_MENU_SETTINGS_DEBUG_DISABLE_VIDEO_OUTPUT);
add_float_slider(&g_cfg.video.texture_lod_bias, localized_string_id::HOME_MENU_SETTINGS_DEBUG_TEXTURE_LOD_BIAS, "", 0.25f);
apply_layout();
}
}
}
| 10,486 | C++ | .cpp | 116 | 86.577586 | 189 | 0.762232 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,428 | overlay_home_menu_message_box.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_message_box.cpp |
#include "stdafx.h"
#include "overlay_home_menu_message_box.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
namespace rsx
{
namespace overlays
{
home_menu_message_box::home_menu_message_box(s16 x, s16 y, u16 width, u16 height)
: overlay_element()
, m_accept_btn(120, 30)
, m_cancel_btn(120, 30)
{
back_color = {0.15f, 0.15f, 0.15f, 0.95f};
set_size(width, height);
set_pos(x, y);
m_label.align_text(text_align::center);
m_label.set_font("Arial", 16);
m_label.back_color.a = 0.0f;
if (g_cfg.sys.enter_button_assignment == enter_button_assign::circle)
{
m_accept_btn.set_image_resource(resource_config::standard_image_resource::circle);
m_cancel_btn.set_image_resource(resource_config::standard_image_resource::cross);
}
else
{
m_accept_btn.set_image_resource(resource_config::standard_image_resource::cross);
m_cancel_btn.set_image_resource(resource_config::standard_image_resource::circle);
}
m_accept_btn.set_pos(x + 30, y + height + 20);
m_cancel_btn.set_pos(x + 180, y + height + 20);
m_accept_btn.set_text(localized_string_id::RSX_OVERLAYS_LIST_SELECT);
m_cancel_btn.set_text(localized_string_id::RSX_OVERLAYS_LIST_CANCEL);
m_accept_btn.set_font("Arial", 16);
m_cancel_btn.set_font("Arial", 16);
}
compiled_resource& home_menu_message_box::get_compiled()
{
if (!is_compiled)
{
compiled_resource& compiled = overlay_element::get_compiled();
compiled.add(m_label.get_compiled());
compiled.add(m_cancel_btn.get_compiled());
compiled.add(m_accept_btn.get_compiled());
}
return compiled_resources;
}
void home_menu_message_box::show(const std::string& text, std::function<void()> on_accept, std::function<void()> on_cancel)
{
m_on_accept = std::move(on_accept);
m_on_cancel = std::move(on_cancel);
m_label.set_text(text);
m_label.auto_resize();
m_label.set_pos(x + (w - m_label.w) / 2, y + (h - m_label.h) / 2);
m_visible = true;
refresh();
}
void home_menu_message_box::hide()
{
m_visible = false;
refresh();
}
page_navigation home_menu_message_box::handle_button_press(pad_button button_press)
{
switch (button_press)
{
case pad_button::cross:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
if (m_on_accept)
{
m_on_accept();
}
return page_navigation::next;
}
case pad_button::circle:
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
if (m_on_cancel)
{
m_on_cancel();
}
return page_navigation::back;
}
default:
{
return page_navigation::stay;
}
}
}
}
}
| 2,697 | C++ | .cpp | 92 | 25.402174 | 125 | 0.66384 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,429 | overlay_home_menu_components.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu_components.cpp |
#include "stdafx.h"
#include "overlay_home_menu_components.h"
namespace rsx
{
namespace overlays
{
home_menu_entry::home_menu_entry(const std::string& text)
{
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> title = std::make_unique<label>(text);
padding->set_size(1, 1);
title->set_size(overlay::virtual_width - 2 * menu_entry_margin, menu_entry_height);
title->set_font("Arial", 16);
title->set_wrap_text(true);
title->align_text(text_align::center);
// Make back color transparent for text
title->back_color.a = 0.f;
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
static_cast<vertical_layout*>(text_stack.get())->add_element(title);
add_element(text_stack);
}
home_menu_checkbox::home_menu_checkbox(cfg::_bool* setting, const std::string& text) : home_menu_setting(setting, text)
{
m_background.set_size(menu_entry_margin, menu_entry_margin);
m_background.set_pos(overlay::virtual_width / 2 + menu_entry_margin, 0);
m_checkbox.set_size(m_background.w - 2, m_background.h - 2);
m_checkbox.set_pos(m_background.x, m_background.y);
}
compiled_resource& home_menu_checkbox::get_compiled()
{
update_value();
if (!is_compiled)
{
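// Swap foreground/background brightness so the inner box reads as checked or unchecked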
const f32 col = m_last_value ? 1.0f : 0.3f;
const f32 bkg = m_last_value ? 0.3f : 1.0f;
m_background.back_color.r = bkg;
m_background.back_color.g = bkg;
m_background.back_color.b = bkg;
m_checkbox.back_color.r = col;
m_checkbox.back_color.g = col;
m_checkbox.back_color.b = col;
m_background.set_pos(m_background.x, y + (h - m_background.h) / 2);
m_checkbox.set_pos(m_background.x + 1, m_background.y + 1);
compiled_resources = horizontal_layout::get_compiled();
compiled_resources.add(m_background.get_compiled());
compiled_resources.add(m_checkbox.get_compiled());
}
return compiled_resources;
}
}
}
| 2,120 | C++ | .cpp | 53 | 36.09434 | 121 | 0.689051 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,430 | overlay_home_menu.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/HomeMenu/overlay_home_menu.cpp |
#include "stdafx.h"
#include "overlay_home_menu.h"
#include "../overlay_manager.h"
#include "Emu/system_config.h"
#include "Utilities/date_time.h"
namespace rsx
{
namespace overlays
{
std::string get_time_string()
{
return date_time::fmt_time("%Y/%m/%d %H:%M:%S", time(nullptr));
}
home_menu_dialog::home_menu_dialog()
: m_main_menu(20, 85, virtual_width - 2 * 20, 540, false, nullptr)
{
m_allow_input_on_pause = true;
m_dim_background.set_size(virtual_width, virtual_height);
m_dim_background.back_color.a = 0.5f;
m_description.set_font("Arial", 20);
m_description.set_pos(20, 37);
m_description.set_text(m_main_menu.title);
m_description.auto_resize();
m_description.back_color.a = 0.f;
m_time_display.set_font("Arial", 14);
m_time_display.set_text(get_time_string());
m_time_display.auto_resize();
m_time_display.set_pos(virtual_width - (20 + m_time_display.w), (m_description.y + m_description.h) - m_time_display.h);
m_time_display.back_color.a = 0.f;
fade_animation.duration_sec = 0.15f;
return_code = selection_code::canceled;
}
void home_menu_dialog::update(u64 timestamp_us)
{
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
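// Refresh the on-screen clock only when the formatted time actually changes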
static std::string last_time;
std::string new_time = get_time_string();
if (last_time != new_time)
{
m_time_display.set_text(new_time);
m_time_display.auto_resize();
last_time = std::move(new_time);
}
}
void home_menu_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (fade_animation.active) return;
// Increase auto repeat interval for some buttons
switch (button_press)
{
case pad_button::dpad_left:
case pad_button::dpad_right:
case pad_button::ls_left:
case pad_button::ls_right:
m_auto_repeat_ms_interval = 10;
break;
default:
m_auto_repeat_ms_interval = m_auto_repeat_ms_interval_default;
break;
}
const page_navigation navigation = m_main_menu.handle_button_press(button_press, is_auto_repeat, m_auto_repeat_ms_interval);
switch (navigation)
{
case page_navigation::back:
case page_navigation::next:
{
if (home_menu_page* page = m_main_menu.get_current_page(true))
{
std::string path = page->title;
for (home_menu_page* parent = page->parent; parent; parent = parent->parent)
{
path = parent->title + " > " + path;
}
m_description.set_text(path);
m_description.auto_resize();
}
break;
}
case page_navigation::exit:
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
fade_animation.on_finish = [this]
{
close(true, true);
if (g_cfg.misc.pause_during_home_menu)
{
Emu.BlockingCallFromMainThread([]()
{
Emu.Resume();
});
}
};
break;
}
case page_navigation::stay:
{
break;
}
}
}
compiled_resource home_menu_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background.get_compiled());
result.add(m_main_menu.get_compiled());
result.add(m_description.get_compiled());
result.add(m_time_display.get_compiled());
fade_animation.apply(result);
return result;
}
error_code home_menu_dialog::show(std::function<void(s32 status)> on_close)
{
visible = false;
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
this->on_close = std::move(on_close);
visible = true;
const auto notify = std::make_shared<atomic_t<u32>>(0);
auto& overlayman = g_fxo->get<display_manager>();
overlayman.attach_thread_input(
uid, "Home menu",
[notify]() { *notify = true; notify->notify_one(); }
);
if (g_cfg.misc.pause_during_home_menu)
{
Emu.BlockingCallFromMainThread([]()
{
Emu.Pause(false, false);
});
}
while (!Emu.IsStopped() && !*notify)
{
notify->wait(0, atomic_wait_timeout{1'000'000});
}
return CELL_OK;
}
} // namespace overlays
} // namespace rsx
| 4,164 | C++ | .cpp | 148 | 23.831081 | 127 | 0.656971 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,431 | overlay_recvmessage_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Network/overlay_recvmessage_dialog.cpp |
#include "stdafx.h"
#include "../overlay_manager.h"
#include "overlay_recvmessage_dialog.h"
#include "Emu/System.h"
#include "Emu/NP/rpcn_client.h"
#include "Utilities/Thread.h"
namespace rsx
{
namespace overlays
{
void recvmessage_callback(void* param, std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id)
{
auto* dlg = static_cast<recvmessage_dialog*>(param);
dlg->callback_handler(std::move(new_msg), msg_id);
}
recvmessage_dialog::list_entry::list_entry(const std::string& name, const std::string& subj, const std::string& body)
{
std::unique_ptr<overlay_element> prefix_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> name_label = std::make_unique<label>(name);
std::unique_ptr<overlay_element> subj_label = std::make_unique<label>(subj);
std::unique_ptr<overlay_element> body_label = std::make_unique<label>(body);
std::unique_ptr<overlay_element> name_prefix_label = std::make_unique<label>(get_localized_string(localized_string_id::CELL_NP_RECVMESSAGE_DIALOG_FROM));
std::unique_ptr<overlay_element> subj_prefix_label = std::make_unique<label>(get_localized_string(localized_string_id::CELL_NP_RECVMESSAGE_DIALOG_SUBJECT));
name_prefix_label->set_size(0, 40);
name_prefix_label->set_font("Arial", 16);
name_prefix_label->set_wrap_text(false);
static_cast<label*>(name_prefix_label.get())->auto_resize(true);
subj_prefix_label->set_size(0, 40);
subj_prefix_label->set_font("Arial", 16);
subj_prefix_label->set_wrap_text(false);
static_cast<label*>(subj_prefix_label.get())->auto_resize(true);
name_label->set_size(200, 40);
name_label->set_font("Arial", 16);
name_label->set_wrap_text(false);
subj_label->set_size(600, 40);
subj_label->set_font("Arial", 16);
subj_label->set_wrap_text(false);
body_label->set_size(800, 0);
body_label->set_font("Arial", 16);
body_label->set_wrap_text(true);
static_cast<label*>(body_label.get())->auto_resize(true);
// Make back color transparent for text
name_label->back_color.a = 0.f;
subj_label->back_color.a = 0.f;
body_label->back_color.a = 0.f;
name_prefix_label->back_color.a = 0.f;
subj_prefix_label->back_color.a = 0.f;
static_cast<vertical_layout*>(prefix_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(prefix_stack.get())->add_spacer();
static_cast<vertical_layout*>(prefix_stack.get())->add_element(name_prefix_label);
static_cast<vertical_layout*>(prefix_stack.get())->add_element(subj_prefix_label);
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_spacer();
static_cast<vertical_layout*>(text_stack.get())->add_element(name_label);
static_cast<vertical_layout*>(text_stack.get())->add_element(subj_label);
static_cast<vertical_layout*>(text_stack.get())->add_element(body_label);
// Add spacer to make the thing look a bit nicer at the bottom... should ideally not be necessary
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 25;
static_cast<vertical_layout*>(text_stack.get())->add_spacer();
// Pack
pack_padding = 15;
add_element(prefix_stack);
add_element(text_stack);
}
recvmessage_dialog::recvmessage_dialog() : RecvMessageDialogBase()
{
m_dim_background = std::make_unique<overlay_element>();
m_dim_background->set_size(virtual_width, virtual_height);
m_dim_background->back_color.a = 0.5f;
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540, true, true);
m_list->set_pos(20, 85);
m_description = std::make_unique<label>();
m_description->set_font("Arial", 20);
m_description->set_pos(20, 37);
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_RECVMESSAGE_DIALOG_TITLE));
m_description->auto_resize();
m_description->back_color.a = 0.f;
fade_animation.duration_sec = 0.15f;
return_code = selection_code::canceled;
}
void recvmessage_dialog::update(u64 timestamp_us)
{
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
}
void recvmessage_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (fade_animation.active) return;
bool close_dialog = false;
std::lock_guard lock(m_mutex);
switch (button_press)
{
case pad_button::cross:
case pad_button::triangle:
if (m_list->m_items.empty())
break;
if (const usz index = static_cast<usz>(m_list->get_selected_index()); index < m_entry_ids.size())
{
return_code = button_press == pad_button::cross ? selection_code::ok : selection_code::no;
}
else
{
return_code = selection_code::error;
}
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
close_dialog = true;
break;
case pad_button::circle:
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
close_dialog = true;
break;
case pad_button::dpad_up:
case pad_button::ls_up:
m_list->select_previous();
break;
case pad_button::dpad_down:
case pad_button::ls_down:
m_list->select_next();
break;
case pad_button::L1:
m_list->select_previous(10);
break;
case pad_button::R1:
m_list->select_next(10);
break;
default:
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
if (close_dialog)
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
fade_animation.on_finish = [this]
{
close(true, true);
};
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
else if (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
}
compiled_resource recvmessage_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background->get_compiled());
result.add(m_list->get_compiled());
result.add(m_description->get_compiled());
fade_animation.apply(result);
return result;
}
error_code recvmessage_dialog::Exec(SceNpBasicMessageMainType type, SceNpBasicMessageRecvOptions options, SceNpBasicMessageRecvAction& recv_result, u64& chosen_msg_id)
{
visible = false;
switch (type)
{
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_ADD_FRIEND:
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_RECVMESSAGE_DIALOG_TITLE_ADD_FRIEND));
m_description->auto_resize();
break;
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_INVITE:
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_RECVMESSAGE_DIALOG_TITLE_INVITE));
m_description->auto_resize();
break;
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_DATA_ATTACHMENT:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_GENERAL:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_CUSTOM_DATA:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_URL_ATTACHMENT:
default:
break; // Title already set in constructor
}
const bool preserve = options & SCE_NP_BASIC_RECV_MESSAGE_OPTIONS_PRESERVE;
const bool include_bootable = options & SCE_NP_BASIC_RECV_MESSAGE_OPTIONS_INCLUDE_BOOTABLE;
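// PRESERVE keeps the chosen message on the server; without it, the message is marked as used once it has been handled (see accept_or_deny below)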
m_rpcn = rpcn::rpcn_client::get_instance(true);
// Get list of messages
const auto messages = m_rpcn->get_messages_and_register_cb(type, include_bootable, recvmessage_callback, this);
{
std::lock_guard lock(m_mutex);
for (const auto& [id, message] : messages)
{
ensure(message);
std::unique_ptr<overlay_element> entry = std::make_unique<list_entry>(message->first, message->second.subject, message->second.body);
m_entries.emplace_back(std::move(entry));
m_entry_ids.push_back(id);
}
for (auto& entry : m_entries)
{
m_list->add_entry(entry);
}
if (m_list->m_items.empty())
{
m_list->set_cancel_only(true);
}
else
{
// Only select an entry if there are entries available
m_list->select_entry(0);
}
}
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
visible = true;
const auto notify = std::make_shared<atomic_t<u32>>(0);
auto& overlayman = g_fxo->get<display_manager>();
auto& nps = g_fxo->get<np_state>();
// Block until the user exits the dialog
overlayman.attach_thread_input(
uid, "Recvmessage dialog", nullptr,
[notify](s32) { *notify = true; notify->notify_one(); }
);
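// Poll with a finite timeout so emulator shutdown or an sceNp abort is noticed even if no notification ever arrives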
while (!Emu.IsStopped() && !*notify && !nps.abort_gui_flag)
{
notify->wait(0, atomic_wait_timeout{1'000'000});
}
m_rpcn->remove_message_cb(recvmessage_callback, this);
error_code result = CELL_CANCEL;
if (nps.abort_gui_flag.exchange(false))
{
rsx_log.warning("Recvmessage dialog aborted by sceNp!");
close(false, true);
return result;
}
auto accept_or_deny = [preserve, this, &result, &recv_result, &chosen_msg_id](SceNpBasicMessageRecvAction result_from_action)
{
{
std::lock_guard lock(m_mutex);
const int selected_index = m_list->get_selected_index();
if (selected_index < 0 || static_cast<usz>(selected_index) >= m_entry_ids.size())
{
rsx_log.error("recvmessage dialog exited with unexpected selection: index=%d, entries=%d", selected_index, m_entry_ids.size());
return;
}
chosen_msg_id = ::at32(m_entry_ids, selected_index);
}
recv_result = result_from_action;
result = CELL_OK;
if (!preserve)
{
m_rpcn->mark_message_used(chosen_msg_id);
}
};
switch (return_code)
{
case selection_code::ok:
accept_or_deny(SCE_NP_BASIC_MESSAGE_ACTION_ACCEPT);
break;
case selection_code::no:
accept_or_deny(SCE_NP_BASIC_MESSAGE_ACTION_DENY);
break;
case selection_code::canceled:
rsx_log.notice("recvmessage dialog was canceled");
break;
default:
rsx_log.error("recvmessage dialog exited with error: %d", return_code);
break;
}
return result;
}
void recvmessage_dialog::callback_handler(std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id)
{
ensure(new_msg);
std::lock_guard lock(m_mutex);
std::unique_ptr<overlay_element> entry = std::make_unique<list_entry>(new_msg->first, new_msg->second.subject, new_msg->second.body);
m_entries.emplace_back(std::move(entry));
m_entry_ids.push_back(msg_id);
m_list->add_entry(m_entries.back());
if (m_list->get_cancel_only())
{
m_list->set_cancel_only(false);
m_list->select_entry(0);
}
}
} // namespace overlays
} // namespace rsx
| 11,015 | C++ | .cpp | 285 | 34.238596 | 169 | 0.689461 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,432 | overlay_sendmessage_dialog.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/Network/overlay_sendmessage_dialog.cpp |
#include "stdafx.h"
#include "../overlay_manager.h"
#include "overlay_sendmessage_dialog.h"
#include "Emu/System.h"
#include "Emu/NP/rpcn_client.h"
#include "Emu/Cell/Modules/cellMsgDialog.h"
#include "Emu/Cell/PPUThread.h" // for vm_var
#include "Emu/Memory/vm_var.h"
#include "Emu/Io/interception.h"
#include "Utilities/Thread.h"
namespace rsx
{
namespace overlays
{
void sendmessage_friend_callback(void* param, rpcn::NotificationType ntype, const std::string& username, bool status)
{
auto* dlg = static_cast<sendmessage_dialog*>(param);
dlg->callback_handler(ntype, username, status);
}
sendmessage_dialog::list_entry::list_entry(const std::string& msg)
{
std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
std::unique_ptr<overlay_element> text_label = std::make_unique<label>(msg);
padding->set_size(1, 1);
text_label->set_size(800, 40);
text_label->set_font("Arial", 16);
text_label->set_wrap_text(true);
// Make back color transparent for text
text_label->back_color.a = 0.f;
static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
static_cast<vertical_layout*>(text_stack.get())->add_element(text_label);
// Pack
pack_padding = 15;
add_element(text_stack);
}
sendmessage_dialog::sendmessage_dialog() : SendMessageDialogBase()
{
m_dim_background = std::make_unique<overlay_element>();
m_dim_background->set_size(virtual_width, virtual_height);
m_dim_background->back_color.a = 0.5f;
m_description = std::make_unique<label>();
m_description->set_font("Arial", 20);
m_description->set_pos(20, 37);
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_TITLE));
m_description->auto_resize();
m_description->back_color.a = 0.f;
fade_animation.duration_sec = 0.15f;
return_code = selection_code::canceled;
}
void sendmessage_dialog::update(u64 timestamp_us)
{
if (fade_animation.active)
{
fade_animation.update(timestamp_us);
}
}
void sendmessage_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
if (fade_animation.active) return;
if (m_confirmation_dialog_open) return; // Ignore input while the confirmation dialog is open
bool close_dialog = false;
std::lock_guard lock(m_mutex);
switch (button_press)
{
case pad_button::cross:
if (m_list->m_items.empty() || is_auto_repeat)
break;
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
if (!get_current_selection().empty())
{
m_open_confirmation_dialog = true;
m_confirmation_dialog_open = true; // Ignore input while the confirmation dialog is open. Set this here to avoid a race condition.
break;
}
return_code = selection_code::error;
close_dialog = true;
break;
case pad_button::circle:
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
close_dialog = true;
break;
case pad_button::dpad_up:
case pad_button::ls_up:
m_list->select_previous();
break;
case pad_button::dpad_down:
case pad_button::ls_down:
m_list->select_next();
break;
case pad_button::L1:
m_list->select_previous(10);
break;
case pad_button::R1:
m_list->select_next(10);
break;
default:
rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
break;
}
if (close_dialog)
{
fade_animation.current = color4f(1.f);
fade_animation.end = color4f(0.f);
fade_animation.active = true;
fade_animation.on_finish = [this]
{
close(true, true);
};
}
// Play a sound unless this is a fast auto repeat which would induce a nasty noise
else if (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default)
{
Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
}
}
compiled_resource sendmessage_dialog::get_compiled()
{
if (!visible)
{
return {};
}
compiled_resource result;
result.add(m_dim_background->get_compiled());
if (m_list)
{
result.add(m_list->get_compiled());
}
result.add(m_description->get_compiled());
fade_animation.apply(result);
return result;
}
error_code sendmessage_dialog::Exec(message_data& msg_data, std::set<std::string>& npids)
{
visible = false;
localized_string_id confirmation_loc_id = localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_CONFIRMATION;
switch (msg_data.mainType)
{
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_ADD_FRIEND:
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_TITLE_ADD_FRIEND));
m_description->auto_resize();
confirmation_loc_id = localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_CONFIRMATION_ADD_FRIEND;
break;
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_INVITE:
m_description->set_text(get_localized_string(localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_TITLE_INVITE));
m_description->auto_resize();
confirmation_loc_id = localized_string_id::CELL_NP_SENDMESSAGE_DIALOG_CONFIRMATION_INVITE;
break;
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_DATA_ATTACHMENT:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_GENERAL:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_CUSTOM_DATA:
case SceNpBasicMessageMainType::SCE_NP_BASIC_MESSAGE_MAIN_TYPE_URL_ATTACHMENT:
default:
break; // Title already set in constructor
}
m_rpcn = rpcn::rpcn_client::get_instance(true);
// Get list of messages
rpcn::friend_data data;
m_rpcn->get_friends_and_register_cb(data, sendmessage_friend_callback, this);
{
std::lock_guard lock(m_mutex);
for (const auto& [name, online_data] : data.friends)
{
// Only add online friends to the list
if (online_data.online)
{
if (std::any_of(m_entry_names.cbegin(), m_entry_names.cend(), [&name](const std::string& entry){ return entry == name; }))
continue;
m_entry_names.push_back(name);
}
}
reload({});
}
fade_animation.current = color4f(0.f);
fade_animation.end = color4f(1.f);
fade_animation.active = true;
visible = true;
const auto notify = std::make_shared<atomic_t<u32>>(0);
auto& overlayman = g_fxo->get<display_manager>();
auto& nps = g_fxo->get<np_state>();
// Block until the user exits the dialog
overlayman.attach_thread_input(
uid, "Sendmessage dialog", nullptr,
[notify](s32) { *notify = true; notify->notify_one(); }
);
bool confirmation_error = false;
while (!Emu.IsStopped() && !*notify && !nps.abort_gui_flag)
{
if (m_open_confirmation_dialog.exchange(false))
{
// Get user confirmation by opening a blocking dialog
const std::string npid = get_current_selection();
if (npid.empty())
{
rsx_log.fatal("sendmessage dialog can't open confirmation dialog with empty npid");
confirmation_error = true;
break;
}
rsx_log.notice("sendmessage dialog about to open confirmation dialog");
const std::string loc_msg = get_localized_string(confirmation_loc_id, npid.c_str());
const std::string confirmation_msg = fmt::format("%s %s\n\n%s", loc_msg, msg_data.subject, msg_data.body);
s32 confirmation_code = CELL_MSGDIALOG_BUTTON_NO;
// Hide list
visible = false;
error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO, vm::make_str(confirmation_msg), msg_dialog_source::_sceNp, vm::null, vm::null, vm::null, &confirmation_code);
if (res != CELL_OK)
{
rsx_log.fatal("sendmessage dialog failed to open confirmation dialog (error=%d)", +res);
confirmation_error = true;
break;
}
rsx_log.notice("sendmessage dialog received confirmation dialog result %d", confirmation_code);
if (confirmation_code == CELL_MSGDIALOG_BUTTON_YES)
{
return_code = selection_code::ok;
close(false, true);
break;
}
// Show list again
visible = true;
// Allow input again
m_confirmation_dialog_open = false;
// Intercept pads again (Only needed because we currently don't have an interception stack and the confirmation dialog disables it on close)
input::SetIntercepted(true);
}
notify->wait(0, atomic_wait_timeout{1'000'000});
}
m_rpcn->remove_friend_cb(sendmessage_friend_callback, this);
error_code result = CELL_CANCEL;
if (confirmation_error)
{
rsx_log.error("Sendmessage dialog aborted internally!");
close(false, true);
return result;
}
if (nps.abort_gui_flag.exchange(false))
{
rsx_log.warning("Sendmessage dialog aborted by sceNp!");
close(false, true);
return result;
}
switch (return_code)
{
case selection_code::ok:
{
const std::string current_selection = get_current_selection();
if (current_selection.empty())
{
rsx_log.fatal("sendmessage dialog can't send message to empty npid");
break;
}
npids.insert(current_selection);
// Send the message
if (m_rpcn->send_message(msg_data, npids))
{
result = CELL_OK;
}
break;
}
case selection_code::canceled:
rsx_log.notice("sendmessage dialog was canceled");
break;
default:
rsx_log.error("sendmessage dialog exited with error: %d", return_code);
break;
}
return result;
}
std::string sendmessage_dialog::get_current_selection() const
{
if (const s32 index = m_list->get_selected_index(); index >= 0 && static_cast<usz>(index) < m_entry_names.size())
{
return m_entry_names[index];
}
return {};
}
void sendmessage_dialog::reload(const std::string& previous_selection)
{
if (m_list)
{
status_flags |= status_bits::invalidate_image_cache;
}
m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540, false);
m_list->set_pos(20, 85);
for (const std::string& name : m_entry_names)
{
std::unique_ptr<overlay_element> entry = std::make_unique<list_entry>(name);
m_list->add_entry(entry);
}
if (m_list->m_items.empty())
{
m_list->set_cancel_only(true);
}
else if (m_list->get_cancel_only())
{
m_list->set_cancel_only(false);
m_list->select_entry(0);
}
else
{
// Only select an entry if there are entries available
s32 selected_index = 0;
// Try to select the previous selection
if (!previous_selection.empty())
{
for (s32 i = 0; i < ::narrow<s32>(m_entry_names.size()); i++)
{
if (m_entry_names[i] == previous_selection)
{
selected_index = i;
break;
}
}
}
m_list->select_entry(selected_index);
}
}
void sendmessage_dialog::callback_handler(u16 ntype, const std::string& username, bool status)
{
std::lock_guard lock(m_mutex);
const auto add_friend = [&]()
{
if (std::any_of(m_entry_names.cbegin(), m_entry_names.cend(), [&username](const std::string& entry){ return entry == username; }))
return;
const std::string current_selection = get_current_selection();
m_entry_names.push_back(username);
reload(current_selection);
};
const auto remove_friend = [&]()
{
const auto it = std::find(m_entry_names.cbegin(), m_entry_names.cend(), username);
if (it == m_entry_names.cend())
return;
const std::string current_selection = get_current_selection();
m_entry_names.erase(it);
reload(current_selection);
};
switch (ntype)
{
case rpcn::NotificationType::FriendQuery: // Other user sent a friend request
case rpcn::NotificationType::FriendPresenceChanged:
break;
case rpcn::NotificationType::FriendNew: // Add a friend to the friendlist(either accepted a friend request or friend accepted it)
{
if (status)
{
add_friend();
}
break;
}
case rpcn::NotificationType::FriendLost: // Remove friend from the friendlist(user removed friend or friend removed friend)
{
remove_friend();
break;
}
case rpcn::NotificationType::FriendStatus: // Set status of friend to Offline or Online
{
if (status)
{
add_friend();
}
else
{
remove_friend();
}
break;
}
default:
{
rsx_log.fatal("An unhandled notification type was received by the sendmessage dialog callback!");
break;
}
}
}
} // namespace overlays
} // namespace rsx
| 12,723 | C++ | .cpp | 377 | 29.071618 | 192 | 0.678158 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,433 | VKDraw.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKDraw.cpp |
#include "stdafx.h"
#include "../Common/BufferUtils.h"
#include "../rsx_methods.h"
#include "VKAsyncScheduler.h"
#include "VKGSRender.h"
#include "vkutils/buffer_object.h"
#include "vkutils/chip_class.h"
namespace vk
{
VkImageViewType get_view_type(rsx::texture_dimension_extended type)
{
switch (type)
{
case rsx::texture_dimension_extended::texture_dimension_1d:
return VK_IMAGE_VIEW_TYPE_1D;
case rsx::texture_dimension_extended::texture_dimension_2d:
return VK_IMAGE_VIEW_TYPE_2D;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
return VK_IMAGE_VIEW_TYPE_CUBE;
case rsx::texture_dimension_extended::texture_dimension_3d:
return VK_IMAGE_VIEW_TYPE_3D;
default: fmt::throw_exception("Unreachable");
}
}
VkCompareOp get_compare_func(rsx::comparison_function op, bool reverse_direction = false)
{
switch (op)
{
case rsx::comparison_function::never: return VK_COMPARE_OP_NEVER;
case rsx::comparison_function::greater: return reverse_direction ? VK_COMPARE_OP_LESS: VK_COMPARE_OP_GREATER;
case rsx::comparison_function::less: return reverse_direction ? VK_COMPARE_OP_GREATER: VK_COMPARE_OP_LESS;
case rsx::comparison_function::less_or_equal: return reverse_direction ? VK_COMPARE_OP_GREATER_OR_EQUAL: VK_COMPARE_OP_LESS_OR_EQUAL;
case rsx::comparison_function::greater_or_equal: return reverse_direction ? VK_COMPARE_OP_LESS_OR_EQUAL: VK_COMPARE_OP_GREATER_OR_EQUAL;
case rsx::comparison_function::equal: return VK_COMPARE_OP_EQUAL;
case rsx::comparison_function::not_equal: return VK_COMPARE_OP_NOT_EQUAL;
case rsx::comparison_function::always: return VK_COMPARE_OP_ALWAYS;
default:
fmt::throw_exception("Unknown compare op: 0x%x", static_cast<u32>(op));
}
}
void validate_image_layout_for_read_access(
vk::command_buffer& cmd,
vk::image_view* view,
VkPipelineStageFlags dst_stage,
const rsx::sampled_image_descriptor_base* sampler_state)
{
switch (auto raw = view->image(); +raw->current_layout)
{
default:
//case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
//ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst);
raw->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src);
raw->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_GENERAL:
case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
if (!sampler_state->is_cyclic_reference)
{
// This was used in a cyclic ref before, but is missing a barrier
// No need for a full stall, use a custom barrier instead
VkPipelineStageFlags src_stage;
VkAccessFlags src_access;
if (raw->aspect() == VK_IMAGE_ASPECT_COLOR_BIT)
{
src_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
src_access = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
else
{
src_stage = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
src_access = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
}
vk::insert_image_memory_barrier(
cmd,
raw->value,
raw->current_layout, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
src_stage, dst_stage,
src_access, VK_ACCESS_SHADER_READ_BIT,
{ raw->aspect(), 0, 1, 0, 1 });
raw->current_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
raw->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
}
}
}
void VKGSRender::begin_render_pass()
{
vk::begin_renderpass(
*m_current_command_buffer,
get_render_pass(),
m_draw_fbo->value,
{ positionu{0u, 0u}, sizeu{m_draw_fbo->width(), m_draw_fbo->height()} });
}
void VKGSRender::close_render_pass()
{
vk::end_renderpass(*m_current_command_buffer);
}
VkRenderPass VKGSRender::get_render_pass()
{
if (!m_cached_renderpass)
{
m_cached_renderpass = vk::get_renderpass(*m_device, m_current_renderpass_key);
}
return m_cached_renderpass;
}
void VKGSRender::update_draw_state()
{
m_profiler.start();
// Update conditional dynamic state
if (rsx::method_registers.current_draw_clause.primitive >= rsx::primitive_type::points && // AMD/AMDVLK driver does not like it if you render points without setting line width for some reason
rsx::method_registers.current_draw_clause.primitive <= rsx::primitive_type::line_strip)
{
const float actual_line_width =
m_device->get_wide_lines_support() ? rsx::method_registers.line_width() * rsx::get_resolution_scale() : 1.f;
vkCmdSetLineWidth(*m_current_command_buffer, actual_line_width);
}
if (rsx::method_registers.blend_enabled())
{
// Update blend constants
auto blend_colors = rsx::get_constant_blend_colors();
vkCmdSetBlendConstants(*m_current_command_buffer, blend_colors.data());
}
if (rsx::method_registers.stencil_test_enabled())
{
const bool two_sided_stencil = rsx::method_registers.two_sided_stencil_test_enabled();
VkStencilFaceFlags face_flag = (two_sided_stencil) ? VK_STENCIL_FACE_FRONT_BIT : VK_STENCIL_FRONT_AND_BACK;
vkCmdSetStencilWriteMask(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_mask());
vkCmdSetStencilCompareMask(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_mask());
vkCmdSetStencilReference(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_ref());
if (two_sided_stencil)
{
vkCmdSetStencilWriteMask(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_mask());
vkCmdSetStencilCompareMask(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_mask());
vkCmdSetStencilReference(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_ref());
}
}
// The remaining dynamic state should only be set once and we have signals to enable/disable mid-renderpass
if (!(m_current_command_buffer->flags & vk::command_buffer::cb_reload_dynamic_state))
{
// Dynamic state already set
m_frame_stats.setup_time += m_profiler.duration();
return;
}
if (rsx::method_registers.poly_offset_fill_enabled())
{
// offset_bias is the constant factor, multiplied by the implementation factor R
// offset_scale is the slope factor, multiplied by the triangle slope factor M
// R is implementation dependent and has to be derived empirically for supported implementations.
// Lucky for us, only NVIDIA currently supports fixed-point 24-bit depth buffers.
const auto polygon_offset_scale = rsx::method_registers.poly_offset_scale();
auto polygon_offset_bias = rsx::method_registers.poly_offset_bias();
if (m_draw_fbo->depth_format() == VK_FORMAT_D24_UNORM_S8_UINT && is_NVIDIA(vk::get_chip_family()))
{
// Empirically derived to be 0.5 * (2^24 - 1) for fixed type on Pascal. The same seems to apply for other NVIDIA GPUs.
// RSX seems to be using 2^24 - 1 instead making the biases twice as large when using fixed type Z-buffer on NVIDIA.
// Note, that the formula for floating point is complicated, but actually works out for us.
// Since the exponent range for a polygon is around 0, and we have 23 (+1) mantissa bits, R just works out to the same range by chance \o/.
polygon_offset_bias *= 0.5f;
}
vkCmdSetDepthBias(*m_current_command_buffer, polygon_offset_bias, 0.f, polygon_offset_scale);
}
else
{
// Zero bias value - disables depth bias
vkCmdSetDepthBias(*m_current_command_buffer, 0.f, 0.f, 0.f);
}
if (m_device->get_depth_bounds_support())
{
f32 bounds_min, bounds_max;
if (rsx::method_registers.depth_bounds_test_enabled())
{
// Update depth bounds min/max
bounds_min = rsx::method_registers.depth_bounds_min();
bounds_max = rsx::method_registers.depth_bounds_max();
}
else
{
// Avoid special case where min=max and depth bounds (incorrectly) fails
bounds_min = std::min(0.f, rsx::method_registers.clip_min());
bounds_max = std::max(1.f, rsx::method_registers.clip_max());
}
if (!m_device->get_unrestricted_depth_range_support())
{
bounds_min = std::clamp(bounds_min, 0.f, 1.f);
bounds_max = std::clamp(bounds_max, 0.f, 1.f);
}
vkCmdSetDepthBounds(*m_current_command_buffer, bounds_min, bounds_max);
}
bind_viewport();
m_current_command_buffer->flags &= ~vk::command_buffer::cb_reload_dynamic_state;
m_graphics_state.clear(rsx::pipeline_state::polygon_offset_state_dirty | rsx::pipeline_state::depth_bounds_state_dirty);
m_frame_stats.setup_time += m_profiler.duration();
}
void VKGSRender::load_texture_env()
{
// Load textures
bool check_for_cyclic_refs = false;
auto check_surface_cache_sampler = [&](auto descriptor, const auto& tex)
{
if (!m_texture_cache.test_if_descriptor_expired(*m_current_command_buffer, m_rtts, descriptor, tex))
{
check_for_cyclic_refs |= descriptor->is_cyclic_reference;
return true;
}
return false;
};
std::lock_guard lock(m_sampler_mutex);
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
if (!fs_sampler_state[i])
fs_sampler_state[i] = std::make_unique<vk::texture_cache::sampled_image_descriptor>();
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
const auto& tex = rsx::method_registers.fragment_textures[i];
const auto previous_format_class = fs_sampler_state[i]->format_class;
if (m_samplers_dirty || m_textures_dirty[i] || !check_surface_cache_sampler(sampler_state, tex))
{
if (tex.enabled())
{
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
}
else
{
*sampler_state = {};
}
if (sampler_state->validate())
{
if (sampler_state->is_cyclic_reference)
{
check_for_cyclic_refs |= true;
}
if (!m_textures_dirty[i] && sampler_state->format_class != previous_format_class)
{
// Host details changed but RSX is not aware
m_graphics_state |= rsx::fragment_program_state_dirty;
}
bool replace = !fs_sampler_handles[i];
VkFilter mag_filter;
vk::minification_filter min_filter;
f32 min_lod = 0.f, max_lod = 0.f;
f32 lod_bias = 0.f;
const u32 texture_format = tex.format() & ~(CELL_GCM_TEXTURE_UN | CELL_GCM_TEXTURE_LN);
VkBool32 compare_enabled = VK_FALSE;
VkCompareOp depth_compare_mode = VK_COMPARE_OP_NEVER;
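// The GCM depth formats occupy a contiguous range (DEPTH24_D8 .. DEPTH16_FLOAT), so a simple bounds check enables shadow (depth-compare) sampling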
if (texture_format >= CELL_GCM_TEXTURE_DEPTH24_D8 && texture_format <= CELL_GCM_TEXTURE_DEPTH16_FLOAT)
{
compare_enabled = VK_TRUE;
depth_compare_mode = vk::get_compare_func(tex.zfunc(), true);
}
const f32 af_level = vk::max_aniso(tex.max_aniso());
const auto wrap_s = vk::vk_wrap_mode(tex.wrap_s());
const auto wrap_t = vk::vk_wrap_mode(tex.wrap_t());
const auto wrap_r = vk::vk_wrap_mode(tex.wrap_r());
// NOTE: In Vulkan, the border color bypasses the swizzling defined in the image view.
// It is a direct texel replacement and must be remapped before attaching to the sampler.
const auto border_color = rsx::is_border_clamped_texture(tex)
? vk::border_color_t(tex.remapped_border_color())
: vk::border_color_t(VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
// Check if non-point filtering can even be used on this format
bool can_sample_linear;
if (sampler_state->format_class == RSX_FORMAT_CLASS_COLOR) [[likely]]
{
// Most PS3-like formats can be linearly filtered without problem
can_sample_linear = true;
}
else
{
// Not all GPUs support linear filtering of depth formats
const auto vk_format = sampler_state->image_handle ? sampler_state->image_handle->image()->format() :
vk::get_compatible_sampler_format(m_device->get_formats_support(), sampler_state->external_subresource_desc.gcm_format);
can_sample_linear = m_device->get_format_properties(vk_format).optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
}
const auto mipmap_count = tex.get_exact_mipmap_count();
min_filter = vk::get_min_filter(tex.min_filter());
if (can_sample_linear)
{
mag_filter = vk::get_mag_filter(tex.mag_filter());
}
else
{
mag_filter = VK_FILTER_NEAREST;
min_filter.filter = VK_FILTER_NEAREST;
}
if (min_filter.sample_mipmaps && mipmap_count > 1)
{
f32 actual_mipmaps;
if (sampler_state->upload_context == rsx::texture_upload_context::shader_read)
{
actual_mipmaps = static_cast<f32>(mipmap_count);
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::mipmap_gather)
{
// Clamp min and max lod
actual_mipmaps = static_cast<f32>(sampler_state->external_subresource_desc.sections_to_copy.size());
}
else
{
actual_mipmaps = 1.f;
}
if (actual_mipmaps > 1.f)
{
min_lod = tex.min_lod();
max_lod = tex.max_lod();
lod_bias = tex.bias();
min_lod = std::min(min_lod, actual_mipmaps - 1.f);
max_lod = std::min(max_lod, actual_mipmaps - 1.f);
if (min_filter.mipmap_mode == VK_SAMPLER_MIPMAP_MODE_NEAREST)
{
// Round to nearest 0.5 to work around some broken games
// Unlike OpenGL, sampler parameters cannot be dynamically changed on Vulkan, leading to many permutations
lod_bias = std::floor(lod_bias * 2.f + 0.5f) * 0.5f;
}
}
else
{
min_lod = max_lod = lod_bias = 0.f;
min_filter.mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
}
}
if (fs_sampler_handles[i] && m_textures_dirty[i])
{
if (!fs_sampler_handles[i]->matches(wrap_s, wrap_t, wrap_r, false, lod_bias, af_level, min_lod, max_lod,
min_filter.filter, mag_filter, min_filter.mipmap_mode, border_color, compare_enabled, depth_compare_mode))
{
replace = true;
}
}
if (replace)
{
fs_sampler_handles[i] = vk::get_resource_manager()->get_sampler(
*m_device,
fs_sampler_handles[i],
wrap_s, wrap_t, wrap_r,
false,
lod_bias, af_level, min_lod, max_lod,
min_filter.filter, mag_filter, min_filter.mipmap_mode,
border_color, compare_enabled, depth_compare_mode);
}
}
m_textures_dirty[i] = false;
}
}
for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
if (!vs_sampler_state[i])
vs_sampler_state[i] = std::make_unique<vk::texture_cache::sampled_image_descriptor>();
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
const auto& tex = rsx::method_registers.vertex_textures[i];
const auto previous_format_class = sampler_state->format_class;
if (m_samplers_dirty || m_vertex_textures_dirty[i] || !check_surface_cache_sampler(sampler_state, tex))
{
if (rsx::method_registers.vertex_textures[i].enabled())
{
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
}
else
{
*sampler_state = {};
}
if (sampler_state->validate())
{
if (sampler_state->is_cyclic_reference || sampler_state->external_subresource_desc.do_not_cache)
{
check_for_cyclic_refs |= true;
}
if (!m_vertex_textures_dirty[i] && sampler_state->format_class != previous_format_class)
{
// Host details changed but RSX is not aware
m_graphics_state |= rsx::vertex_program_state_dirty;
}
bool replace = !vs_sampler_handles[i];
const VkBool32 unnormalized_coords = !!(tex.format() & CELL_GCM_TEXTURE_UN);
const auto min_lod = tex.min_lod();
const auto max_lod = tex.max_lod();
const auto wrap_s = vk::vk_wrap_mode(tex.wrap_s());
const auto wrap_t = vk::vk_wrap_mode(tex.wrap_t());
// NOTE: In Vulkan, the border color bypasses the swizzling defined in the image view.
// It is a direct texel replacement and must be remapped before attaching to the sampler.
const auto border_color = is_border_clamped_texture(tex)
? vk::border_color_t(tex.remapped_border_color())
: vk::border_color_t(VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
if (vs_sampler_handles[i])
{
if (!vs_sampler_handles[i]->matches(wrap_s, wrap_t, VK_SAMPLER_ADDRESS_MODE_REPEAT,
unnormalized_coords, 0.f, 1.f, min_lod, max_lod, VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, border_color))
{
replace = true;
}
}
if (replace)
{
vs_sampler_handles[i] = vk::get_resource_manager()->get_sampler(
*m_device,
vs_sampler_handles[i],
wrap_s, wrap_t, VK_SAMPLER_ADDRESS_MODE_REPEAT,
unnormalized_coords,
0.f, 1.f, min_lod, max_lod,
VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, border_color);
}
}
m_vertex_textures_dirty[i] = false;
}
}
m_samplers_dirty.store(false);
if (check_for_cyclic_refs)
{
// Regenerate renderpass key
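// A cyclic reference can force an attachment into a different image layout, which changes the renderpass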
if (const auto key = vk::get_renderpass_key(m_fbo_images, m_current_renderpass_key);
key != m_current_renderpass_key)
{
m_current_renderpass_key = key;
m_cached_renderpass = VK_NULL_HANDLE;
}
}
if (g_cfg.video.vk.asynchronous_texture_streaming)
{
// We have to do this here, because we have to assume the CB will be dumped
auto& async_task_scheduler = g_fxo->get<vk::AsyncTaskScheduler>();
if (async_task_scheduler.is_recording() &&
!async_task_scheduler.is_host_mode())
{
// Sync any async scheduler tasks
if (auto ev = async_task_scheduler.get_primary_sync_label())
{
ev->gpu_wait(*m_current_command_buffer, m_async_compute_dependency_info);
}
}
}
}
bool VKGSRender::bind_texture_env()
{
bool out_of_memory = false;
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
vk::image_view* view = nullptr;
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
if (rsx::method_registers.fragment_textures[i].enabled() &&
sampler_state->validate())
{
if (view = sampler_state->image_handle; !view)
{
//Requires update, copy subresource
if (!(view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
{
out_of_memory = true;
}
}
else
{
validate_image_layout_for_read_access(*m_current_command_buffer, view, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, sampler_state);
}
}
if (view) [[likely]]
{
m_program->bind_uniform({ fs_sampler_handles[i]->value, view->value, view->image()->current_layout },
i,
::glsl::program_domain::glsl_fragment_program,
m_current_frame->descriptor_set);
if (current_fragment_program.texture_state.redirected_textures & (1 << i))
{
// Stencil mirror required
auto root_image = static_cast<vk::viewable_image*>(view->image());
auto stencil_view = root_image->get_view(rsx::default_remap_vector, VK_IMAGE_ASPECT_STENCIL_BIT);
if (!m_stencil_mirror_sampler)
{
m_stencil_mirror_sampler = std::make_unique<vk::sampler>(*m_device,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
VK_FALSE, 0.f, 1.f, 0.f, 0.f,
VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST,
VK_BORDER_COLOR_INT_OPAQUE_BLACK);
}
m_program->bind_uniform({ m_stencil_mirror_sampler->value, stencil_view->value, stencil_view->image()->current_layout },
i,
::glsl::program_domain::glsl_fragment_program,
m_current_frame->descriptor_set,
true);
}
}
else
{
const VkImageViewType view_type = vk::get_view_type(current_fragment_program.get_texture_dimension(i));
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(*m_current_command_buffer, view_type)->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL },
i,
::glsl::program_domain::glsl_fragment_program,
m_current_frame->descriptor_set);
if (current_fragment_program.texture_state.redirected_textures & (1 << i))
{
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(*m_current_command_buffer, view_type)->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL },
i,
::glsl::program_domain::glsl_fragment_program,
m_current_frame->descriptor_set,
true);
}
}
}
for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
if (!rsx::method_registers.vertex_textures[i].enabled())
{
const auto view_type = vk::get_view_type(current_vertex_program.get_texture_dimension(i));
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(*m_current_command_buffer, view_type)->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL },
i,
::glsl::program_domain::glsl_vertex_program,
m_current_frame->descriptor_set);
continue;
}
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
auto image_ptr = sampler_state->image_handle;
if (!image_ptr && sampler_state->validate())
{
if (!(image_ptr = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
{
out_of_memory = true;
}
}
if (!image_ptr)
{
rsx_log.error("Texture upload failed for vtexture index %d. Binding null sampler.", i);
const auto view_type = vk::get_view_type(current_vertex_program.get_texture_dimension(i));
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(*m_current_command_buffer, view_type)->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL },
i,
::glsl::program_domain::glsl_vertex_program,
m_current_frame->descriptor_set);
continue;
}
validate_image_layout_for_read_access(*m_current_command_buffer, image_ptr, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, sampler_state);
m_program->bind_uniform({ vs_sampler_handles[i]->value, image_ptr->value, image_ptr->image()->current_layout },
i,
::glsl::program_domain::glsl_vertex_program,
m_current_frame->descriptor_set);
}
return out_of_memory;
}
bool VKGSRender::bind_interpreter_texture_env()
{
if (current_fp_metadata.referenced_textures_mask == 0)
{
// Nothing to do
return false;
}
std::array<VkDescriptorImageInfo, 68> texture_env;
VkDescriptorImageInfo fallback = { vk::null_sampler(), vk::null_image_view(*m_current_command_buffer, VK_IMAGE_VIEW_TYPE_1D)->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL };
auto start = texture_env.begin();
auto end = start;
// Fill default values
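// 4 view types x 16 slots each = 64 defaulted entries; the remaining slots of the 68-entry array are not written here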
// 1D
std::advance(end, 16);
std::fill(start, end, fallback);
// 2D
start = end;
fallback.imageView = vk::null_image_view(*m_current_command_buffer, VK_IMAGE_VIEW_TYPE_2D)->value;
std::advance(end, 16);
std::fill(start, end, fallback);
// 3D
start = end;
fallback.imageView = vk::null_image_view(*m_current_command_buffer, VK_IMAGE_VIEW_TYPE_3D)->value;
std::advance(end, 16);
std::fill(start, end, fallback);
// CUBE
start = end;
fallback.imageView = vk::null_image_view(*m_current_command_buffer, VK_IMAGE_VIEW_TYPE_CUBE)->value;
std::advance(end, 16);
std::fill(start, end, fallback);
bool out_of_memory = false;
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
vk::image_view* view = nullptr;
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
if (rsx::method_registers.fragment_textures[i].enabled() &&
sampler_state->validate())
{
if (view = sampler_state->image_handle; !view)
{
//Requires update, copy subresource
if (!(view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
{
out_of_memory = true;
}
}
else
{
validate_image_layout_for_read_access(*m_current_command_buffer, view, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, sampler_state);
}
}
if (view)
{
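// Remap image_type (1d, 2d, cubemap, 3d) to the regions filled above (1D @ 0, 2D @ 16, 3D @ 32, CUBE @ 48), hence { 0, 16, 48, 32 }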
const int offsets[] = { 0, 16, 48, 32 };
auto& sampled_image_info = texture_env[offsets[static_cast<u32>(sampler_state->image_type)] + i];
sampled_image_info = { fs_sampler_handles[i]->value, view->value, view->image()->current_layout };
}
}
m_shader_interpreter.update_fragment_textures(texture_env, m_current_frame->descriptor_set);
return out_of_memory;
}
void VKGSRender::emit_geometry(u32 sub_index)
{
auto &draw_call = rsx::method_registers.current_draw_clause;
m_profiler.start();
const rsx::flags32_t vertex_state_mask = rsx::vertex_base_changed | rsx::vertex_arrays_changed;
const rsx::flags32_t state_flags = (sub_index == 0) ? rsx::vertex_arrays_changed : draw_call.execute_pipeline_dependencies(m_ctx);
if (state_flags & rsx::vertex_arrays_changed)
{
analyse_inputs_interleaved(m_vertex_layout);
}
else if (state_flags & rsx::vertex_base_changed)
{
// Rebase vertex bases instead of re-analysing the interleaved layout
for (auto& info : m_vertex_layout.interleaved_blocks)
{
info->vertex_range.second = 0;
const auto vertex_base_offset = rsx::method_registers.vertex_data_base_offset();
info->real_offset_address = rsx::get_address(rsx::get_vertex_offset_from_base(vertex_base_offset, info->base_offset), info->memory_location);
}
}
else
{
// Discard cached results
for (auto& info : m_vertex_layout.interleaved_blocks)
{
info->vertex_range.second = 0;
}
}
if ((state_flags & vertex_state_mask) && !m_vertex_layout.validate())
{
// No vertex inputs enabled
// Execute remaining pipeline barriers with NOP draw
do
{
draw_call.execute_pipeline_dependencies(m_ctx);
}
while (draw_call.next());
draw_call.end();
return;
}
const auto old_persistent_buffer = m_persistent_attribute_storage ? m_persistent_attribute_storage->value : null_buffer_view->value;
const auto old_volatile_buffer = m_volatile_attribute_storage ? m_volatile_attribute_storage->value : null_buffer_view->value;
// Programs data is dependent on vertex state
auto upload_info = upload_vertex_data();
if (!upload_info.vertex_draw_count)
{
// Malformed vertex setup; abort
return;
}
m_frame_stats.vertex_upload_time += m_profiler.duration();
// Faults are allowed during vertex upload. Ensure consistent CB state after uploads.
// Queries are spawned and closed outside render pass scope for consistency reasons.
if (m_current_command_buffer->flags & vk::command_buffer::cb_load_occluson_task)
{
u32 occlusion_id = m_occlusion_query_manager->allocate_query(*m_current_command_buffer);
if (occlusion_id == umax)
{
// Force flush
rsx_log.warning("[Performance Warning] Out of free occlusion slots. Forcing hard sync.");
ZCULL_control::sync(this);
occlusion_id = m_occlusion_query_manager->allocate_query(*m_current_command_buffer);
if (occlusion_id == umax)
{
//rsx_log.error("Occlusion pool overflow");
if (m_current_task) m_current_task->result = 1;
}
}
// Begin query
m_occlusion_query_manager->begin_query(*m_current_command_buffer, occlusion_id);
auto& data = m_occlusion_map[m_active_query_info->driver_handle];
data.indices.push_back(occlusion_id);
data.set_sync_command_buffer(m_current_command_buffer);
m_current_command_buffer->flags &= ~vk::command_buffer::cb_load_occluson_task;
m_current_command_buffer->flags |= (vk::command_buffer::cb_has_occlusion_task | vk::command_buffer::cb_has_open_query);
}
auto persistent_buffer = m_persistent_attribute_storage ? m_persistent_attribute_storage->value : null_buffer_view->value;
auto volatile_buffer = m_volatile_attribute_storage ? m_volatile_attribute_storage->value : null_buffer_view->value;
bool update_descriptors = false;
const auto& binding_table = m_device->get_pipeline_binding_table();
if (m_current_draw.subdraw_id == 0)
{
update_descriptors = true;
// Allocate stream layout memory for this batch
m_vertex_layout_stream_info.range = rsx::method_registers.current_draw_clause.pass_count() * 128;
m_vertex_layout_stream_info.offset = m_vertex_layout_ring_info.alloc<256>(m_vertex_layout_stream_info.range);
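// One 128-byte layout block per subdraw in this clause; alloc<256> presumably enforces 256-byte alignment of the allocation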
if (vk::test_status_interrupt(vk::heap_changed))
{
if (m_vertex_layout_storage &&
m_vertex_layout_storage->info.buffer != m_vertex_layout_ring_info.heap->value)
{
m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage));
}
vk::clear_status_interrupt(vk::heap_changed);
}
}
else if (persistent_buffer != old_persistent_buffer || volatile_buffer != old_volatile_buffer)
{
// Need to update descriptors; make a copy for the next draw
VkDescriptorSet previous_set = m_current_frame->descriptor_set.value();
m_current_frame->descriptor_set.flush();
m_current_frame->descriptor_set = allocate_descriptor_set();
rsx::simple_array<VkCopyDescriptorSet> copy_cmds(binding_table.total_descriptor_bindings);
for (u32 n = 0; n < binding_table.total_descriptor_bindings; ++n)
{
copy_cmds[n] =
{
VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET, // sType
nullptr, // pNext
previous_set, // srcSet
n, // srcBinding
0u, // srcArrayElement
m_current_frame->descriptor_set.value(), // dstSet
n, // dstBinding
0u, // dstArrayElement
1u // descriptorCount
};
}
m_current_frame->descriptor_set.push(copy_cmds);
update_descriptors = true;
}
// Update vertex fetch parameters
update_vertex_env(sub_index, upload_info);
ensure(m_vertex_layout_storage);
if (update_descriptors)
{
m_program->bind_uniform(persistent_buffer, binding_table.vertex_buffers_first_bind_slot, m_current_frame->descriptor_set);
m_program->bind_uniform(volatile_buffer, binding_table.vertex_buffers_first_bind_slot + 1, m_current_frame->descriptor_set);
m_program->bind_uniform(m_vertex_layout_storage->value, binding_table.vertex_buffers_first_bind_slot + 2, m_current_frame->descriptor_set);
}
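// Only the first host subdraw of a batch needs the full pipeline and dynamic-state rebind; later subdraws reuse it unless the renderpass changes below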
bool reload_state = (!m_current_draw.subdraw_id++);
vk::renderpass_op(*m_current_command_buffer, [&](const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer fbo)
{
if (get_render_pass() == pass && m_draw_fbo->value == fbo)
{
// Nothing to do
return;
}
if (pass)
{
// Subpass mismatch, end it before proceeding
vk::end_renderpass(cmd);
}
// Starting a new renderpass should clobber dynamic state
m_current_command_buffer->flags |= vk::command_buffer::cb_reload_dynamic_state;
reload_state = true;
});
if (reload_state)
{
vkCmdBindPipeline(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
update_draw_state();
begin_render_pass();
if (cond_render_ctrl.hw_cond_active && m_device->get_conditional_render_support())
{
// It is inconvenient that conditional rendering breaks other things like compute dispatch
// TODO: If this is heavy, refactor the resources into globals and add checks around compute dispatch
VkConditionalRenderingBeginInfoEXT info{};
info.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
info.buffer = m_cond_render_buffer->value;
m_device->_vkCmdBeginConditionalRenderingEXT(*m_current_command_buffer, &info);
m_current_command_buffer->flags |= vk::command_buffer::cb_has_conditional_render;
}
}
// Bind the new set of descriptors for use with this draw call
m_current_frame->descriptor_set.bind(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline_layout);
m_frame_stats.setup_time += m_profiler.duration();
if (!upload_info.index_info)
{
if (draw_call.is_single_draw())
{
vkCmdDraw(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0);
}
else
{
u32 vertex_offset = 0;
const auto subranges = draw_call.get_subranges();
for (const auto &range : subranges)
{
vkCmdDraw(*m_current_command_buffer, range.count, 1, vertex_offset, 0);
vertex_offset += range.count;
}
}
}
else
{
const VkIndexType index_type = std::get<1>(*upload_info.index_info);
const VkDeviceSize offset = std::get<0>(*upload_info.index_info);
vkCmdBindIndexBuffer(*m_current_command_buffer, m_index_buffer_ring_info.heap->value, offset, index_type);
if (rsx::method_registers.current_draw_clause.is_single_draw())
{
const u32 index_count = upload_info.vertex_draw_count;
vkCmdDrawIndexed(*m_current_command_buffer, index_count, 1, 0, 0, 0);
}
else
{
u32 vertex_offset = 0;
const auto subranges = draw_call.get_subranges();
for (const auto &range : subranges)
{
const auto count = get_index_count(draw_call.primitive, range.count);
vkCmdDrawIndexed(*m_current_command_buffer, count, 1, vertex_offset, 0, 0);
vertex_offset += count;
}
}
}
m_frame_stats.draw_exec_time += m_profiler.duration();
}
void VKGSRender::begin()
{
// Save shader state now before prefetch and loading happens
m_interpreter_state = (m_graphics_state.load() & rsx::pipeline_state::invalidate_pipeline_bits);
rsx::thread::begin();
if (skip_current_frame ||
swapchain_unavailable ||
cond_render_ctrl.disable_rendering())
{
return;
}
init_buffers(rsx::framebuffer_creation_context::context_draw);
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
// Shaders need to be reloaded.
m_prev_program = m_program;
m_program = nullptr;
}
}
void VKGSRender::end()
{
if (skip_current_frame || !m_graphics_state.test(rsx::rtt_config_valid) || swapchain_unavailable || cond_render_ctrl.disable_rendering())
{
execute_nop_draw();
rsx::thread::end();
return;
}
m_profiler.start();
// Check for frame resource status here because it is possible for an async flip to happen between begin/end
if (m_current_frame->flags & frame_context_state::dirty) [[unlikely]]
{
check_present_status();
if (m_current_frame->swap_command_buffer) [[unlikely]]
{
// Borrow time by using the auxiliary context
m_aux_frame_context.grab_resources(*m_current_frame);
m_current_frame = &m_aux_frame_context;
}
ensure(!m_current_frame->swap_command_buffer);
m_current_frame->flags &= ~frame_context_state::dirty;
}
if (m_graphics_state & (rsx::pipeline_state::fragment_program_ucode_dirty | rsx::pipeline_state::vertex_program_ucode_dirty))
{
analyse_current_rsx_pipeline();
}
m_frame_stats.setup_time += m_profiler.duration();
load_texture_env();
m_frame_stats.textures_upload_time += m_profiler.duration();
if (!load_program())
{
// Program is not ready, skip drawing this
std::this_thread::yield();
execute_nop_draw();
// m_rtts.on_write(); - breaks games for obvious reasons
rsx::thread::end();
return;
}
// Allocate descriptor set
m_current_frame->descriptor_set = allocate_descriptor_set();
// Load program execution environment
load_program_env();
m_frame_stats.setup_time += m_profiler.duration();
// Apply write memory barriers
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil)) ds->write_barrier(*m_current_command_buffer);
for (auto &rtt : m_rtts.m_bound_render_targets)
{
if (auto surface = std::get<1>(rtt))
{
surface->write_barrier(*m_current_command_buffer);
}
}
m_frame_stats.setup_time += m_profiler.duration();
// Now bind the shader resources. It is important that this takes place after the barriers so that we don't end up with stale descriptors
for (int retry = 0; retry < 3; ++retry)
{
if (retry > 0 && m_samplers_dirty) [[ unlikely ]]
{
// Reload texture env if referenced objects were invalidated during OOM handling.
load_texture_env();
// Do not trust fragment/vertex texture state after a texture state reset.
// NOTE: We don't want to change the program - it's too late for that now. We just need to harmonize the state.
m_graphics_state |= rsx::vertex_program_state_dirty | rsx::fragment_program_state_dirty;
get_current_fragment_program(fs_sampler_state);
get_current_vertex_program(vs_sampler_state);
m_graphics_state.clear(rsx::pipeline_state::invalidate_pipeline_bits);
}
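// The shader interpreter binds its textures through a flat descriptor array (see bind_interpreter_texture_env), so pick the matching bind path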
const bool out_of_memory = m_shader_interpreter.is_interpreter(m_program)
? bind_interpreter_texture_env()
: bind_texture_env();
if (!out_of_memory)
{
break;
}
// Handle OOM
if (!on_vram_exhausted(rsx::problem_severity::fatal))
{
// It is not possible to free memory. Just use placeholder textures. Can cause graphics glitches but shouldn't crash otherwise
break;
}
}
m_texture_cache.release_uncached_temporary_subresources();
m_frame_stats.textures_upload_time += m_profiler.duration();
// Final heap check...
check_heap_status(VK_HEAP_CHECK_VERTEX_STORAGE | VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE);
u32 sub_index = 0; // RSX subdraw ID
m_current_draw.subdraw_id = 0; // Host subdraw ID. Invalid RSX subdraws do not increment this value
if (m_graphics_state & rsx::pipeline_state::invalidate_vk_dynamic_state)
{
m_current_command_buffer->flags |= vk::command_buffer::cb_reload_dynamic_state;
}
rsx::method_registers.current_draw_clause.begin();
do
{
emit_geometry(sub_index++);
}
while (rsx::method_registers.current_draw_clause.next());
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
{
m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
m_current_command_buffer->flags &= ~(vk::command_buffer::cb_has_conditional_render);
}
m_rtts.on_write(m_framebuffer_layout.color_write_enabled, m_framebuffer_layout.zeta_write_enabled);
rsx::thread::end();
}
| 38,505 | C++ | .cpp | 948 | 36.908228 | 194 | 0.706274 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,434 | VKPipelineCompiler.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKPipelineCompiler.cpp |
#include "stdafx.h"
#include "VKPipelineCompiler.h"
#include "VKRenderPass.h"
#include "vkutils/device.h"
#include "Utilities/Thread.h"
#include <thread>
#include "util/sysinfo.hpp"
namespace vk
{
// Global list of worker threads
std::unique_ptr<named_thread_group<pipe_compiler>> g_pipe_compilers;
int g_num_pipe_compilers = 0;
atomic_t<int> g_compiler_index{};
pipe_compiler::pipe_compiler()
{
// TODO: Initialize workqueue
}
pipe_compiler::~pipe_compiler()
{
// TODO: Destroy and do cleanup
}
void pipe_compiler::initialize(const vk::render_device* pdev)
{
m_device = pdev;
}
void pipe_compiler::operator()()
{
while (thread_ctrl::state() != thread_state::aborting)
{
for (auto&& job : m_work_queue.pop_all())
{
if (job.is_graphics_job)
{
auto compiled = int_compile_graphics_pipe(job.graphics_data, job.graphics_modules, job.pipe_layout, job.inputs, {});
job.callback_func(compiled);
}
else
{
auto compiled = int_compile_compute_pipe(job.compute_data, job.pipe_layout);
job.callback_func(compiled);
}
}
thread_ctrl::wait_on(m_work_queue);
}
}
std::unique_ptr<glsl::program> pipe_compiler::int_compile_compute_pipe(const VkComputePipelineCreateInfo& create_info, VkPipelineLayout pipe_layout)
{
VkPipeline pipeline;
CHECK_RESULT(vkCreateComputePipelines(*g_render_device, nullptr, 1, &create_info, nullptr, &pipeline));
return std::make_unique<vk::glsl::program>(*m_device, pipeline, pipe_layout);
}
std::unique_ptr<glsl::program> pipe_compiler::int_compile_graphics_pipe(const VkGraphicsPipelineCreateInfo& create_info, VkPipelineLayout pipe_layout,
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
VkPipeline pipeline;
CHECK_RESULT(vkCreateGraphicsPipelines(*m_device, nullptr, 1, &create_info, nullptr, &pipeline));
auto result = std::make_unique<vk::glsl::program>(*m_device, pipeline, pipe_layout, vs_inputs, fs_inputs);
result->link();
return result;
}
std::unique_ptr<glsl::program> pipe_compiler::int_compile_graphics_pipe(const vk::pipeline_props &create_info, VkShaderModule modules[2], VkPipelineLayout pipe_layout,
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
VkPipelineShaderStageCreateInfo shader_stages[2] = {};
shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shader_stages[0].module = modules[0];
shader_stages[0].pName = "main";
shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shader_stages[1].module = modules[1];
shader_stages[1].pName = "main";
std::vector<VkDynamicState> dynamic_state_descriptors;
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_VIEWPORT);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_SCISSOR);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_LINE_WIDTH);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
auto pdss = &create_info.state.ds;
VkPipelineDepthStencilStateCreateInfo ds2;
if (g_render_device->get_depth_bounds_support()) [[likely]]
{
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
}
else if (pdss->depthBoundsTestEnable)
{
rsx_log.warning("Depth bounds test is enabled in the pipeline object but not supported by the current driver.");
ds2 = *pdss;
pdss = &ds2;
ds2.depthBoundsTestEnable = VK_FALSE;
}
VkPipelineDynamicStateCreateInfo dynamic_state_info = {};
dynamic_state_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state_info.pDynamicStates = dynamic_state_descriptors.data();
dynamic_state_info.dynamicStateCount = ::size32(dynamic_state_descriptors);
VkPipelineVertexInputStateCreateInfo vi = { VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO };
VkPipelineViewportStateCreateInfo vp = {};
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.viewportCount = 1;
vp.scissorCount = 1;
auto pmss = &create_info.state.ms;
VkPipelineMultisampleStateCreateInfo ms2;
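// Bits [16, 20) of renderpass_key encode the sample count and must agree with the pipeline's multisample state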
ensure(pmss->rasterizationSamples == VkSampleCountFlagBits((create_info.renderpass_key >> 16) & 0xF)); // "Multisample state mismatch!"
if (pmss->rasterizationSamples != VK_SAMPLE_COUNT_1_BIT || pmss->sampleShadingEnable) [[unlikely]]
{
ms2 = *pmss;
pmss = &ms2;
if (ms2.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT)
{
// Update the sample mask pointer
ms2.pSampleMask = &create_info.state.temp_storage.msaa_sample_mask;
}
if (g_cfg.video.antialiasing_level == msaa_level::none && ms2.sampleShadingEnable)
{
// Do not compile with MSAA enabled if multisampling is disabled
rsx_log.warning("MSAA is disabled globally but a shader with multi-sampling enabled was submitted for compilation.");
ms2.sampleShadingEnable = VK_FALSE;
}
}
// Rebase pointers from pipeline structure in case it is moved/copied
VkPipelineColorBlendStateCreateInfo cs = create_info.state.cs;
cs.pAttachments = create_info.state.att_state;
VkGraphicsPipelineCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
info.pVertexInputState = &vi;
info.pInputAssemblyState = &create_info.state.ia;
info.pRasterizationState = &create_info.state.rs;
info.pColorBlendState = &cs;
info.pMultisampleState = pmss;
info.pViewportState = &vp;
info.pDepthStencilState = pdss;
info.stageCount = 2;
info.pStages = shader_stages;
info.pDynamicState = &dynamic_state_info;
info.layout = pipe_layout;
info.basePipelineIndex = -1;
info.basePipelineHandle = VK_NULL_HANDLE;
info.renderPass = vk::get_renderpass(*m_device, create_info.renderpass_key);
return int_compile_graphics_pipe(info, pipe_layout, vs_inputs, fs_inputs);
}
std::unique_ptr<glsl::program> pipe_compiler::compile(
const VkComputePipelineCreateInfo& create_info,
VkPipelineLayout pipe_layout,
op_flags flags, callback_t callback)
{
if (flags == COMPILE_INLINE)
{
return int_compile_compute_pipe(create_info, pipe_layout);
}
m_work_queue.push(create_info, pipe_layout, callback);
return {};
}
std::unique_ptr<glsl::program> pipe_compiler::compile(
const VkGraphicsPipelineCreateInfo& create_info,
VkPipelineLayout pipe_layout,
op_flags flags, callback_t /*callback*/,
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
// It is very inefficient to defer this as all pointers need to be saved
ensure(flags == COMPILE_INLINE);
return int_compile_graphics_pipe(create_info, pipe_layout, vs_inputs, fs_inputs);
}
std::unique_ptr<glsl::program> pipe_compiler::compile(
const vk::pipeline_props& create_info,
VkShaderModule module_handles[2],
VkPipelineLayout pipe_layout,
op_flags flags, callback_t callback,
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
if (flags == COMPILE_INLINE)
{
return int_compile_graphics_pipe(create_info, module_handles, pipe_layout, vs_inputs, fs_inputs);
}
m_work_queue.push(create_info, pipe_layout, module_handles, vs_inputs, fs_inputs, callback);
return {};
}
void initialize_pipe_compiler(int num_worker_threads)
{
if (num_worker_threads == 0)
{
// Select optimal number of compiler threads
const auto hw_threads = utils::get_thread_count();
if (hw_threads > 12)
{
num_worker_threads = 6;
}
else if (hw_threads > 8)
{
num_worker_threads = 4;
}
else if (hw_threads == 8)
{
num_worker_threads = 2;
}
else
{
num_worker_threads = 1;
}
}
ensure(num_worker_threads >= 1);
ensure(g_render_device); // "Cannot initialize pipe compiler before creating a logical device"
// Create the thread pool
g_pipe_compilers = std::make_unique<named_thread_group<pipe_compiler>>("RSX.W", num_worker_threads);
g_num_pipe_compilers = num_worker_threads;
// Initialize the workers. At least one inline compiler shall exist (doesn't actually run)
for (pipe_compiler& compiler : *g_pipe_compilers.get())
{
compiler.initialize(g_render_device);
}
}
void destroy_pipe_compiler()
{
g_pipe_compilers.reset();
}
pipe_compiler* get_pipe_compiler()
{
ensure(g_pipe_compilers);
int thread_index = g_compiler_index++;
return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers);
}
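// Illustrative call sequence (a sketch based on this file, not upstream code;
// the vk::pipe_compiler::COMPILE_INLINE qualification is assumed):
//
//   vk::initialize_pipe_compiler(0);   // after logical device creation; 0 = auto heuristic
//   auto prog = vk::get_pipe_compiler()->compile(
//       props, modules, layout, vk::pipe_compiler::COMPILE_INLINE, {}, vs_inputs, fs_inputs);
//   vk::destroy_pipe_compiler();       // before the device is torn down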
}
size: 9,103 | language: C++ | extension: .cpp | total_lines: 224 | avg_line_length: 36.241071 | max_line_length: 169 | alphanum_fraction: 0.723112 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,435 | file_name: VKTexture.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKTexture.cpp
#include "stdafx.h"
#include "VKAsyncScheduler.h"
#include "VKCompute.h"
#include "VKDMA.h"
#include "VKHelpers.h"
#include "VKFormats.h"
#include "VKRenderPass.h"
#include "VKRenderTargets.h"
#include "vkutils/data_heap.h"
#include "vkutils/image_helpers.h"
#include "VKGSRender.h"
#include "../GCM.h"
#include "../rsx_utils.h"
#include "util/asm.hpp"
namespace vk
{
static void gpu_swap_bytes_impl(const vk::command_buffer& cmd, vk::buffer* buf, u32 element_size, u32 data_offset, u32 data_length)
{
if (element_size == 4)
{
vk::get_compute_task<vk::cs_shuffle_32>()->run(cmd, buf, data_length, data_offset);
}
else if (element_size == 2)
{
vk::get_compute_task<vk::cs_shuffle_16>()->run(cmd, buf, data_length, data_offset);
}
else
{
fmt::throw_exception("Unreachable");
}
}
u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect)
{
if (aspect & (VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT))
{
return (base_size * 3);
}
else
{
return base_size;
}
}
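// Illustrative sizing (hypothetical numbers): a 256x256 D24S8 readback packs to
// 256*256*4 = 256 KiB of interleaved output, but the readback path also stages a
// separated depth bank (256 KiB) and a stencil bank (64 KiB) in the same buffer,
// so 3x the base size is a safe upper bound for depth/stencil aspects.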
void copy_image_to_buffer(
const vk::command_buffer& cmd,
const vk::image* src,
const vk::buffer* dst,
const VkBufferImageCopy& region,
const image_readback_options_t& options)
{
// Always validate
ensure(src->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src->current_layout == VK_IMAGE_LAYOUT_GENERAL);
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
ensure((region.imageExtent.width + region.imageOffset.x) <= src->width());
ensure((region.imageExtent.height + region.imageOffset.y) <= src->height());
switch (src->format())
{
default:
{
ensure(!options.swap_bytes); // "Implicit byteswap option not supported for specified format"
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 1, &region);
if (options.sync_region)
{
// Post-Transfer barrier
vk::insert_buffer_memory_barrier(cmd, dst->value,
options.sync_region.offset, options.sync_region.length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
}
break;
}
case VK_FORMAT_D32_SFLOAT:
{
rsx_log.error("Unsupported transfer (D16_FLOAT)"); // Need real games to test this.
ensure(region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT);
const u32 out_w = region.bufferRowLength ? region.bufferRowLength : region.imageExtent.width;
const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height;
const u32 packed32_length = out_w * out_h * 4;
const u32 packed16_length = out_w * out_h * 2;
const auto allocation_end = region.bufferOffset + packed32_length + packed16_length;
ensure(dst->size() >= allocation_end);
const auto data_offset = u32(region.bufferOffset);
const auto z32_offset = utils::align<u32>(data_offset + packed16_length, 256);
// 1. Copy the depth to buffer
VkBufferImageCopy region2;
region2 = region;
region2.bufferOffset = z32_offset;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 1, &region2);
// 2. Pre-compute barrier
vk::insert_buffer_memory_barrier(cmd, dst->value, z32_offset, packed32_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// 3. Do conversion with byteswap [D32->D16F]
if (!options.swap_bytes) [[likely]]
{
auto job = vk::get_compute_task<vk::cs_fconvert_task<f32, f16>>();
job->run(cmd, dst, z32_offset, packed32_length, data_offset);
}
else
{
auto job = vk::get_compute_task<vk::cs_fconvert_task<f32, f16, false, true>>();
job->run(cmd, dst, z32_offset, packed32_length, data_offset);
}
if (options.sync_region)
{
const u64 sync_end = options.sync_region.offset + options.sync_region.length;
const u64 write_end = region.bufferOffset + packed16_length;
const u64 sync_offset = std::min<u64>(region.bufferOffset, options.sync_region.offset);
const u64 sync_length = std::max<u64>(sync_end, write_end) - sync_offset;
// 4. Post-compute barrier
vk::insert_buffer_memory_barrier(cmd, dst->value, sync_offset, sync_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
}
break;
}
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
{
ensure(region.imageSubresource.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
const u32 out_w = region.bufferRowLength ? region.bufferRowLength : region.imageExtent.width;
const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height;
const u32 packed_length = out_w * out_h * 4;
const u32 in_depth_size = packed_length;
const u32 in_stencil_size = out_w * out_h;
const auto allocation_end = region.bufferOffset + packed_length + in_depth_size + in_stencil_size;
ensure(dst->size() >= allocation_end);
const auto data_offset = u32(region.bufferOffset);
const auto z_offset = utils::align<u32>(data_offset + packed_length, 256);
const auto s_offset = utils::align<u32>(z_offset + in_depth_size, 256);
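// Worked layout example (hypothetical numbers): for a 128x128 region with
// bufferOffset = 0, packed_length = 128*128*4 = 64 KiB; the depth bank then lands
// at z_offset = align(64 KiB, 256) = 0x10000 and the stencil bank at
// s_offset = align(0x10000 + 64 KiB, 256) = 0x20000 within the same buffer.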
// 1. Copy the depth and stencil blocks to separate banks
VkBufferImageCopy sub_regions[2];
sub_regions[0] = sub_regions[1] = region;
sub_regions[0].bufferOffset = z_offset;
sub_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
sub_regions[1].bufferOffset = s_offset;
sub_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 2, sub_regions);
// 2. Interleave the separated data blocks with a compute job
vk::cs_interleave_task *job;
if (!options.swap_bytes) [[likely]]
{
if (src->format() == VK_FORMAT_D24_UNORM_S8_UINT)
{
job = vk::get_compute_task<vk::cs_gather_d24x8<false>>();
}
else if (src->format_class() == RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)
{
job = vk::get_compute_task<vk::cs_gather_d32x8<false, true>>();
}
else
{
job = vk::get_compute_task<vk::cs_gather_d32x8<false>>();
}
}
else
{
if (src->format() == VK_FORMAT_D24_UNORM_S8_UINT)
{
job = vk::get_compute_task<vk::cs_gather_d24x8<true>>();
}
else if (src->format_class() == RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)
{
job = vk::get_compute_task<vk::cs_gather_d32x8<true, true>>();
}
else
{
job = vk::get_compute_task<vk::cs_gather_d32x8<true>>();
}
}
vk::insert_buffer_memory_barrier(cmd, dst->value, z_offset, in_depth_size + in_stencil_size,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
job->run(cmd, dst, data_offset, packed_length, z_offset, s_offset);
if (options.sync_region)
{
const u64 sync_end = options.sync_region.offset + options.sync_region.length;
const u64 write_end = region.bufferOffset + packed_length;
const u64 sync_offset = std::min<u64>(region.bufferOffset, options.sync_region.offset);
const u64 sync_length = std::max<u64>(sync_end, write_end) - sync_offset;
vk::insert_buffer_memory_barrier(cmd, dst->value, sync_offset, sync_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
}
break;
}
}
}
void copy_buffer_to_image(const vk::command_buffer& cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region)
{
// Always validate
ensure(dst->current_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL || dst->current_layout == VK_IMAGE_LAYOUT_GENERAL);
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
switch (dst->format())
{
default:
{
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 1, &region);
break;
}
case VK_FORMAT_D32_SFLOAT:
{
rsx_log.error("Unsupported transfer (D16_FLOAT)");
ensure(region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT);
const u32 out_w = region.bufferRowLength ? region.bufferRowLength : region.imageExtent.width;
const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height;
const u32 packed32_length = out_w * out_h * 4;
const u32 packed16_length = out_w * out_h * 2;
const auto allocation_end = region.bufferOffset + packed32_length + packed16_length;
ensure(src->size() >= allocation_end);
const auto data_offset = u32(region.bufferOffset);
const auto z32_offset = utils::align<u32>(data_offset + packed16_length, 256);
// 1. Pre-compute barrier
vk::insert_buffer_memory_barrier(cmd, src->value, z32_offset, packed32_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// 2. Do conversion with byteswap [D16F->D32F]
auto job = vk::get_compute_task<vk::cs_fconvert_task<f16, f32>>();
job->run(cmd, src, data_offset, packed16_length, z32_offset);
// 4. Post-compute barrier
vk::insert_buffer_memory_barrier(cmd, src->value, z32_offset, packed32_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
// 5. Copy the depth data to image
VkBufferImageCopy region2 = region;
region2.bufferOffset = z32_offset;
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 1, &region2);
break;
}
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
{
const u32 out_w = region.bufferRowLength ? region.bufferRowLength : region.imageExtent.width;
const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height;
const u32 packed_length = out_w * out_h * 4;
const u32 in_depth_size = packed_length;
const u32 in_stencil_size = out_w * out_h;
const auto allocation_end = region.bufferOffset + packed_length + in_depth_size + in_stencil_size;
ensure(src->size() >= allocation_end); // "Out of memory (compute heap). Lower your resolution scale setting."
const auto data_offset = u32(region.bufferOffset);
const auto z_offset = utils::align<u32>(data_offset + packed_length, 256);
const auto s_offset = utils::align<u32>(z_offset + in_depth_size, 256);
// Zero out the stencil block
vkCmdFillBuffer(cmd, src->value, s_offset, utils::align(in_stencil_size, 4), 0);
vk::insert_buffer_memory_barrier(cmd, src->value, s_offset, in_stencil_size,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
// 1. Scatter the interleaved data into separate depth and stencil blocks
vk::cs_interleave_task *job;
if (dst->format() == VK_FORMAT_D24_UNORM_S8_UINT)
{
job = vk::get_compute_task<vk::cs_scatter_d24x8>();
}
else if (dst->format_class() == RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)
{
job = vk::get_compute_task<vk::cs_scatter_d32x8<true>>();
}
else
{
job = vk::get_compute_task<vk::cs_scatter_d32x8<false>>();
}
job->run(cmd, src, data_offset, packed_length, z_offset, s_offset);
vk::insert_buffer_memory_barrier(cmd, src->value, z_offset, in_depth_size + in_stencil_size,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
// 2. Copy the separated blocks into the target
VkBufferImageCopy sub_regions[2];
sub_regions[0] = sub_regions[1] = region;
sub_regions[0].bufferOffset = z_offset;
sub_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
sub_regions[1].bufferOffset = s_offset;
sub_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 2, sub_regions);
break;
}
}
}
void copy_image_typeless(const vk::command_buffer& cmd, vk::image* src, vk::image* dst, const areai& src_rect, const areai& dst_rect,
u32 mipmaps, VkImageAspectFlags src_transfer_mask, VkImageAspectFlags dst_transfer_mask)
{
if (src->format() == dst->format())
{
if (src->format_class() == dst->format_class())
{
rsx_log.warning("[Performance warning] Image copy requested incorrectly for matching formats.");
copy_image(cmd, src, dst, src_rect, dst_rect, mipmaps, src_transfer_mask, dst_transfer_mask);
return;
}
else
{
// Should only happen for DEPTH_FLOAT <-> DEPTH_UINT at this time
const u32 mask = src->format_class() | dst->format_class();
if (mask != (RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32 | RSX_FORMAT_CLASS_DEPTH24_UNORM_X8_PACK32))
{
rsx_log.error("Unexpected (and possibly incorrect) typeless transfer setup.");
}
}
}
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
if (src != dst) [[likely]]
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
else
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
VkBufferImageCopy src_copy{}, dst_copy{};
src_copy.imageExtent = { u32(src_rect.x2 - src_rect.x1), u32(src_rect.y2 - src_rect.y1), 1 };
src_copy.imageOffset = { src_rect.x1, src_rect.y1, 0 };
src_copy.imageSubresource = { src->aspect() & src_transfer_mask, 0, 0, 1 };
dst_copy.imageExtent = { u32(dst_rect.x2 - dst_rect.x1), u32(dst_rect.y2 - dst_rect.y1), 1 };
dst_copy.imageOffset = { dst_rect.x1, dst_rect.y1, 0 };
dst_copy.imageSubresource = { dst->aspect() & dst_transfer_mask, 0, 0, 1 };
const auto src_texel_size = vk::get_format_texel_width(src->info.format);
const auto src_length = src_texel_size * src_copy.imageExtent.width * src_copy.imageExtent.height;
const auto min_scratch_size = calculate_working_buffer_size(src_length, src->aspect() | dst->aspect());
// Initialize scratch memory
auto scratch_buf = vk::get_scratch_buffer(cmd, min_scratch_size);
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
if (mip_level > 0)
{
// Technically never reached as this method only ever processes 1 mip
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, src_length,
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
}
vk::copy_image_to_buffer(cmd, src, scratch_buf, src_copy);
auto src_convert = get_format_convert_flags(src->info.format);
auto dst_convert = get_format_convert_flags(dst->info.format);
bool require_rw_barrier = true;
if (src_convert.first || dst_convert.first)
{
if (src_convert.first == dst_convert.first &&
src_convert.second == dst_convert.second)
{
// NOP, the two operations will cancel out
}
else
{
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, src_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
vk::cs_shuffle_base *shuffle_kernel = nullptr;
if (src_convert.first && dst_convert.first)
{
shuffle_kernel = vk::get_compute_task<vk::cs_shuffle_32_16>();
}
else
{
const auto block_size = src_convert.first ? src_convert.second : dst_convert.second;
if (block_size == 4)
{
shuffle_kernel = vk::get_compute_task<vk::cs_shuffle_32>();
}
else if (block_size == 2)
{
shuffle_kernel = vk::get_compute_task<vk::cs_shuffle_16>();
}
else
{
fmt::throw_exception("Unreachable");
}
}
shuffle_kernel->run(cmd, scratch_buf, src_length);
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, src_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
require_rw_barrier = false;
}
}
if (require_rw_barrier)
{
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, src_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
}
vk::copy_buffer_to_image(cmd, scratch_buf, dst, dst_copy);
src_copy.imageSubresource.mipLevel++;
dst_copy.imageSubresource.mipLevel++;
}
src->pop_layout(cmd);
if (src != dst) dst->pop_layout(cmd);
}
void copy_image(const vk::command_buffer& cmd, vk::image* src, vk::image* dst,
const areai& src_rect, const areai& dst_rect, u32 mipmaps,
VkImageAspectFlags src_transfer_mask, VkImageAspectFlags dst_transfer_mask)
{
// NOTE: src_aspect should match dst_aspect according to spec but some drivers seem to work just fine with the mismatch
if (const u32 aspect_bridge = (src->aspect() | dst->aspect());
(aspect_bridge & VK_IMAGE_ASPECT_COLOR_BIT) == 0 &&
src->format() != dst->format())
{
// Copying between two depth formats must match exactly or crashes will happen
rsx_log.warning("[Performance warning] Image copy was requested incorrectly for mismatched depth formats");
copy_image_typeless(cmd, src, dst, src_rect, dst_rect, mipmaps);
return;
}
VkImageSubresourceLayers a_src = {}, a_dst = {};
a_src.aspectMask = src->aspect() & src_transfer_mask;
a_src.baseArrayLayer = 0;
a_src.layerCount = 1;
a_src.mipLevel = 0;
a_dst = a_src;
a_dst.aspectMask = dst->aspect() & dst_transfer_mask;
VkImageCopy rgn = {};
rgn.extent.depth = 1;
rgn.extent.width = u32(src_rect.x2 - src_rect.x1);
rgn.extent.height = u32(src_rect.y2 - src_rect.y1);
rgn.dstOffset = { dst_rect.x1, dst_rect.y1, 0 };
rgn.srcOffset = { src_rect.x1, src_rect.y1, 0 };
rgn.srcSubresource = a_src;
rgn.dstSubresource = a_dst;
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
if (src != dst)
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
else
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
vkCmdCopyImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn);
rgn.srcSubresource.mipLevel++;
rgn.dstSubresource.mipLevel++;
}
src->pop_layout(cmd);
if (src != dst) dst->pop_layout(cmd);
}
void copy_scaled_image(const vk::command_buffer& cmd,
vk::image* src, vk::image* dst,
const areai& src_rect, const areai& dst_rect, u32 mipmaps,
bool compatible_formats, VkFilter filter)
{
VkImageSubresourceLayers a_src = {}, a_dst = {};
a_src.aspectMask = src->aspect();
a_src.baseArrayLayer = 0;
a_src.layerCount = 1;
a_src.mipLevel = 0;
a_dst = a_src;
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
//TODO: Use an array of offsets/dimensions for mipmapped blits (mipmap count > 1) since subimages will have different dimensions
if (src != dst)
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
else
{
src->push_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
if (compatible_formats && !src_rect.is_flipped() && !dst_rect.is_flipped() &&
src_rect.width() == dst_rect.width() && src_rect.height() == dst_rect.height())
{
VkImageCopy copy_rgn;
copy_rgn.srcOffset = { src_rect.x1, src_rect.y1, 0 };
copy_rgn.dstOffset = { dst_rect.x1, dst_rect.y1, 0 };
copy_rgn.dstSubresource = { dst->aspect(), 0, 0, 1 };
copy_rgn.srcSubresource = { src->aspect(), 0, 0, 1 };
copy_rgn.extent = { static_cast<u32>(src_rect.width()), static_cast<u32>(src_rect.height()), 1 };
vkCmdCopyImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
else if ((src->aspect() & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)
{
//Most depth/stencil formats cannot be scaled using hw blit
if (src->format() != dst->format())
{
// Can happen because of depth float mismatch. Format width should be equal RSX-side
auto typeless = vk::get_typeless_helper(dst->format(), dst->format_class(), src_rect.width(), src_rect.height());
copy_image_typeless(cmd, src, typeless, src_rect, src_rect, mipmaps);
copy_scaled_image(cmd, typeless, dst, src_rect, dst_rect, mipmaps, true, filter);
}
else
{
ensure(!dst_rect.is_flipped());
auto stretch_image_typeless_unsafe = [&cmd, filter](vk::image* src, vk::image* dst, vk::image* typeless,
const areai& src_rect, const areai& dst_rect, VkImageAspectFlags /*aspect*/, VkImageAspectFlags transfer_flags = 0xFF)
{
const auto src_w = src_rect.width();
const auto src_h = src_rect.height();
const auto dst_w = dst_rect.width();
const auto dst_h = dst_rect.height();
// Drivers are not very accepting of aspect COLOR -> aspect DEPTH or aspect STENCIL separately
// However, this works okay for D24S8 (nvidia-only format)
// NOTE: Transfers of single-aspect D/S from Nvidia's D24S8 are very slow
//1. Copy unscaled to typeless surface
copy_image(cmd, src, typeless, src_rect, { 0, 0, src_w, src_h }, 1, transfer_flags, 0xFF);
//2. Blit typeless surface to self
copy_scaled_image(cmd, typeless, typeless, { 0, 0, src_w, src_h }, { 0, src_h, dst_w, (src_h + dst_h) }, 1, true, filter);
//3. Copy back the aspect bits
copy_image(cmd, typeless, dst, {0, src_h, dst_w, (src_h + dst_h) }, dst_rect, 1, 0xFF, transfer_flags);
};
auto stretch_image_typeless_safe = [&cmd, filter](vk::image* src, vk::image* dst, vk::image* typeless,
const areai& src_rect, const areai& dst_rect, VkImageAspectFlags aspect, VkImageAspectFlags transfer_flags = 0xFF)
{
const auto src_w = src_rect.width();
const auto src_h = src_rect.height();
const auto dst_w = dst_rect.width();
const auto dst_h = dst_rect.height();
auto scratch_buf = vk::get_scratch_buffer(cmd, std::max(src_w, dst_w) * std::max(src_h, dst_h) * 4);
//1. Copy unscaled to typeless surface
VkBufferImageCopy info{};
info.imageOffset = { std::min(src_rect.x1, src_rect.x2), std::min(src_rect.y1, src_rect.y2), 0 };
info.imageExtent = { static_cast<u32>(src_w), static_cast<u32>(src_h), 1 };
info.imageSubresource = { aspect & transfer_flags, 0, 0, 1 };
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, scratch_buf->value, 1, &info);
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, VK_WHOLE_SIZE, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
info.imageOffset = {};
info.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
vkCmdCopyBufferToImage(cmd, scratch_buf->value, typeless->value, VK_IMAGE_LAYOUT_GENERAL, 1, &info);
//2. Blit typeless surface to self and apply transform if necessary
areai src_rect2 = { 0, 0, src_w, src_h };
if (src_rect.x1 > src_rect.x2) src_rect2.flip_horizontal();
if (src_rect.y1 > src_rect.y2) src_rect2.flip_vertical();
insert_image_memory_barrier(cmd, typeless->value, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
copy_scaled_image(cmd, typeless, typeless, src_rect2, { 0, src_h, dst_w, (src_h + dst_h) }, 1, true, filter);
insert_image_memory_barrier(cmd, typeless->value, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
//3. Copy back the aspect bits
info.imageExtent = { static_cast<u32>(dst_w), static_cast<u32>(dst_h), 1 };
info.imageOffset = { 0, src_h, 0 };
vkCmdCopyImageToBuffer(cmd, typeless->value, VK_IMAGE_LAYOUT_GENERAL, scratch_buf->value, 1, &info);
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, VK_WHOLE_SIZE, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
info.imageOffset = { dst_rect.x1, dst_rect.y1, 0 };
info.imageSubresource = { aspect & transfer_flags, 0, 0, 1 };
vkCmdCopyBufferToImage(cmd, scratch_buf->value, dst->value, dst->current_layout, 1, &info);
};
const u32 typeless_w = std::max(dst_rect.width(), src_rect.width());
const u32 typeless_h = src_rect.height() + dst_rect.height();
const auto gpu_family = vk::get_chip_family();
const bool use_unsafe_transport = !g_cfg.video.strict_rendering_mode && (gpu_family != chip_class::NV_generic && gpu_family < chip_class::NV_turing);
switch (src->format())
{
case VK_FORMAT_D16_UNORM:
{
auto typeless = vk::get_typeless_helper(VK_FORMAT_R16_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
change_image_layout(cmd, typeless, VK_IMAGE_LAYOUT_GENERAL);
if (use_unsafe_transport)
{
stretch_image_typeless_unsafe(src, dst, typeless, src_rect, dst_rect, VK_IMAGE_ASPECT_DEPTH_BIT);
}
else
{
// Ampere GPUs don't like the direct transfer hack above
stretch_image_typeless_safe(src, dst, typeless, src_rect, dst_rect, VK_IMAGE_ASPECT_DEPTH_BIT);
}
break;
}
case VK_FORMAT_D32_SFLOAT:
{
auto typeless = vk::get_typeless_helper(VK_FORMAT_R32_SFLOAT, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
change_image_layout(cmd, typeless, VK_IMAGE_LAYOUT_GENERAL);
if (use_unsafe_transport)
{
stretch_image_typeless_unsafe(src, dst, typeless, src_rect, dst_rect, VK_IMAGE_ASPECT_DEPTH_BIT);
}
else
{
stretch_image_typeless_safe(src, dst, typeless, src_rect, dst_rect, VK_IMAGE_ASPECT_DEPTH_BIT);
}
break;
}
case VK_FORMAT_D24_UNORM_S8_UINT:
{
const VkImageAspectFlags depth_stencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (use_unsafe_transport)
{
auto typeless = vk::get_typeless_helper(VK_FORMAT_B8G8R8A8_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
change_image_layout(cmd, typeless, VK_IMAGE_LAYOUT_GENERAL);
stretch_image_typeless_unsafe(src, dst, typeless, src_rect, dst_rect, depth_stencil);
}
else
{
auto typeless_depth = vk::get_typeless_helper(VK_FORMAT_B8G8R8A8_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
auto typeless_stencil = vk::get_typeless_helper(VK_FORMAT_R8_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
change_image_layout(cmd, typeless_depth, VK_IMAGE_LAYOUT_GENERAL);
change_image_layout(cmd, typeless_stencil, VK_IMAGE_LAYOUT_GENERAL);
stretch_image_typeless_safe(src, dst, typeless_depth, src_rect, dst_rect, depth_stencil, VK_IMAGE_ASPECT_DEPTH_BIT);
stretch_image_typeless_safe(src, dst, typeless_stencil, src_rect, dst_rect, depth_stencil, VK_IMAGE_ASPECT_STENCIL_BIT);
}
break;
}
case VK_FORMAT_D32_SFLOAT_S8_UINT:
{
// NOTE: Typeless transfer (Depth/Stencil->Equivalent Color->Depth/Stencil) of single aspects does not work on AMD when done from a non-depth texture
// Since the typeless transfer itself violates spec, the only way to make it work is to use a D32S8 intermediate
// Copy from src->intermediate then intermediate->dst for each aspect separately
// NOTE: While it may seem intuitive to use R32_SFLOAT as the carrier for the depth aspect, this does not work properly
// Floating point interpolation is non-linear from a bit-by-bit perspective and generates undesirable effects
auto typeless_depth = vk::get_typeless_helper(VK_FORMAT_B8G8R8A8_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
auto typeless_stencil = vk::get_typeless_helper(VK_FORMAT_R8_UNORM, RSX_FORMAT_CLASS_COLOR, typeless_w, typeless_h);
change_image_layout(cmd, typeless_depth, VK_IMAGE_LAYOUT_GENERAL);
change_image_layout(cmd, typeless_stencil, VK_IMAGE_LAYOUT_GENERAL);
const VkImageAspectFlags depth_stencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
stretch_image_typeless_safe(src, dst, typeless_depth, src_rect, dst_rect, depth_stencil, VK_IMAGE_ASPECT_DEPTH_BIT);
stretch_image_typeless_safe(src, dst, typeless_stencil, src_rect, dst_rect, depth_stencil, VK_IMAGE_ASPECT_STENCIL_BIT);
break;
}
default:
fmt::throw_exception("Unreachable");
break;
}
}
}
else
{
VkImageBlit rgn = {};
rgn.srcOffsets[0] = { src_rect.x1, src_rect.y1, 0 };
rgn.srcOffsets[1] = { src_rect.x2, src_rect.y2, 1 };
rgn.dstOffsets[0] = { dst_rect.x1, dst_rect.y1, 0 };
rgn.dstOffsets[1] = { dst_rect.x2, dst_rect.y2, 1 };
rgn.dstSubresource = a_dst;
rgn.srcSubresource = a_src;
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
vkCmdBlitImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn, filter);
rgn.srcSubresource.mipLevel++;
rgn.dstSubresource.mipLevel++;
}
}
src->pop_layout(cmd);
if (src != dst) dst->pop_layout(cmd);
}
template <typename WordType, bool SwapBytes>
cs_deswizzle_base* get_deswizzle_transformation(u32 block_size)
{
switch (block_size)
{
case 4:
return vk::get_compute_task<cs_deswizzle_3d<u32, WordType, SwapBytes>>();
case 8:
return vk::get_compute_task<cs_deswizzle_3d<u64, WordType, SwapBytes>>();
case 16:
return vk::get_compute_task<cs_deswizzle_3d<u128, WordType, SwapBytes>>();
default:
fmt::throw_exception("Unreachable");
}
}
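// Illustrative mapping (assumes the caller passes block_size = word_size * word_count):
// an RGBA8 texture (4-byte words, 1 word per block) selects cs_deswizzle_3d<u32, u32, ...>,
// while RGBA16F (2-byte words, 4 words per block => 8-byte blocks) selects
// cs_deswizzle_3d<u64, u16, ...>.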
static void gpu_deswizzle_sections_impl(const vk::command_buffer& cmd, vk::buffer* scratch_buf, u32 dst_offset, int word_size, int word_count, bool swap_bytes, std::vector<VkBufferImageCopy>& sections)
{
// NOTE: This has to be done individually for every LOD
vk::cs_deswizzle_base* job = nullptr;
const auto block_size = (word_size * word_count);
ensure(word_size == 4 || word_size == 2);
if (!swap_bytes)
{
if (word_size == 4)
{
job = get_deswizzle_transformation<u32, false>(block_size);
}
else
{
job = get_deswizzle_transformation<u16, false>(block_size);
}
}
else
{
if (word_size == 4)
{
job = get_deswizzle_transformation<u32, true>(block_size);
}
else
{
job = get_deswizzle_transformation<u16, true>(block_size);
}
}
ensure(job);
auto next_layer = sections.front().imageSubresource.baseArrayLayer;
auto next_level = sections.front().imageSubresource.mipLevel;
unsigned base = 0;
unsigned lods = 0;
std::vector<std::pair<unsigned, unsigned>> packets;
for (unsigned i = 0; i < sections.size(); ++i)
{
ensure(sections[i].bufferRowLength);
const auto layer = sections[i].imageSubresource.baseArrayLayer;
const auto level = sections[i].imageSubresource.mipLevel;
if (layer == next_layer &&
level == next_level)
{
next_level++;
lods++;
continue;
}
packets.emplace_back(base, lods);
next_layer = layer;
next_level = 1;
base = i;
lods = 1;
}
if (packets.empty() ||
(packets.back().first + packets.back().second) < sections.size())
{
packets.emplace_back(base, lods);
}
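// Worked example (hypothetical input): a layered upload ordered as
// [L0M0, L0M1, L0M2, L1M0, L1M1, L1M2] yields packets {base=0, lods=3} and
// {base=3, lods=3}, so each layer's full mip chain is deswizzled in one dispatch.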
for (const auto &packet : packets)
{
const auto& section = sections[packet.first];
const auto src_offset = section.bufferOffset;
// Align output to 128-byte boundary to keep some drivers happy
dst_offset = utils::align(dst_offset, 128);
u32 data_length = 0;
for (unsigned i = 0, j = packet.first; i < packet.second; ++i, ++j)
{
const u32 packed_size = sections[j].imageExtent.width * sections[j].imageExtent.height * sections[j].imageExtent.depth * block_size;
sections[j].bufferOffset = dst_offset;
dst_offset += packed_size;
data_length += packed_size;
}
const u32 buf_off32 = static_cast<u32>(section.bufferOffset);
const u32 src_off32 = static_cast<u32>(src_offset);
job->run(cmd, scratch_buf, buf_off32, scratch_buf, src_off32, data_length,
section.imageExtent.width, section.imageExtent.height, section.imageExtent.depth, packet.second);
}
ensure(dst_offset <= scratch_buf->size());
}
static const vk::command_buffer& prepare_for_transfer(const vk::command_buffer& primary_cb, vk::image* dst_image, rsx::flags32_t& flags)
{
AsyncTaskScheduler* async_scheduler = (flags & image_upload_options::upload_contents_async)
? std::addressof(g_fxo->get<AsyncTaskScheduler>())
: nullptr;
if (async_scheduler && (dst_image->aspect() & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)))
{
auto pdev = vk::get_current_renderer();
if (pdev->get_graphics_queue_family() != pdev->get_transfer_queue_family()) [[ likely ]]
{
// According to spec, we cannot call vkCopyBufferToImage on a queue that does not support VK_QUEUE_GRAPHICS_BIT (VUID-vkCmdCopyBufferToImage-commandBuffer-07737)
// AMD doesn't care about this, but NVIDIA will crash if you try to cheat.
// We can just disable it for this case - it is actually very rare to upload depth-stencil stuff from CPU and RDB already uses inline uploads
flags &= ~image_upload_options::upload_contents_async;
async_scheduler = nullptr;
}
}
const vk::command_buffer* pcmd = nullptr;
if (async_scheduler)
{
auto async_cmd = async_scheduler->get_current();
async_cmd->begin();
pcmd = async_cmd;
if (!(flags & image_upload_options::preserve_image_layout))
{
flags |= image_upload_options::initialize_image_layout;
}
// Queue transfer stuff. Must release from primary if owned and acquire in secondary.
// Ignore queue transfers when running in the hacky "fast" mode. We're already violating spec there.
if (dst_image->current_layout != VK_IMAGE_LAYOUT_UNDEFINED && async_scheduler->is_host_mode())
{
// Release barrier
dst_image->queue_release(primary_cb, pcmd->get_queue_family(), dst_image->current_layout);
// Acquire barrier. This is not needed if we're going to be changing layouts later anyway (implicit acquire)
if (!(flags & image_upload_options::initialize_image_layout))
{
dst_image->queue_acquire(*pcmd, dst_image->current_layout);
}
}
}
else
{
if (vk::is_renderpass_open(primary_cb))
{
vk::end_renderpass(primary_cb);
}
pcmd = &primary_cb;
}
ensure(pcmd);
if (flags & image_upload_options::initialize_image_layout)
{
dst_image->change_layout(*pcmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
return *pcmd;
}
static const std::pair<u32, u32> calculate_upload_pitch(int format, u32 heap_align, vk::image* dst_image, const rsx::subresource_layout& layout)
{
u32 block_in_pixel = rsx::get_format_block_size_in_texel(format);
u8 block_size_in_bytes = rsx::get_format_block_size_in_bytes(format);
u32 row_pitch, upload_pitch_in_texel;
if (!heap_align) [[likely]]
{
if (!layout.border) [[likely]]
{
row_pitch = (layout.pitch_in_block * block_size_in_bytes);
}
else
{
// Skip the border texels if possible. Padding is undesirable for GPU deswizzle
row_pitch = (layout.width_in_block * block_size_in_bytes);
}
// We have row_pitch in source coordinates. But some formats have a software decode step which can affect this packing!
// For such formats, the packed pitch on src does not match packed pitch on dst
if (!rsx::is_compressed_host_format(format))
{
const auto host_texel_width = vk::get_format_texel_width(dst_image->format());
const auto host_packed_pitch = host_texel_width * layout.width_in_texel;
row_pitch = std::max<u32>(row_pitch, host_packed_pitch);
upload_pitch_in_texel = row_pitch / host_texel_width;
}
else
{
upload_pitch_in_texel = std::max<u32>(block_in_pixel * row_pitch / block_size_in_bytes, layout.width_in_texel);
}
}
else
{
row_pitch = rsx::align2(layout.width_in_block * block_size_in_bytes, heap_align);
upload_pitch_in_texel = std::max<u32>(block_in_pixel * row_pitch / block_size_in_bytes, layout.width_in_texel);
ensure(row_pitch == heap_align);
}
return { row_pitch, upload_pitch_in_texel };
}
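// Worked example (hypothetical numbers): uploading a 100-texel-wide RGBA8 row
// (block_size = 4 bytes, 1 texel per block) into a heap with heap_align = 512 gives
// row_pitch = align2(100 * 4, 512) = 512 and upload_pitch_in_texel = 512 / 4 = 128.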
void upload_image(const vk::command_buffer& cmd, vk::image* dst_image,
const std::vector<rsx::subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 layer_count,
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags)
{
const bool requires_depth_processing = (dst_image->aspect() & VK_IMAGE_ASPECT_STENCIL_BIT) || (format == CELL_GCM_TEXTURE_DEPTH16_FLOAT);
rsx::texture_uploader_capabilities caps{ .alignment = heap_align };
rsx::texture_memory_info opt{};
bool check_caps = true;
vk::buffer* scratch_buf = nullptr;
u32 scratch_offset = 0;
u32 image_linear_size;
vk::buffer* upload_buffer = nullptr;
usz offset_in_upload_buffer = 0;
std::vector<VkBufferImageCopy> copy_regions;
std::vector<VkBufferCopy> buffer_copies;
std::vector<std::pair<VkBuffer, u32>> upload_commands;
copy_regions.reserve(subresource_layout.size());
auto& cmd2 = prepare_for_transfer(cmd, dst_image, image_setup_flags);
for (const rsx::subresource_layout &layout : subresource_layout)
{
const auto [row_pitch, upload_pitch_in_texel] = calculate_upload_pitch(format, heap_align, dst_image, layout);
caps.alignment = row_pitch;
// Calculate estimated memory utilization for this subresource
image_linear_size = row_pitch * layout.height_in_block * layout.depth;
// Only do GPU-side conversion if occupancy is good
if (check_caps)
{
caps.supports_byteswap = (image_linear_size >= 1024) || (image_setup_flags & source_is_gpu_resident);
caps.supports_hw_deswizzle = caps.supports_byteswap;
caps.supports_zero_copy = caps.supports_byteswap;
caps.supports_vtc_decoding = false;
check_caps = false;
}
auto buf_allocator = [&](usz) -> std::tuple<void*, usz>
{
if (image_setup_flags & source_is_gpu_resident)
{
// We should never reach here, unless something is very wrong...
fmt::throw_exception("Cannot allocate CPU memory for GPU-only data");
}
// Map with extra padding bytes in case of realignment
offset_in_upload_buffer = upload_heap.alloc<512>(image_linear_size + 8);
void* mapped_buffer = upload_heap.map(offset_in_upload_buffer, image_linear_size + 8);
return { mapped_buffer, image_linear_size };
};
auto io_buf = rsx::io_buffer(buf_allocator);
opt = upload_texture_subresource(io_buf, layout, format, is_swizzled, caps);
upload_heap.unmap();
if (image_setup_flags & source_is_gpu_resident)
{
// Read from GPU buf if the input is already uploaded.
auto [iobuf, io_offset] = layout.data.raw();
upload_buffer = static_cast<buffer*>(iobuf);
offset_in_upload_buffer = io_offset;
// Never upload. Data is already resident.
opt.require_upload = false;
}
else
{
// Read from upload buffer
upload_buffer = upload_heap.heap.get();
}
copy_regions.push_back({});
auto& copy_info = copy_regions.back();
copy_info.bufferOffset = offset_in_upload_buffer;
copy_info.imageExtent.height = layout.height_in_texel;
copy_info.imageExtent.width = layout.width_in_texel;
copy_info.imageExtent.depth = layout.depth;
copy_info.imageSubresource.aspectMask = flags;
copy_info.imageSubresource.layerCount = 1;
copy_info.imageSubresource.baseArrayLayer = layout.layer;
copy_info.imageSubresource.mipLevel = layout.level;
copy_info.bufferRowLength = upload_pitch_in_texel;
if (opt.require_upload)
{
ensure(!opt.deferred_cmds.empty());
auto base_addr = static_cast<const char*>(opt.deferred_cmds.front().src);
auto end_addr = static_cast<const char*>(opt.deferred_cmds.back().src) + opt.deferred_cmds.back().length;
auto data_length = static_cast<u32>(end_addr - base_addr);
u64 src_address = 0;
if (uptr(base_addr) > uptr(vm::g_sudo_addr))
{
src_address = uptr(base_addr) - uptr(vm::g_sudo_addr);
}
else
{
src_address = uptr(base_addr) - uptr(vm::g_base_addr);
}
auto dma_mapping = vk::map_dma(static_cast<u32>(src_address), static_cast<u32>(data_length));
ensure(dma_mapping.second->size() >= (dma_mapping.first + data_length));
vk::load_dma(::narrow<u32>(src_address), data_length);
upload_buffer = dma_mapping.second;
offset_in_upload_buffer = dma_mapping.first;
copy_info.bufferOffset = offset_in_upload_buffer;
}
else if (!layout.layer && !layout.level)
{
// Do not allow mixed transfer modes.
// This can happen in special cases, e.g. mipN having different processing than mip0, as is the case with the last VTC mip
caps.supports_zero_copy = false;
}
if (opt.require_swap || opt.require_deswizzle || requires_depth_processing)
{
if (!scratch_buf)
{
// Calculate enough scratch memory. We need 2x the size of layer 0 to fit all the mip levels and an extra 128 bytes per level as alignment overhead.
const u64 layer_size = (image_linear_size + image_linear_size);
u64 scratch_buf_size = 128u * ::size32(subresource_layout) + (layer_size * layer_count);
if (opt.require_deswizzle)
{
// Double the memory if hw deswizzle is going to be used.
// For GPU deswizzle, the memory is not transformed in-place, rather the decoded texture is placed at the end of the uploaded data.
scratch_buf_size += scratch_buf_size;
}
if (requires_depth_processing)
{
// D-S aspect requires a load section that can fit a separated block => D(4) + S(1)
// Due to reverse processing of inputs, only enough space to fit one layer is needed here.
scratch_buf_size += (image_linear_size * 5) / 4;
}
scratch_buf = vk::get_scratch_buffer(cmd2, scratch_buf_size);
buffer_copies.reserve(subresource_layout.size());
}
if (layout.level == 0)
{
// Align mip0 on a 128-byte boundary
scratch_offset = utils::align(scratch_offset, 128);
}
// Copy from upload heap to scratch mem
if (opt.require_upload)
{
for (const auto& copy_cmd : opt.deferred_cmds)
{
buffer_copies.push_back({});
auto& copy = buffer_copies.back();
copy.srcOffset = uptr(copy_cmd.dst) + offset_in_upload_buffer;
copy.dstOffset = scratch_offset;
copy.size = copy_cmd.length;
}
}
else if (upload_buffer != scratch_buf || offset_in_upload_buffer != scratch_offset)
{
buffer_copies.push_back({});
auto& copy = buffer_copies.back();
copy.srcOffset = offset_in_upload_buffer;
copy.dstOffset = scratch_offset;
copy.size = image_linear_size;
}
// Point data source to scratch mem
copy_info.bufferOffset = scratch_offset;
scratch_offset += image_linear_size;
ensure((scratch_offset + image_linear_size) <= scratch_buf->size()); // "Out of scratch memory"
}
if (opt.require_upload)
{
if (upload_commands.empty() || upload_buffer->value != upload_commands.back().first)
{
upload_commands.emplace_back(upload_buffer->value, 1);
}
else
{
upload_commands.back().second++;
}
copy_info.bufferRowLength = upload_pitch_in_texel;
}
}
ensure(upload_buffer);
if (opt.require_swap || opt.require_deswizzle || requires_depth_processing)
{
ensure(scratch_buf);
if (upload_commands.size() > 1)
{
auto range_ptr = buffer_copies.data();
for (const auto& op : upload_commands)
{
vkCmdCopyBuffer(cmd2, op.first, scratch_buf->value, op.second, range_ptr);
range_ptr += op.second;
}
}
else if (!buffer_copies.empty())
{
vkCmdCopyBuffer(cmd2, upload_buffer->value, scratch_buf->value, static_cast<u32>(buffer_copies.size()), buffer_copies.data());
}
insert_buffer_memory_barrier(cmd2, scratch_buf->value, 0, scratch_offset, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
}
// Swap and deswizzle if requested
if (opt.require_deswizzle)
{
gpu_deswizzle_sections_impl(cmd2, scratch_buf, scratch_offset, opt.element_size, opt.block_length, opt.require_swap, copy_regions);
}
else if (opt.require_swap)
{
gpu_swap_bytes_impl(cmd2, scratch_buf, opt.element_size, 0, scratch_offset);
}
// CopyBufferToImage routines
if (requires_depth_processing)
{
// Upload in reverse to avoid polluting data in lower space
for (auto rIt = copy_regions.crbegin(); rIt != copy_regions.crend(); ++rIt)
{
vk::copy_buffer_to_image(cmd2, scratch_buf, dst_image, *rIt);
}
}
else if (scratch_buf)
{
ensure(opt.require_deswizzle || opt.require_swap);
const auto block_start = copy_regions.front().bufferOffset;
insert_buffer_memory_barrier(cmd2, scratch_buf->value, block_start, scratch_offset, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
vkCmdCopyBufferToImage(cmd2, scratch_buf->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
}
else if (upload_commands.size() > 1)
{
auto region_ptr = copy_regions.data();
for (const auto& op : upload_commands)
{
vkCmdCopyBufferToImage(cmd2, op.first, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, op.second, region_ptr);
region_ptr += op.second;
}
}
else
{
vkCmdCopyBufferToImage(cmd2, upload_buffer->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
}
if (cmd2.get_queue_family() != cmd.get_queue_family())
{
// Release from async chain, the primary chain will acquire later
dst_image->queue_release(cmd2, cmd.get_queue_family(), dst_image->current_layout);
}
if (auto rsxthr = static_cast<VKGSRender*>(rsx::get_current_renderer()))
{
rsxthr->on_guest_texture_read(cmd2);
}
}
std::pair<buffer*, u32> detile_memory_block(const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region,
const utils::address_range& range, u16 width, u16 height, u8 bpp)
{
// Calculate the true length of the usable memory section
const auto available_tile_size = tiled_region.tile->size - (range.start - tiled_region.base_address);
const auto max_content_size = tiled_region.tile->pitch * utils::align<u32>(height, 64);
const auto section_length = std::min(max_content_size, available_tile_size);
// Sync the DMA layer
const auto dma_mapping = vk::map_dma(range.start, section_length);
vk::load_dma(range.start, section_length);
// Allocate scratch and prepare for the GPU job
const auto scratch_buf = vk::get_scratch_buffer(cmd, section_length * 3); // 0 = linear data, 1 = padding (deswz), 2 = tiled data
const auto tiled_data_scratch_offset = section_length * 2;
const auto linear_data_scratch_offset = 0u;
// Schedule the job
const RSX_detiler_config config =
{
.tile_base_address = tiled_region.base_address,
.tile_base_offset = range.start - tiled_region.base_address,
.tile_rw_offset = range.start - tiled_region.base_address, // TODO
.tile_size = tiled_region.tile->size,
.tile_pitch = tiled_region.tile->pitch,
.bank = tiled_region.tile->bank,
.dst = scratch_buf,
.dst_offset = linear_data_scratch_offset,
.src = scratch_buf,
.src_offset = section_length * 2,
.image_width = width,
.image_height = height,
.image_pitch = static_cast<u32>(width) * bpp,
.image_bpp = bpp
};
// Transfer
VkBufferCopy copy_rgn
{
.srcOffset = dma_mapping.first,
.dstOffset = tiled_data_scratch_offset,
.size = section_length
};
vkCmdCopyBuffer(cmd, dma_mapping.second->value, scratch_buf->value, 1, &copy_rgn);
// Barrier
vk::insert_buffer_memory_barrier(
cmd, scratch_buf->value, linear_data_scratch_offset, section_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// Detile
vk::get_compute_task<vk::cs_tile_memcpy<RSX_detiler_op::decode>>()->run(cmd, config);
// Barrier
vk::insert_buffer_memory_barrier(
cmd, scratch_buf->value, linear_data_scratch_offset, static_cast<u32>(width) * height * bpp,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT);
// Return a descriptor pointing to the detiled (linear) data
return { scratch_buf, linear_data_scratch_offset };
}
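// Illustrative scratch layout (hypothetical numbers): with section_length = 1 MiB,
// the 3 MiB scratch buffer holds the linear output at offset 0, deswizzle padding
// at 1 MiB, and the raw tiled copy at 2 MiB, matching the dst_offset/src_offset
// pair in the detiler config above.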
void blitter::scale_image(vk::command_buffer& cmd, vk::image* src, vk::image* dst, areai src_area, areai dst_area, bool interpolate, const rsx::typeless_xfer& xfer_info)
{
vk::image* real_src = src;
vk::image* real_dst = dst;
if (dst->current_layout == VK_IMAGE_LAYOUT_UNDEFINED)
{
// Watch out for lazy init
ensure(src != dst);
dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
// Optimization pass; check for pass-through data transfer
if (!xfer_info.flip_horizontal && !xfer_info.flip_vertical && src_area.height() == dst_area.height())
{
auto src_w = src_area.width();
auto dst_w = dst_area.width();
if (xfer_info.src_is_typeless) src_w = static_cast<int>(src_w * xfer_info.src_scaling_hint);
if (xfer_info.dst_is_typeless) dst_w = static_cast<int>(dst_w * xfer_info.dst_scaling_hint);
if (src_w == dst_w)
{
// Final dimensions are a match
if (xfer_info.src_is_typeless || xfer_info.dst_is_typeless)
{
vk::copy_image_typeless(cmd, src, dst, src_area, dst_area, 1);
}
else
{
copy_image(cmd, src, dst, src_area, dst_area, 1);
}
return;
}
}
if (xfer_info.src_is_typeless)
{
const auto format = xfer_info.src_native_format_override ?
VkFormat(xfer_info.src_native_format_override) :
vk::get_compatible_sampler_format(vk::get_current_renderer()->get_formats_support(), xfer_info.src_gcm_format);
if (format != src->format())
{
// Normalize input region (memory optimization)
const auto old_src_area = src_area;
src_area.y2 -= src_area.y1;
src_area.y1 = 0;
src_area.x2 = static_cast<int>(src_area.width() * xfer_info.src_scaling_hint);
src_area.x1 = 0;
// Transfer bits from src to typeless src
real_src = vk::get_typeless_helper(format, rsx::classify_format(xfer_info.src_gcm_format), src_area.width(), src_area.height());
real_src->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vk::copy_image_typeless(cmd, src, real_src, old_src_area, src_area, 1);
}
}
// Save output region descriptor
const auto old_dst_area = dst_area;
if (xfer_info.dst_is_typeless)
{
const auto format = xfer_info.dst_native_format_override ?
VkFormat(xfer_info.dst_native_format_override) :
vk::get_compatible_sampler_format(vk::get_current_renderer()->get_formats_support(), xfer_info.dst_gcm_format);
if (format != dst->format())
{
// Normalize output region (memory optimization)
dst_area.y2 -= dst_area.y1;
dst_area.y1 = 0;
dst_area.x2 = static_cast<int>(dst_area.width() * xfer_info.dst_scaling_hint);
dst_area.x1 = 0;
// Account for possibility where SRC is typeless and DST is typeless and both map to the same format
auto required_height = dst_area.height();
if (real_src != src && real_src->format() == format)
{
required_height += src_area.height();
// Move the dst area just below the src area
dst_area.y1 += src_area.y2;
dst_area.y2 += src_area.y2;
}
real_dst = vk::get_typeless_helper(format, rsx::classify_format(xfer_info.dst_gcm_format), dst_area.width(), required_height);
}
}
// Prepare typeless resources for the operation if needed
if (real_src != src)
{
const auto layout = ((real_src == real_dst) ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
real_src->change_layout(cmd, layout);
}
if (real_dst != dst && real_dst != real_src)
{
real_dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
// Checks
if (src_area.x2 <= src_area.x1 || src_area.y2 <= src_area.y1 || dst_area.x2 <= dst_area.x1 || dst_area.y2 <= dst_area.y1)
{
rsx_log.error("Blit request consists of an empty region descriptor!");
return;
}
if (src_area.x1 < 0 || src_area.x2 > static_cast<s32>(real_src->width()) || src_area.y1 < 0 || src_area.y2 > static_cast<s32>(real_src->height()))
{
rsx_log.error("Blit request denied because the source region does not fit!");
return;
}
if (dst_area.x1 < 0 || dst_area.x2 > static_cast<s32>(real_dst->width()) || dst_area.y1 < 0 || dst_area.y2 > static_cast<s32>(real_dst->height()))
{
rsx_log.error("Blit request denied because the destination region does not fit!");
return;
}
if (xfer_info.flip_horizontal)
{
src_area.flip_horizontal();
}
if (xfer_info.flip_vertical)
{
src_area.flip_vertical();
}
ensure(real_src->aspect() == real_dst->aspect()); // "Incompatible source and destination format!"
copy_scaled_image(cmd, real_src, real_dst, src_area, dst_area, 1,
formats_are_bitcast_compatible(real_src, real_dst),
interpolate ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
if (real_dst != dst)
{
vk::copy_image_typeless(cmd, real_dst, dst, dst_area, old_dst_area, 1);
}
}
}
size: 54,017 | language: C++ | extension: .cpp | total_lines: 1,243 | avg_line_length: 39.097345 | max_line_length: 202 | alphanum_fraction: 0.692079 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,436 | file_name: VKResourceManager.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
#include "stdafx.h"
#include "VKResourceManager.h"
#include "VKGSRender.h"
#include "VKCommandStream.h"
namespace vk
{
struct vmm_memory_stats
{
std::unordered_map<uptr, vmm_allocation_t> allocations;
std::unordered_map<uptr, atomic_t<u64>> memory_usage;
std::unordered_map<vmm_allocation_pool, atomic_t<u64>> pool_usage;
void clear()
{
if (!allocations.empty())
{
rsx_log.error("Leaking memory allocations!");
for (auto& leak : allocations)
{
rsx_log.error("Memory handle 0x%llx (%llu bytes) allocated from pool %d was not freed.",
leak.first, leak.second.size, static_cast<int>(leak.second.pool));
}
}
allocations.clear();
memory_usage.clear();
pool_usage.clear();
}
}
g_vmm_stats;
resource_manager g_resource_manager;
atomic_t<u64> g_event_ctr;
atomic_t<u64> g_last_completed_event;
constexpr u64 s_vmm_warn_threshold_size = 2000 * 0x100000; // Warn if allocation on a single heap exceeds this value
resource_manager* get_resource_manager()
{
return &g_resource_manager;
}
garbage_collector* get_gc()
{
return &g_resource_manager;
}
void resource_manager::trim()
{
// For any managed resources, try to keep the number of unused/idle resources as low as possible.
// Improves search times as well as keeping us below the hardware limit.
const auto limits = get_current_renderer()->gpu().get_limits();
const auto allocated_sampler_count = vmm_get_application_pool_usage(VMM_ALLOCATION_POOL_SAMPLER);
const auto max_allowed_samplers = std::min((limits.maxSamplerAllocationCount * 3u) / 4u, 2048u);
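// Example (hypothetical device): with maxSamplerAllocationCount = 4000 the budget
// is min(3000, 2048) = 2048 live samplers before unreferenced ones are collected.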
if (allocated_sampler_count > max_allowed_samplers)
{
ensure(max_allowed_samplers);
rsx_log.warning("Trimming allocated samplers. Allocated = %u, Max = %u", allocated_sampler_count, limits.maxSamplerAllocationCount);
auto filter_expr = [](const cached_sampler_object_t& sampler)
{
// Pick only where we have no ref
return !sampler.has_refs();
};
for (auto& object : m_sampler_pool.collect(filter_expr))
{
dispose(object);
}
}
}
u64 get_event_id()
{
return g_event_ctr++;
}
u64 current_event_id()
{
return g_event_ctr.load();
}
u64 last_completed_event_id()
{
return g_last_completed_event.load();
}
void on_event_completed(u64 event_id, bool flush)
{
if (!flush && g_cfg.video.multithreaded_rsx)
{
auto& offloader_thread = g_fxo->get<rsx::dma_manager>();
ensure(!offloader_thread.is_current_thread());
offloader_thread.backend_ctrl(rctrl_run_gc, reinterpret_cast<void*>(event_id));
return;
}
g_resource_manager.eid_completed(event_id);
g_last_completed_event = std::max(event_id, g_last_completed_event.load());
}
void print_debug_markers()
{
for (const auto marker : g_resource_manager.gather_debug_markers())
{
marker->dump();
}
}
static constexpr f32 size_in_GiB(u64 size)
{
return size / (1024.f * 1024.f * 1024.f);
}
void vmm_notify_memory_allocated(void* handle, u32 memory_type, u64 memory_size, vmm_allocation_pool pool)
{
auto key = reinterpret_cast<uptr>(handle);
const vmm_allocation_t info = { memory_size, memory_type, pool };
if (const auto ins = g_vmm_stats.allocations.insert_or_assign(key, info);
!ins.second)
{
rsx_log.error("Duplicate vmm entry with memory handle 0x%llx", key);
}
g_vmm_stats.pool_usage[pool] += memory_size;
auto& vmm_size = g_vmm_stats.memory_usage[memory_type];
vmm_size += memory_size;
if (vmm_size > s_vmm_warn_threshold_size && (vmm_size - memory_size) <= s_vmm_warn_threshold_size)
{
rsx_log.warning("Memory type 0x%x has allocated more than %04.2fG. Currently allocated %04.2fG",
memory_type, size_in_GiB(s_vmm_warn_threshold_size), size_in_GiB(vmm_size));
}
}
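// Worked example (added for exposition; not part of the original file): the warning above
// fires exactly once per crossing of the 2000 MiB threshold:
//   1990 MiB used + 20 MiB alloc -> 2010 > 2000 && 1990 <= 2000 -> warn
//   2010 MiB used + 20 MiB alloc -> 2030 > 2000 && 2010 >  2000 -> silent
static_assert(size_in_GiB(s_vmm_warn_threshold_size) < 2.0f); // threshold is ~1.95 GiB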
void vmm_notify_memory_freed(void* handle)
{
auto key = reinterpret_cast<uptr>(handle);
if (auto found = g_vmm_stats.allocations.find(key);
found != g_vmm_stats.allocations.end())
{
const auto& info = found->second;
g_vmm_stats.memory_usage[info.type_index] -= info.size;
g_vmm_stats.pool_usage[info.pool] -= info.size;
g_vmm_stats.allocations.erase(found);
}
}
void vmm_reset()
{
g_vmm_stats.clear();
g_event_ctr = 0;
g_last_completed_event = 0;
}
u64 vmm_get_application_memory_usage(const memory_type_info& memory_type)
{
u64 result = 0;
for (const auto& memory_type_index : memory_type)
{
auto it = g_vmm_stats.memory_usage.find(memory_type_index);
if (it == g_vmm_stats.memory_usage.end())
{
continue;
}
result += it->second.observe();
}
return result;
}
u64 vmm_get_application_pool_usage(vmm_allocation_pool pool)
{
return g_vmm_stats.pool_usage[pool];
}
rsx::problem_severity vmm_determine_memory_load_severity()
{
const auto vmm_load = get_current_mem_allocator()->get_memory_usage();
rsx::problem_severity load_severity = rsx::problem_severity::low;
// Fragmentation tuning
if (vmm_load < 50.f)
{
get_current_mem_allocator()->set_fastest_allocation_flags();
}
else if (vmm_load > 75.f)
{
// Avoid fragmentation if we can
get_current_mem_allocator()->set_safest_allocation_flags();
if (vmm_load > 95.f)
{
// Drivers will often crash long before returning OUT_OF_DEVICE_MEMORY errors.
load_severity = rsx::problem_severity::fatal;
}
else if (vmm_load > 90.f)
{
load_severity = rsx::problem_severity::severe;
}
else
{
load_severity = rsx::problem_severity::moderate;
}
// Query actual usage for comparison. Maybe we just have really fragmented memory...
const auto mem_info = get_current_renderer()->get_memory_mapping();
const auto local_memory_usage = vmm_get_application_memory_usage(mem_info.device_local);
constexpr u64 _1M = 0x100000;
const auto res_scale = rsx::get_resolution_scale();
const auto mem_threshold_1 = static_cast<u64>(256 * res_scale * res_scale) * _1M;
const auto mem_threshold_2 = static_cast<u64>(64 * res_scale * res_scale) * _1M;
if (local_memory_usage < (mem_info.device_local_total_bytes / 2) || // Less than 50% VRAM usage OR
(mem_info.device_local_total_bytes - local_memory_usage) > mem_threshold_1) // Enough to hold all required resources left
{
// Lower severity to avoid slowing performance too much
load_severity = rsx::problem_severity::low;
}
else if ((mem_info.device_local_total_bytes - local_memory_usage) > mem_threshold_2) // Enough to hold basic resources like textures, buffers, etc
{
// At least 512MB left, do not overreact
load_severity = rsx::problem_severity::moderate;
}
if (load_severity >= rsx::problem_severity::moderate)
{
// NOTE: For some reason fmt::format with a sized float followed by percentage sign causes random crashing.
// That bug is unrelated to this code, but it explains why integral percentages are used here.
const auto application_memory_load = (local_memory_usage * 100) / mem_info.device_local_total_bytes;
rsx_log.warning("Actual device memory used by internal allocations is %lluM (%llu%%)", local_memory_usage / 0x100000, application_memory_load);
rsx_log.warning("Video memory usage is at %d%%. Will attempt to reclaim some resources.", static_cast<int>(vmm_load));
}
}
return load_severity;
}
bool vmm_handle_memory_pressure(rsx::problem_severity severity)
{
if (auto vkthr = dynamic_cast<VKGSRender*>(rsx::get_current_renderer()))
{
return vkthr->on_vram_exhausted(severity);
}
return false;
}
void vmm_check_memory_usage()
{
if (const auto load_severity = vmm_determine_memory_load_severity();
load_severity >= rsx::problem_severity::moderate)
{
vmm_handle_memory_pressure(load_severity);
}
}
void vmm_notify_object_allocated(vmm_allocation_pool pool)
{
ensure(pool >= VMM_ALLOCATION_POOL_SAMPLER);
g_vmm_stats.pool_usage[pool]++;
}
void vmm_notify_object_freed(vmm_allocation_pool pool)
{
ensure(pool >= VMM_ALLOCATION_POOL_SAMPLER);
g_vmm_stats.pool_usage[pool]--;
}
}
| 8,036
|
C++
|
.cpp
| 233
| 31.145923
| 150
| 0.705412
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,437
|
VKFramebuffer.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFramebuffer.cpp
|
#include "stdafx.h"
#include "VKFramebuffer.h"
#include "vkutils/image.h"
#include "vkutils/image_helpers.h"
#include <unordered_map>
namespace vk
{
std::unordered_map<u64, std::vector<std::unique_ptr<vk::framebuffer_holder>>> g_framebuffers_cache;
union framebuffer_storage_key
{
u64 encoded;
struct
{
u64 width : 16; // Width of FBO
u64 height : 16; // Height of FBO
u64 ia_ref : 1; // Input attachment references?
};
framebuffer_storage_key(u16 width_, u16 height_, VkBool32 has_input_attachments)
: width(width_), height(height_), ia_ref(has_input_attachments)
{}
};
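// Illustrative encoding (added for exposition; not part of the original file), assuming
// the usual LSB-first bitfield packing: a 1280x720 FBO without input attachments yields
//   key.encoded == 1280ull | (720ull << 16)   // ia_ref (bit 32) clear
// Bitfield layout is implementation-defined, so keys are only round-tripped through this
// union in-process and never persisted.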
vk::framebuffer_holder* get_framebuffer(VkDevice dev, u16 width, u16 height, VkBool32 has_input_attachments, VkRenderPass renderpass, const std::vector<vk::image*>& image_list)
{
framebuffer_storage_key key(width, height, has_input_attachments);
auto &queue = g_framebuffers_cache[key.encoded];
for (auto &fbo : queue)
{
if (fbo->matches(image_list, width, height))
{
return fbo.get();
}
}
std::vector<std::unique_ptr<vk::image_view>> image_views;
image_views.reserve(image_list.size());
for (auto &e : image_list)
{
const VkImageSubresourceRange subres = { e->aspect(), 0, 1, 0, 1 };
image_views.push_back(std::make_unique<vk::image_view>(dev, e, VK_IMAGE_VIEW_TYPE_2D, vk::default_component_map, subres));
}
auto value = std::make_unique<vk::framebuffer_holder>(dev, renderpass, width, height, std::move(image_views));
auto ret = value.get();
queue.push_back(std::move(value));
return ret;
}
vk::framebuffer_holder* get_framebuffer(VkDevice dev, u16 width, u16 height, VkBool32 has_input_attachments, VkRenderPass renderpass, VkFormat format, VkImage attachment)
{
framebuffer_storage_key key(width, height, has_input_attachments);
auto &queue = g_framebuffers_cache[key.encoded];
for (const auto &e : queue)
{
if (e->attachments[0]->info.image == attachment)
{
return e.get();
}
}
VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
std::vector<std::unique_ptr<vk::image_view>> views;
views.push_back(std::make_unique<vk::image_view>(dev, attachment, VK_IMAGE_VIEW_TYPE_2D, format, vk::default_component_map, range));
auto value = std::make_unique<vk::framebuffer_holder>(dev, renderpass, width, height, std::move(views));
auto ret = value.get();
queue.push_back(std::move(value));
return ret;
}
void remove_unused_framebuffers()
{
// Remove stale framebuffers. Ref counted to prevent use-after-free
for (auto It = g_framebuffers_cache.begin(); It != g_framebuffers_cache.end();)
{
It->second.erase(
std::remove_if(It->second.begin(), It->second.end(), [](const auto& fbo)
{
return (fbo->unused_check_count() >= 2);
}),
It->second.end()
);
if (It->second.empty())
{
It = g_framebuffers_cache.erase(It);
}
else
{
++It;
}
}
}
void clear_framebuffer_cache()
{
g_framebuffers_cache.clear();
}
}
| 3,003
|
C++
|
.cpp
| 90
| 30.066667
| 177
| 0.69167
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,438
|
VKVertexBuffers.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp
|
#include "stdafx.h"
#include "VKGSRender.h"
#include "../Common/BufferUtils.h"
#include "../rsx_methods.h"
#include "vkutils/buffer_object.h"
#include <span>
namespace vk
{
std::pair<VkPrimitiveTopology, bool> get_appropriate_topology(rsx::primitive_type mode)
{
switch (mode)
{
case rsx::primitive_type::lines:
return { VK_PRIMITIVE_TOPOLOGY_LINE_LIST, false };
case rsx::primitive_type::line_loop:
return { VK_PRIMITIVE_TOPOLOGY_LINE_STRIP, true };
case rsx::primitive_type::line_strip:
return { VK_PRIMITIVE_TOPOLOGY_LINE_STRIP, false };
case rsx::primitive_type::points:
return { VK_PRIMITIVE_TOPOLOGY_POINT_LIST, false };
case rsx::primitive_type::triangles:
return { VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, false };
case rsx::primitive_type::triangle_strip:
case rsx::primitive_type::quad_strip:
return { VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, false };
case rsx::primitive_type::triangle_fan:
#ifndef __APPLE__
return { VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN, false };
#endif
case rsx::primitive_type::quads:
case rsx::primitive_type::polygon:
return { VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, true };
default:
fmt::throw_exception("Unsupported primitive topology 0x%x", static_cast<u8>(mode));
}
}
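// Illustrative note (added for exposition; not part of the original file): a 'true' second
// member marks topologies the Vulkan backend must emulate with a synthesized index buffer.
// E.g. one quad (4 vertices) expands to a 6-index TRIANGLE_LIST such as {0,1,2, 2,3,0},
// and a line_loop of N vertices becomes a LINE_STRIP of N+1 indices that repeats index 0.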
bool is_primitive_native(rsx::primitive_type mode)
{
return !get_appropriate_topology(mode).second;
}
VkIndexType get_index_type(rsx::index_array_type type)
{
switch (type)
{
case rsx::index_array_type::u32:
return VK_INDEX_TYPE_UINT32;
case rsx::index_array_type::u16:
return VK_INDEX_TYPE_UINT16;
}
fmt::throw_exception("Invalid index array type (%u)", static_cast<u8>(type));
}
}
namespace
{
std::tuple<u32, std::tuple<VkDeviceSize, VkIndexType>> generate_emulating_index_buffer(
const rsx::draw_clause& clause, u32 vertex_count,
vk::data_heap& m_index_buffer_ring_info)
{
u32 index_count = get_index_count(clause.primitive, vertex_count);
u32 upload_size = index_count * sizeof(u16);
VkDeviceSize offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size);
void* buf = m_index_buffer_ring_info.map(offset_in_index_buffer, upload_size);
g_fxo->get<rsx::dma_manager>().emulate_as_indexed(buf, clause.primitive, vertex_count);
m_index_buffer_ring_info.unmap();
return std::make_tuple(
index_count, std::make_tuple(offset_in_index_buffer, VK_INDEX_TYPE_UINT16));
}
struct vertex_input_state
{
VkPrimitiveTopology native_primitive_type;
bool index_rebase;
u32 min_index;
u32 max_index;
u32 vertex_draw_count;
u32 vertex_index_offset;
std::optional<std::tuple<VkDeviceSize, VkIndexType>> index_info;
};
struct draw_command_visitor
{
draw_command_visitor(vk::data_heap& index_buffer_ring_info, rsx::vertex_input_layout& layout)
: m_index_buffer_ring_info(index_buffer_ring_info)
, m_vertex_layout(layout)
{
}
vertex_input_state operator()(const rsx::draw_array_command& /*command*/)
{
const auto [prims, primitives_emulated] = vk::get_appropriate_topology(rsx::method_registers.current_draw_clause.primitive);
const u32 vertex_count = rsx::method_registers.current_draw_clause.get_elements_count();
const u32 min_index = rsx::method_registers.current_draw_clause.min_index();
const u32 max_index = (min_index + vertex_count) - 1;
if (primitives_emulated)
{
u32 index_count;
std::optional<std::tuple<VkDeviceSize, VkIndexType>> index_info;
std::tie(index_count, index_info) =
generate_emulating_index_buffer(rsx::method_registers.current_draw_clause,
vertex_count, m_index_buffer_ring_info);
return{ prims, false, min_index, max_index, index_count, 0, index_info };
}
return{ prims, false, min_index, max_index, vertex_count, 0, {} };
}
vertex_input_state operator()(const rsx::draw_indexed_array_command& command)
{
auto primitive = rsx::method_registers.current_draw_clause.primitive;
const auto [prims, primitives_emulated] = vk::get_appropriate_topology(primitive);
const bool emulate_restart = rsx::method_registers.restart_index_enabled() && vk::emulate_primitive_restart(primitive);
rsx::index_array_type index_type = rsx::method_registers.current_draw_clause.is_immediate_draw ?
rsx::index_array_type::u32 :
rsx::method_registers.index_type();
u32 type_size = get_index_type_size(index_type);
u32 index_count = rsx::method_registers.current_draw_clause.get_elements_count();
if (primitives_emulated)
index_count = get_index_count(rsx::method_registers.current_draw_clause.primitive, index_count);
u32 upload_size = index_count * type_size;
if (emulate_restart) upload_size *= 2;
VkDeviceSize offset_in_index_buffer = m_index_buffer_ring_info.alloc<64>(upload_size);
void* buf = m_index_buffer_ring_info.map(offset_in_index_buffer, upload_size);
std::span<std::byte> dst;
stx::single_ptr<std::byte[]> tmp;
if (emulate_restart)
{
tmp = stx::make_single<std::byte[], false, 64>(upload_size);
dst = std::span<std::byte>(tmp.get(), upload_size);
}
else
{
dst = std::span<std::byte>(static_cast<std::byte*>(buf), upload_size);
}
/**
* Upload index data (and expand it if the primitive type is not natively supported).
*/
u32 min_index, max_index;
std::tie(min_index, max_index, index_count) = write_index_array_data_to_buffer(
dst,
command.raw_index_buffer, index_type,
rsx::method_registers.current_draw_clause.primitive,
rsx::method_registers.restart_index_enabled(),
rsx::method_registers.restart_index(),
[](auto prim) { return !vk::is_primitive_native(prim); });
if (min_index >= max_index)
{
//empty set, do not draw
m_index_buffer_ring_info.unmap();
return{ prims, false, 0, 0, 0, 0, {} };
}
if (emulate_restart)
{
if (index_type == rsx::index_array_type::u16)
{
index_count = rsx::remove_restart_index(static_cast<u16*>(buf), reinterpret_cast<u16*>(tmp.get()), index_count, u16{umax});
}
else
{
index_count = rsx::remove_restart_index(static_cast<u32*>(buf), reinterpret_cast<u32*>(tmp.get()), index_count, u32{umax});
}
}
m_index_buffer_ring_info.unmap();
std::optional<std::tuple<VkDeviceSize, VkIndexType>> index_info =
std::make_tuple(offset_in_index_buffer, vk::get_index_type(index_type));
const auto index_offset = rsx::method_registers.vertex_data_base_index();
return {prims, true, min_index, max_index, index_count, index_offset, index_info};
}
vertex_input_state operator()(const rsx::draw_inlined_array& /*command*/)
{
auto &draw_clause = rsx::method_registers.current_draw_clause;
const auto [prims, primitives_emulated] = vk::get_appropriate_topology(draw_clause.primitive);
const auto stream_length = rsx::method_registers.current_draw_clause.inline_vertex_array.size();
const u32 vertex_count = u32(stream_length * sizeof(u32)) / m_vertex_layout.interleaved_blocks[0]->attribute_stride;
if (!primitives_emulated)
{
return{ prims, false, 0, vertex_count - 1, vertex_count, 0, {} };
}
u32 index_count;
std::optional<std::tuple<VkDeviceSize, VkIndexType>> index_info;
std::tie(index_count, index_info) = generate_emulating_index_buffer(draw_clause, vertex_count, m_index_buffer_ring_info);
return{ prims, false, 0, vertex_count - 1, index_count, 0, index_info };
}
private:
vk::data_heap& m_index_buffer_ring_info;
rsx::vertex_input_layout& m_vertex_layout;
};
}
vk::vertex_upload_info VKGSRender::upload_vertex_data()
{
draw_command_visitor visitor(m_index_buffer_ring_info, m_vertex_layout);
auto result = std::visit(visitor, get_draw_command(rsx::method_registers));
const u32 vertex_count = (result.max_index - result.min_index) + 1;
u32 vertex_base = result.min_index;
u32 index_base = 0;
if (result.index_rebase)
{
vertex_base = rsx::get_index_from_base(vertex_base, rsx::method_registers.vertex_data_base_index());
index_base = result.min_index;
}
//Do actual vertex upload
auto required = calculate_memory_requirements(m_vertex_layout, vertex_base, vertex_count);
u32 persistent_range_base = -1, volatile_range_base = -1;
usz persistent_offset = -1, volatile_offset = -1;
if (required.first > 0)
{
//Check if cacheable
//Only data in the 'persistent' block may be cached
//TODO: make vertex cache keep local data beyond frame boundaries and hook notify command
bool in_cache = false;
bool to_store = false;
u32 storage_address = -1;
m_frame_stats.vertex_cache_request_count++;
if (m_vertex_layout.interleaved_blocks.size() == 1 &&
rsx::method_registers.current_draw_clause.command != rsx::draw_command::inlined_array)
{
const auto data_offset = (vertex_base * m_vertex_layout.interleaved_blocks[0]->attribute_stride);
storage_address = m_vertex_layout.interleaved_blocks[0]->real_offset_address + data_offset;
if (auto cached = m_vertex_cache->find_vertex_range(storage_address, required.first))
{
ensure(cached->local_address == storage_address);
in_cache = true;
persistent_range_base = cached->offset_in_heap;
}
else
{
to_store = true;
}
}
if (!in_cache)
{
m_frame_stats.vertex_cache_miss_count++;
persistent_offset = static_cast<u32>(m_attrib_ring_info.alloc<256>(required.first));
persistent_range_base = static_cast<u32>(persistent_offset);
if (to_store)
{
//store ref in vertex cache
m_vertex_cache->store_range(storage_address, required.first, static_cast<u32>(persistent_offset));
}
}
}
if (required.second > 0)
{
volatile_offset = static_cast<u32>(m_attrib_ring_info.alloc<256>(required.second));
volatile_range_base = static_cast<u32>(volatile_offset);
}
//Write all the data once if possible
if (required.first && required.second && volatile_offset > persistent_offset)
{
//Do this once for both to save time on map/unmap cycles
const usz block_end = (volatile_offset + required.second);
const usz block_size = block_end - persistent_offset;
const usz volatile_offset_in_block = volatile_offset - persistent_offset;
void *block_mapping = m_attrib_ring_info.map(persistent_offset, block_size);
write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, block_mapping, static_cast<char*>(block_mapping) + volatile_offset_in_block);
m_attrib_ring_info.unmap();
}
else
{
if (required.first > 0 && persistent_offset != umax)
{
void *persistent_mapping = m_attrib_ring_info.map(persistent_offset, required.first);
write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, persistent_mapping, nullptr);
m_attrib_ring_info.unmap();
}
if (required.second > 0)
{
void *volatile_mapping = m_attrib_ring_info.map(volatile_offset, required.second);
write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, nullptr, volatile_mapping);
m_attrib_ring_info.unmap();
}
}
if (vk::test_status_interrupt(vk::heap_changed))
{
// Check for validity
if (m_persistent_attribute_storage &&
m_persistent_attribute_storage->info.buffer != m_attrib_ring_info.heap->value)
{
m_current_frame->buffer_views_to_clean.push_back(std::move(m_persistent_attribute_storage));
}
if (m_volatile_attribute_storage &&
m_volatile_attribute_storage->info.buffer != m_attrib_ring_info.heap->value)
{
m_current_frame->buffer_views_to_clean.push_back(std::move(m_volatile_attribute_storage));
}
vk::clear_status_interrupt(vk::heap_changed);
}
if (persistent_range_base != umax)
{
if (!m_persistent_attribute_storage || !m_persistent_attribute_storage->in_range(persistent_range_base, required.first, persistent_range_base))
{
ensure(m_texbuffer_view_size >= required.first); // "Incompatible driver (MacOS?)"
if (m_persistent_attribute_storage)
m_current_frame->buffer_views_to_clean.push_back(std::move(m_persistent_attribute_storage));
//View 64M blocks at a time (different drivers will only allow a fixed viewable heap size, 64M should be safe)
const usz view_size = (persistent_range_base + m_texbuffer_view_size) > m_attrib_ring_info.size() ? m_attrib_ring_info.size() - persistent_range_base : m_texbuffer_view_size;
m_persistent_attribute_storage = std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, persistent_range_base, view_size);
persistent_range_base = 0;
}
}
if (volatile_range_base != umax)
{
if (!m_volatile_attribute_storage || !m_volatile_attribute_storage->in_range(volatile_range_base, required.second, volatile_range_base))
{
ensure(m_texbuffer_view_size >= required.second); // "Incompatible driver (MacOS?)"
if (m_volatile_attribute_storage)
m_current_frame->buffer_views_to_clean.push_back(std::move(m_volatile_attribute_storage));
const usz view_size = (volatile_range_base + m_texbuffer_view_size) > m_attrib_ring_info.size() ? m_attrib_ring_info.size() - volatile_range_base : m_texbuffer_view_size;
m_volatile_attribute_storage = std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, volatile_range_base, view_size);
volatile_range_base = 0;
}
}
return{ result.native_primitive_type, // Primitive
result.vertex_draw_count, // Vertex count
vertex_count, // Allocated vertex count
vertex_base, // First vertex in stream
index_base, // Index of vertex at data location 0
result.vertex_index_offset, // Index offset
persistent_range_base, volatile_range_base, // Binding range
result.index_info }; // Index buffer info
}
| 13,742
|
C++
|
.cpp
| 315
| 40.104762
| 177
| 0.711176
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,439
|
VKAsyncScheduler.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKAsyncScheduler.cpp
|
#include "VKAsyncScheduler.h"
#include "VKHelpers.h"
#include "VKResourceManager.h"
#include "Emu/IdManager.h"
#include "Utilities/lockless.h"
#include "Utilities/mutex.h"
#include <vector>
namespace vk
{
AsyncTaskScheduler::AsyncTaskScheduler(vk_gpu_scheduler_mode mode, const VkDependencyInfoKHR& queue_dependency)
{
if (g_cfg.video.renderer != video_renderer::vulkan || !g_cfg.video.vk.asynchronous_texture_streaming)
{
// Invalid renderer combination, do not proceed. This should never happen.
// NOTE: If managed by fxo, this object may be created automatically on boot.
rsx_log.notice("Vulkan async streaming is disabled. This thread will now exit.");
return;
}
init_config_options(mode, queue_dependency);
}
AsyncTaskScheduler::~AsyncTaskScheduler()
{
if (!m_async_command_queue.empty())
{
// Driver resources should be destroyed before driver is detached or you get crashes. RAII won't save you here.
rsx_log.error("Async task scheduler resources were not freed correctly!");
}
}
void AsyncTaskScheduler::init_config_options(vk_gpu_scheduler_mode mode, const VkDependencyInfoKHR& queue_dependency)
{
std::lock_guard lock(m_config_mutex);
if (std::exchange(m_options_initialized, true))
{
// Nothing to do
return;
}
m_use_host_scheduler = (mode == vk_gpu_scheduler_mode::safe) || g_cfg.video.strict_rendering_mode;
rsx_log.notice("Asynchronous task scheduler is active running in %s mode", m_use_host_scheduler? "'Safe'" : "'Fast'");
m_dependency_info = queue_dependency;
}
void AsyncTaskScheduler::delayed_init()
{
ensure(m_options_initialized);
auto pdev = get_current_renderer();
m_command_pool.create(*const_cast<render_device*>(pdev), pdev->get_transfer_queue_family());
if (m_use_host_scheduler)
{
for (usz i = 0; i < events_pool_size; ++i)
{
auto sema = std::make_unique<semaphore>(*pdev);
m_semaphore_pool.emplace_back(std::move(sema));
}
return;
}
for (usz i = 0; i < events_pool_size; ++i)
{
auto ev = std::make_unique<vk::event>(*pdev, sync_domain::gpu);
m_events_pool.emplace_back(std::move(ev));
}
}
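// Illustrative note (added for exposition; not part of the original file): exactly one of
// the two pools above is populated. 'Safe' (host) mode round-robins semaphores through
// get_sema(), while 'Fast' mode round-robins vk::event objects through insert_sync_event()
// (indexed by m_next_event_id % events_pool_size).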
void AsyncTaskScheduler::insert_sync_event()
{
ensure(m_current_cb);
auto& sync_label = m_events_pool[m_next_event_id++ % events_pool_size];
sync_label->reset();
sync_label->signal(*m_current_cb, m_dependency_info);
m_sync_label = sync_label.get();
}
command_buffer* AsyncTaskScheduler::get_current()
{
std::lock_guard lock(m_submit_mutex);
m_sync_required = true;
// 0. Anything still active?
if (m_current_cb)
{
return m_current_cb;
}
// 1. Check if there is a 'next' entry
if (m_async_command_queue.empty())
{
delayed_init();
}
else if (m_next_cb_index < m_async_command_queue.size())
{
m_current_cb = &m_async_command_queue[m_next_cb_index];
}
// 2. Create entry
if (!m_current_cb)
{
if (m_next_cb_index == VK_MAX_ASYNC_COMPUTE_QUEUES)
{
m_next_cb_index = 0;
m_current_cb = &m_async_command_queue[m_next_cb_index];
}
else
{
m_async_command_queue.emplace_back();
m_current_cb = &m_async_command_queue.back();
m_current_cb->create(m_command_pool);
}
}
m_next_cb_index++;
return m_current_cb;
}
event* AsyncTaskScheduler::get_primary_sync_label()
{
ensure(!m_use_host_scheduler);
if (m_sync_required) [[unlikely]]
{
std::lock_guard lock(m_submit_mutex); // For some reason this is inexplicably expensive. WTF!
ensure(m_current_cb);
insert_sync_event();
m_sync_required = false;
}
return std::exchange(m_sync_label, nullptr);
}
semaphore* AsyncTaskScheduler::get_sema()
{
if (m_semaphore_pool.empty())
{
delayed_init();
ensure(!m_semaphore_pool.empty());
}
const u32 sema_id = static_cast<u32>(m_next_semaphore_id++ % m_semaphore_pool.size());
return m_semaphore_pool[sema_id].get();
}
void AsyncTaskScheduler::flush(queue_submit_t& submit_info, VkBool32 force_flush)
{
if (!m_current_cb)
{
return;
}
submit_info.queue = get_current_renderer()->get_transfer_queue();
std::lock_guard lock(m_submit_mutex);
if (m_sync_required && !m_use_host_scheduler)
{
insert_sync_event();
}
m_current_cb->end();
m_current_cb->submit(submit_info, force_flush);
m_submit_count++;
m_last_used_cb = m_current_cb;
m_current_cb = nullptr;
m_sync_required = false;
}
void AsyncTaskScheduler::destroy()
{
for (auto& cb : m_async_command_queue)
{
cb.destroy();
}
m_async_command_queue.clear();
m_next_cb_index = 0;
m_command_pool.destroy();
m_events_pool.clear();
m_semaphore_pool.clear();
}
}
| 4,826
|
C++
|
.cpp
| 158
| 26.151899
| 121
| 0.671664
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,440
|
VKProgramPipeline.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKProgramPipeline.cpp
|
#include "stdafx.h"
#include "VKProgramPipeline.h"
#include "vkutils/descriptors.h"
#include "vkutils/device.h"
#include "../Program/SPIRVCommon.h"
namespace vk
{
namespace glsl
{
using namespace ::glsl;
void shader::create(::glsl::program_domain domain, const std::string& source)
{
type = domain;
m_source = source;
}
VkShaderModule shader::compile()
{
ensure(m_handle == VK_NULL_HANDLE);
if (!spirv::compile_glsl_to_spv(m_compiled, m_source, type, ::glsl::glsl_rules_vulkan))
{
const std::string shader_type = type == ::glsl::program_domain::glsl_vertex_program ? "vertex" :
type == ::glsl::program_domain::glsl_fragment_program ? "fragment" : "compute";
rsx_log.notice("%s", m_source);
fmt::throw_exception("Failed to compile %s shader", shader_type);
}
VkShaderModuleCreateInfo vs_info;
vs_info.codeSize = m_compiled.size() * sizeof(u32);
vs_info.pNext = nullptr;
vs_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
vs_info.pCode = m_compiled.data();
vs_info.flags = 0;
vkCreateShaderModule(*g_render_device, &vs_info, nullptr, &m_handle);
return m_handle;
}
void shader::destroy()
{
m_source.clear();
m_compiled.clear();
if (m_handle)
{
vkDestroyShaderModule(*g_render_device, m_handle, nullptr);
m_handle = nullptr;
}
}
const std::string& shader::get_source() const
{
return m_source;
}
const std::vector<u32> shader::get_compiled() const
{
return m_compiled;
}
VkShaderModule shader::get_handle() const
{
return m_handle;
}
void program::create_impl()
{
linked = false;
attribute_location_mask = 0;
vertex_attributes_mask = 0;
fs_texture_bindings.fill(~0u);
fs_texture_mirror_bindings.fill(~0u);
vs_texture_bindings.fill(~0u);
}
program::program(VkDevice dev, VkPipeline p, VkPipelineLayout layout, const std::vector<program_input> &vertex_input, const std::vector<program_input>& fragment_inputs)
: m_device(dev), pipeline(p), pipeline_layout(layout)
{
create_impl();
load_uniforms(vertex_input);
load_uniforms(fragment_inputs);
}
program::program(VkDevice dev, VkPipeline p, VkPipelineLayout layout)
: m_device(dev), pipeline(p), pipeline_layout(layout)
{
create_impl();
}
program::~program()
{
vkDestroyPipeline(m_device, pipeline, nullptr);
}
program& program::load_uniforms(const std::vector<program_input>& inputs)
{
ensure(!linked); // "Cannot change uniforms in already linked program!"
for (auto &item : inputs)
{
uniforms[item.type].push_back(item);
}
return *this;
}
program& program::link()
{
// Preprocess texture bindings
// Link step is only useful for rasterizer programs, compute programs do not need this
for (const auto &uniform : uniforms[program_input_type::input_type_texture])
{
if (const auto name_start = uniform.name.find("tex"); name_start != umax)
{
const auto name_end = uniform.name.find("_stencil");
const auto index_start = name_start + 3; // Skip 'tex' part
const auto index_length = (name_end != umax) ? name_end - index_start : name_end;
const auto index_part = uniform.name.substr(index_start, index_length);
const auto index = std::stoi(index_part);
if (name_start == 0)
{
// Fragment texture (tex...)
if (name_end == umax)
{
// Normal texture
fs_texture_bindings[index] = uniform.location;
}
else
{
// Stencil mirror
fs_texture_mirror_bindings[index] = uniform.location;
}
}
else
{
// Vertex texture (vtex...)
vs_texture_bindings[index] = uniform.location;
}
}
}
linked = true;
return *this;
}
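// Illustrative note (added for exposition; not part of the original file): the name
// parsing above maps uniforms as follows:
//   "tex3"         -> fs_texture_bindings[3]         (fragment sampler, name_start == 0)
//   "tex2_stencil" -> fs_texture_mirror_bindings[2]  (stencil mirror view)
//   "vtex0"        -> vs_texture_bindings[0]         (vertex sampler, name_start == 1)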
bool program::has_uniform(program_input_type type, const std::string& uniform_name)
{
const auto& uniform = uniforms[type];
return std::any_of(uniform.cbegin(), uniform.cend(), [&uniform_name](const auto& u)
{
return u.name == uniform_name;
});
}
void program::bind_uniform(const VkDescriptorImageInfo &image_descriptor, const std::string& uniform_name, VkDescriptorType type, vk::descriptor_set &set)
{
for (const auto &uniform : uniforms[program_input_type::input_type_texture])
{
if (uniform.name == uniform_name)
{
set.push(image_descriptor, type, uniform.location);
attribute_location_mask |= (1ull << uniform.location);
return;
}
}
rsx_log.notice("texture not found in program: %s", uniform_name.c_str());
}
void program::bind_uniform(const VkDescriptorImageInfo & image_descriptor, int texture_unit, ::glsl::program_domain domain, vk::descriptor_set &set, bool is_stencil_mirror)
{
ensure(domain != ::glsl::program_domain::glsl_compute_program);
u32 binding;
if (domain == ::glsl::program_domain::glsl_fragment_program)
{
binding = (is_stencil_mirror) ? fs_texture_mirror_bindings[texture_unit] : fs_texture_bindings[texture_unit];
}
else
{
binding = vs_texture_bindings[texture_unit];
}
if (binding != ~0u)
{
set.push(image_descriptor, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, binding);
attribute_location_mask |= (1ull << binding);
return;
}
rsx_log.notice("texture not found in program: %stex%u", (domain == ::glsl::program_domain::glsl_vertex_program)? "v" : "", texture_unit);
}
void program::bind_uniform(const VkDescriptorBufferInfo &buffer_descriptor, u32 binding_point, vk::descriptor_set &set)
{
bind_buffer(buffer_descriptor, binding_point, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, set);
}
void program::bind_uniform(const VkBufferView &buffer_view, u32 binding_point, vk::descriptor_set &set)
{
set.push(buffer_view, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, binding_point);
attribute_location_mask |= (1ull << binding_point);
}
void program::bind_uniform(const VkBufferView &buffer_view, program_input_type type, const std::string &binding_name, vk::descriptor_set &set)
{
for (const auto &uniform : uniforms[type])
{
if (uniform.name == binding_name)
{
bind_uniform(buffer_view, uniform.location, set);
return;
}
}
rsx_log.notice("vertex buffer not found in program: %s", binding_name.c_str());
}
void program::bind_buffer(const VkDescriptorBufferInfo &buffer_descriptor, u32 binding_point, VkDescriptorType type, vk::descriptor_set &set)
{
set.push(buffer_descriptor, type, binding_point);
attribute_location_mask |= (1ull << binding_point);
}
}
}
| 6,566
|
C++
|
.cpp
| 196
| 29.260204
| 174
| 0.682615
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,441
|
VKDMA.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKDMA.cpp
|
#include "stdafx.h"
#include "VKResourceManager.h"
#include "VKDMA.h"
#include "vkutils/device.h"
#include "Emu/Memory/vm.h"
#include "Emu/RSX/RSXThread.h"
#include "Utilities/mutex.h"
#include "util/asm.hpp"
#include <unordered_map>
namespace vk
{
static constexpr usz s_dma_block_length = 0x00010000;
static constexpr u32 s_dma_block_mask = 0xFFFF0000;
std::unordered_map<u32, std::unique_ptr<dma_block>> g_dma_pool;
shared_mutex g_dma_mutex;
// Validation
atomic_t<u64> s_allocated_dma_pool_size{ 0 };
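// Illustrative note (added for exposition; not part of the original file): blocks are
// 64 KiB-aligned windows, so masking an RSX address yields its owning block base:
static_assert((0x30012345u & s_dma_block_mask) == 0x30010000u);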
dma_block::~dma_block()
{
// Use safe free (uses gc to clean up)
free();
}
void* dma_block::map_range(const utils::address_range& range)
{
if (inheritance_info.parent)
{
return inheritance_info.parent->map_range(range);
}
if (memory_mapping == nullptr)
{
memory_mapping = static_cast<u8*>(allocated_memory->map(0, VK_WHOLE_SIZE));
ensure(memory_mapping);
}
ensure(range.start >= base_address);
u32 start = range.start;
start -= base_address;
return memory_mapping + start;
}
void dma_block::unmap()
{
if (inheritance_info.parent)
{
inheritance_info.parent->unmap();
}
else
{
allocated_memory->unmap();
memory_mapping = nullptr;
}
}
void dma_block::allocate(const render_device& dev, usz size)
{
// Acquired blocks are always to be assumed dirty. It is not possible to synchronize host access and inline
// buffer copies without causing weird issues. Overlapped incomplete data ends up overwriting host-uploaded data.
free();
allocated_memory = std::make_unique<vk::buffer>(dev, size,
dev.get_memory_mapping().host_visible_coherent, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, 0,
VMM_ALLOCATION_POOL_UNDEFINED);
// Initialize memory contents. This isn't something that happens often.
// Pre-loading the contents helps to avoid leakage when mixed types of allocations are in use (NVIDIA)
// TODO: Fix memory lost when old object goes out of use with in-flight data.
auto dst = static_cast<u8*>(allocated_memory->map(0, size));
auto src = vm::get_super_ptr<u8>(base_address);
if (rsx::get_location(base_address) == CELL_GCM_LOCATION_LOCAL ||
vm::check_addr(base_address, 0, static_cast<u32>(size))) [[ likely ]]
{
// Linear virtual memory space. Copy all at once.
std::memcpy(dst, src, size);
}
else
{
// Some games will have address holes in the range and page in data using faults.
// Copy page by page. Slow, but we only really have to do this a handful of times.
// Note that base_address is 16k aligned.
for (u32 address = base_address; address < (base_address + size); address += 4096, src += 4096, dst += 4096)
{
if (vm::check_addr(address, 0))
{
std::memcpy(dst, src, 4096);
}
}
}
allocated_memory->unmap();
s_allocated_dma_pool_size += allocated_memory->size();
}
void dma_block::free()
{
if (allocated_memory)
{
// Do some accounting before the allocation info is no more
s_allocated_dma_pool_size -= allocated_memory->size();
// If you have both a memory allocation AND a parent block at the same time, you're in trouble
ensure(head() == this);
if (memory_mapping)
{
// vma allocator does not allow us to destroy mapped memory on windows
unmap();
ensure(!memory_mapping);
}
// Move allocation to gc
auto gc = vk::get_resource_manager();
gc->dispose(allocated_memory);
}
}
void dma_block::init(const render_device& dev, u32 addr, usz size)
{
ensure((size > 0) && !((size | addr) & ~s_dma_block_mask));
base_address = addr;
allocate(dev, size);
ensure(!inheritance_info.parent);
}
void dma_block::init(dma_block* parent, u32 addr, usz size)
{
ensure((size > 0) && !((size | addr) & ~s_dma_block_mask));
base_address = addr;
inheritance_info.parent = parent;
inheritance_info.block_offset = (addr - parent->base_address);
}
void dma_block::flush(const utils::address_range& range)
{
if (inheritance_info.parent)
{
// Parent may be a different type of block
inheritance_info.parent->flush(range);
return;
}
auto src = map_range(range);
auto dst = vm::get_super_ptr(range.start);
std::memcpy(dst, src, range.length());
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
void dma_block::load(const utils::address_range& range)
{
if (inheritance_info.parent)
{
// Parent may be a different type of block
inheritance_info.parent->load(range);
return;
}
auto src = vm::get_super_ptr(range.start);
auto dst = map_range(range);
std::memcpy(dst, src, range.length());
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
dma_mapping_handle dma_block::get(const utils::address_range& range)
{
if (inheritance_info.parent)
{
return inheritance_info.parent->get(range);
}
ensure(range.start >= base_address);
ensure(range.end <= end());
// mark_dirty(range);
return { (range.start - base_address), allocated_memory.get() };
}
dma_block* dma_block::head()
{
if (!inheritance_info.parent)
return this;
return inheritance_info.parent->head();
}
const dma_block* dma_block::head() const
{
if (!inheritance_info.parent)
return this;
return inheritance_info.parent->head();
}
void dma_block::set_parent(dma_block* parent)
{
ensure(parent);
ensure(parent->base_address < base_address);
if (inheritance_info.parent == parent)
{
// Nothing to do
return;
}
if (allocated_memory)
{
// Acquired blocks are always to be assumed dirty. It is not possible to synchronize host access and inline
// buffer copies without causing weird issues. Overlapped incomplete data ends up overwriting host-uploaded data.
free();
}
inheritance_info.parent = parent;
inheritance_info.block_offset = (base_address - parent->base_address);
}
void dma_block::extend(const render_device& dev, usz new_size)
{
ensure(allocated_memory);
if (new_size <= allocated_memory->size())
return;
allocate(dev, new_size);
}
u32 dma_block::start() const
{
return base_address;
}
u32 dma_block::end() const
{
auto source = head();
return (source->base_address + source->allocated_memory->size() - 1);
}
u32 dma_block::size() const
{
return (allocated_memory) ? allocated_memory->size() : 0;
}
void dma_block_EXT::allocate(const render_device& dev, usz size)
{
// Acquired blocks are always to be assumed dirty. It is not possible to synchronize host access and inline
// buffer copies without causing weird issues. Overlapped incomplete data ends up overwriting host-uploaded data.
free();
allocated_memory = std::make_unique<vk::buffer>(dev,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
vm::get_super_ptr<void>(base_address),
size);
s_allocated_dma_pool_size += allocated_memory->size();
}
void* dma_block_EXT::map_range(const utils::address_range& range)
{
return vm::get_super_ptr<void>(range.start);
}
void dma_block_EXT::unmap()
{
// NOP
}
void dma_block_EXT::flush(const utils::address_range&)
{
// NOP
}
void dma_block_EXT::load(const utils::address_range&)
{
// NOP
}
bool test_host_pointer([[maybe_unused]] u32 base_address, [[maybe_unused]] usz length)
{
#ifdef _WIN32
MEMORY_BASIC_INFORMATION mem_info;
if (!::VirtualQuery(vm::get_super_ptr<const void>(base_address), &mem_info, sizeof(mem_info)))
{
rsx_log.error("VirtualQuery failed! LastError=%s", fmt::win_error{GetLastError(), nullptr});
return false;
}
return (mem_info.RegionSize >= length);
#else
return true; // *nix behavior is unknown with NVIDIA drivers
#endif
}
void create_dma_block(std::unique_ptr<dma_block>& block, u32 base_address, usz expected_length)
{
bool allow_host_buffers = false;
if (rsx::get_current_renderer()->get_backend_config().supports_passthrough_dma)
{
allow_host_buffers =
#if defined(_WIN32)
(vk::get_driver_vendor() == driver_vendor::NVIDIA) ?
test_host_pointer(base_address, expected_length) :
#endif
true;
if (!allow_host_buffers)
{
rsx_log.trace("Requested DMA passthrough for block 0x%x->0x%x but this was not possible.",
base_address, base_address + expected_length - 1);
}
}
if (allow_host_buffers)
{
block.reset(new dma_block_EXT());
}
else
{
block.reset(new dma_block());
}
block->init(*g_render_device, base_address, expected_length);
}
dma_mapping_handle map_dma(u32 local_address, u32 length)
{
// Not much contention expected here, avoid searching twice
std::lock_guard lock(g_dma_mutex);
const auto map_range = utils::address_range::start_length(local_address, length);
auto first_block = (local_address & s_dma_block_mask);
if (auto found = g_dma_pool.find(first_block); found != g_dma_pool.end())
{
if (found->second->end() >= map_range.end)
{
return found->second->get(map_range);
}
}
auto last_block = (map_range.end & s_dma_block_mask);
if (first_block == last_block) [[likely]]
{
auto &block_info = g_dma_pool[first_block];
ensure(!block_info);
create_dma_block(block_info, first_block, s_dma_block_length);
return block_info->get(map_range);
}
// Scan range for overlapping sections and update 'chains' accordingly
for (auto block = first_block; block <= last_block; block += s_dma_block_length)
{
if (auto& entry = g_dma_pool[block])
{
first_block = std::min(first_block, entry->head()->start() & s_dma_block_mask);
last_block = std::max(last_block, entry->end() & s_dma_block_mask);
}
}
std::vector<std::unique_ptr<dma_block>> stale_references;
dma_block* block_head = nullptr;
for (auto block = first_block; block <= last_block; block += s_dma_block_length)
{
auto& entry = g_dma_pool[block];
if (block == first_block)
{
if (entry)
{
// Then the references to this object do not go to the end of the list as will be done with this new allocation.
// A dumb release is therefore safe...
ensure(entry->end() < map_range.end);
stale_references.push_back(std::move(entry));
}
auto required_size = (last_block - first_block + s_dma_block_length);
create_dma_block(entry, block, required_size);
block_head = entry->head();
}
else if (entry)
{
ensure((entry->end() & s_dma_block_mask) <= last_block);
entry->set_parent(block_head);
}
else
{
entry.reset(new dma_block());
entry->init(block_head, block, s_dma_block_length);
}
}
// Check that all the math adds up...
stale_references.clear();
ensure(s_allocated_dma_pool_size == g_dma_pool.size() * s_dma_block_length);
ensure(block_head);
return block_head->get(map_range);
}
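// Illustrative walkthrough (added for exposition; not part of the original file), assuming
// an empty pool: map_dma(0x1FFF0, 0x20) covers [0x1FFF0, 0x2000F], so first_block = 0x10000
// and last_block = 0x20000. The fast path misses, block 0x10000 is allocated with
// required_size = 0x20000 bytes, and block 0x20000 is chained to it as a child via
// init(block_head, ...), after which lookups on either block resolve through the same head.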
void unmap_dma(u32 local_address, u32 length)
{
std::lock_guard lock(g_dma_mutex);
const u32 start = (local_address & s_dma_block_mask);
const u32 end = utils::align(local_address + length, static_cast<u32>(s_dma_block_length));
for (u32 block = start; block < end;)
{
if (auto found = g_dma_pool.find(block); found != g_dma_pool.end())
{
auto head = found->second->head();
if (dynamic_cast<dma_block_EXT*>(head))
{
// Passthrough block. Must unmap from GPU
const u32 start_block = head->start();
const u32 last_block = head->start() + head->size();
for (u32 block_ = start_block; block_ < last_block; block_ += s_dma_block_length)
{
g_dma_pool.erase(block_);
}
block = last_block;
continue;
}
}
block += s_dma_block_length;
}
ensure(s_allocated_dma_pool_size == g_dma_pool.size() * s_dma_block_length);
}
template<bool load>
void sync_dma_impl(u32 local_address, u32 length)
{
reader_lock lock(g_dma_mutex);
const auto limit = local_address + length - 1;
while (length)
{
u32 block = (local_address & s_dma_block_mask);
if (auto found = g_dma_pool.find(block); found != g_dma_pool.end())
{
const auto sync_end = std::min(limit, found->second->end());
const auto range = utils::address_range::start_end(local_address, sync_end);
if constexpr (load)
{
found->second->load(range);
}
else
{
found->second->flush(range);
}
if (sync_end < limit) [[unlikely]]
{
// Technically legal, but assuming a map->flush usage pattern this should not happen
// Optimizations could in theory batch together multiple transfers though
rsx_log.error("Sink request spans multiple allocated blocks!");
const auto write_end = (sync_end + 1u);
const auto written = (write_end - local_address);
length -= written;
local_address = write_end;
continue;
}
break;
}
else
{
rsx_log.error("Sync command on range not mapped!");
return;
}
}
}
void load_dma(u32 local_address, u32 length)
{
sync_dma_impl<true>(local_address, length);
}
void flush_dma(u32 local_address, u32 length)
{
sync_dma_impl<false>(local_address, length);
}
void clear_dma_resources()
{
g_dma_pool.clear();
}
}
| 13,094
|
C++
|
.cpp
| 419
| 27.778043
| 117
| 0.685385
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,442
|
VKRenderPass.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKRenderPass.cpp
|
#include "stdafx.h"
#include "Utilities/mutex.h"
#include "VKRenderPass.h"
#include "vkutils/image.h"
namespace vk
{
struct active_renderpass_info_t
{
VkRenderPass pass = VK_NULL_HANDLE;
VkFramebuffer fbo = VK_NULL_HANDLE;
};
atomic_t<u64> g_cached_renderpass_key = 0;
VkRenderPass g_cached_renderpass = VK_NULL_HANDLE;
std::unordered_map<VkCommandBuffer, active_renderpass_info_t> g_current_renderpass;
shared_mutex g_renderpass_cache_mutex;
std::unordered_map<u64, VkRenderPass> g_renderpass_cache;
// Key structure
// 0-7 color_format
// 8-15 depth_format
// 16-21 sample_counts
// 22-36 current layouts
// 37-41 input attachments
union renderpass_key_blob
{
private:
// Internal utils
static u64 encode_layout(VkImageLayout layout)
{
switch (+layout)
{
case VK_IMAGE_LAYOUT_GENERAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
return static_cast<u64>(layout);
case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
return 4ull;
default:
fmt::throw_exception("Unsupported layout 0x%llx here", static_cast<usz>(layout));
}
}
static VkImageLayout decode_layout(u64 encoded)
{
switch (encoded)
{
case 1:
case 2:
case 3:
return static_cast<VkImageLayout>(encoded);
case 4:
return VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
default:
fmt::throw_exception("Unsupported layout encoding 0x%llx here", encoded);
}
}
public:
u64 encoded;
struct
{
u64 color_format : 8;
u64 depth_format : 8;
u64 sample_count : 6;
u64 layout_blob : 15;
u64 input_attachments_mask : 5;
};
renderpass_key_blob(u64 encoded_) : encoded(encoded_)
{}
// Encoders
inline void set_layout(u32 index, VkImageLayout layout)
{
switch (+layout)
{
case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
layout_blob |= encode_layout(layout) << (index * 3);
break;
default:
fmt::throw_exception("Unsupported image layout 0x%x", static_cast<u32>(layout));
}
}
inline void set_input_attachment(u32 index)
{
input_attachments_mask |= (1ull << index);
}
inline void set_format(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
depth_format = static_cast<u64>(format);
break;
default:
color_format = static_cast<u64>(format);
break;
}
}
// Decoders
inline VkSampleCountFlagBits get_sample_count() const
{
return static_cast<VkSampleCountFlagBits>(sample_count);
}
inline VkFormat get_color_format() const
{
return static_cast<VkFormat>(color_format);
}
inline VkFormat get_depth_format() const
{
return static_cast<VkFormat>(depth_format);
}
std::vector<VkAttachmentReference> get_input_attachments() const
{
if (input_attachments_mask == 0) [[likely]]
{
return {};
}
std::vector<VkAttachmentReference> result;
for (u32 i = 0; i < 5; ++i)
{
if (input_attachments_mask & (1ull << i))
{
const auto layout = decode_layout((layout_blob >> (i * 3)) & 0x7);
result.push_back({i, layout});
}
}
return result;
}
std::vector<VkImageLayout> get_image_layouts() const
{
std::vector<VkImageLayout> result;
for (u32 i = 0, layout_offset = 0; i < 5; ++i, layout_offset += 3)
{
if (const auto layout_encoding = (layout_blob >> layout_offset) & 0x7)
{
result.push_back(decode_layout(layout_encoding));
}
else
{
break;
}
}
return result;
}
};
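// Illustrative encoding (added for exposition; not part of the original file), assuming
// LSB-first bitfield packing: one B8G8R8A8_UNORM color target (VkFormat 44) in
// COLOR_ATTACHMENT_OPTIMAL (layout code 2) with 1 sample and no input attachments packs as
//   encoded == 44ull | (1ull << 16) | (2ull << 22)
// i.e. color_format = 44, sample_count = 1 (bits 16..21), layout_blob = 2 (bits 22..36).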
u64 get_renderpass_key(const std::vector<vk::image*>& images, const std::vector<u8>& input_attachment_ids)
{
renderpass_key_blob key(0);
for (u32 i = 0; i < ::size32(images); ++i)
{
const auto& surface = images[i];
key.set_format(surface->format());
key.set_layout(i, surface->current_layout);
}
for (const auto& ref_id : input_attachment_ids)
{
key.set_input_attachment(ref_id);
}
key.sample_count = images[0]->samples();
return key.encoded;
}
u64 get_renderpass_key(const std::vector<vk::image*>& images, u64 previous_key)
{
// Partial update; assumes compatible renderpass keys
renderpass_key_blob key(previous_key);
key.layout_blob = 0;
for (u32 i = 0; i < ::size32(images); ++i)
{
key.set_layout(i, images[i]->current_layout);
}
return key.encoded;
}
u64 get_renderpass_key(VkFormat surface_format)
{
renderpass_key_blob key(0);
key.sample_count = 1;
switch (surface_format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
key.depth_format = static_cast<u64>(surface_format);
key.layout_blob = static_cast<u64>(VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
break;
default:
key.color_format = static_cast<u64>(surface_format);
key.layout_blob = static_cast<u64>(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
break;
}
return key.encoded;
}
VkRenderPass get_renderpass(VkDevice dev, u64 renderpass_key)
{
// 99.999% of checks will go through this block once on-disk shader cache has loaded
{
reader_lock lock(g_renderpass_cache_mutex);
auto found = g_renderpass_cache.find(renderpass_key);
if (found != g_renderpass_cache.end())
{
return found->second;
}
}
std::lock_guard lock(g_renderpass_cache_mutex);
// Check again
auto found = g_renderpass_cache.find(renderpass_key);
if (found != g_renderpass_cache.end())
{
return found->second;
}
// Decode
renderpass_key_blob key(renderpass_key);
VkSampleCountFlagBits samples = static_cast<VkSampleCountFlagBits>(key.sample_count);
std::vector<VkImageLayout> rtv_layouts;
VkImageLayout dsv_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkFormat color_format = static_cast<VkFormat>(key.color_format);
VkFormat depth_format = static_cast<VkFormat>(key.depth_format);
std::vector<VkAttachmentDescription> attachments = {};
std::vector<VkAttachmentReference> attachment_references;
rtv_layouts = key.get_image_layouts();
if (depth_format)
{
dsv_layout = rtv_layouts.back();
rtv_layouts.pop_back();
}
u32 attachment_count = 0;
for (const auto &layout : rtv_layouts)
{
VkAttachmentDescription color_attachment_description = {};
color_attachment_description.format = color_format;
color_attachment_description.samples = samples;
color_attachment_description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
color_attachment_description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
color_attachment_description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
color_attachment_description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
color_attachment_description.initialLayout = layout;
color_attachment_description.finalLayout = layout;
attachments.push_back(color_attachment_description);
attachment_references.push_back({ attachment_count++, layout });
}
if (depth_format)
{
VkAttachmentDescription depth_attachment_description = {};
depth_attachment_description.format = depth_format;
depth_attachment_description.samples = samples;
depth_attachment_description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
depth_attachment_description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
depth_attachment_description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
depth_attachment_description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
depth_attachment_description.initialLayout = dsv_layout;
depth_attachment_description.finalLayout = dsv_layout;
attachments.push_back(depth_attachment_description);
attachment_references.push_back({ attachment_count, dsv_layout });
}
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = attachment_count;
subpass.pColorAttachments = attachment_count? attachment_references.data() : nullptr;
subpass.pDepthStencilAttachment = depth_format? &attachment_references.back() : nullptr;
const auto input_attachments = key.get_input_attachments();
if (!input_attachments.empty())
{
subpass.inputAttachmentCount = ::size32(input_attachments);
subpass.pInputAttachments = input_attachments.data();
}
VkRenderPassCreateInfo rp_info = {};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = ::size32(attachments);
rp_info.pAttachments = attachments.data();
rp_info.subpassCount = 1;
rp_info.pSubpasses = &subpass;
VkRenderPass result;
CHECK_RESULT(vkCreateRenderPass(dev, &rp_info, NULL, &result));
g_renderpass_cache[renderpass_key] = result;
return result;
}
void clear_renderpass_cache(VkDevice dev)
{
// Wipe current status
g_cached_renderpass_key = 0;
g_cached_renderpass = VK_NULL_HANDLE;
g_current_renderpass.clear();
// Destroy cache
for (const auto &renderpass : g_renderpass_cache)
{
vkDestroyRenderPass(dev, renderpass.second, nullptr);
}
g_renderpass_cache.clear();
}
void begin_renderpass(const vk::command_buffer& cmd, VkRenderPass pass, VkFramebuffer target, const coordu& framebuffer_region)
{
auto& renderpass_info = g_current_renderpass[cmd];
if (renderpass_info.pass == pass && renderpass_info.fbo == target)
{
return;
}
else if (renderpass_info.pass != VK_NULL_HANDLE)
{
end_renderpass(cmd);
}
VkRenderPassBeginInfo rp_begin = {};
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rp_begin.renderPass = pass;
rp_begin.framebuffer = target;
rp_begin.renderArea.offset.x = static_cast<s32>(framebuffer_region.x);
rp_begin.renderArea.offset.y = static_cast<s32>(framebuffer_region.y);
rp_begin.renderArea.extent.width = framebuffer_region.width;
rp_begin.renderArea.extent.height = framebuffer_region.height;
vkCmdBeginRenderPass(cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
renderpass_info = { pass, target };
}
void begin_renderpass(VkDevice dev, const vk::command_buffer& cmd, u64 renderpass_key, VkFramebuffer target, const coordu& framebuffer_region)
{
if (renderpass_key != g_cached_renderpass_key)
{
g_cached_renderpass = get_renderpass(dev, renderpass_key);
g_cached_renderpass_key = renderpass_key;
}
begin_renderpass(cmd, g_cached_renderpass, target, framebuffer_region);
}
void end_renderpass(const vk::command_buffer& cmd)
{
vkCmdEndRenderPass(cmd);
g_current_renderpass[cmd] = {};
}
bool is_renderpass_open(const vk::command_buffer& cmd)
{
return g_current_renderpass[cmd].pass != VK_NULL_HANDLE;
}
void renderpass_op(const vk::command_buffer& cmd, const renderpass_op_callback_t& op)
{
const auto& active = g_current_renderpass[cmd];
op(cmd, active.pass, active.fbo);
}
}
| 10,960
|
C++
|
.cpp
| 337
| 29.053412
| 143
| 0.728133
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,443
|
VKCompute.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCompute.cpp
|
#include "VKCompute.h"
#include "VKHelpers.h"
#include "VKRenderPass.h"
#include "vkutils/buffer_object.h"
#define VK_MAX_COMPUTE_TASKS 8192 // Max number of jobs per frame
namespace vk
{
std::vector<std::pair<VkDescriptorType, u8>> compute_task::get_descriptor_layout()
{
std::vector<std::pair<VkDescriptorType, u8>> result;
result.emplace_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, ssbo_count);
return result;
}
void compute_task::init_descriptors()
{
rsx::simple_array<VkDescriptorPoolSize> descriptor_pool_sizes;
rsx::simple_array<VkDescriptorSetLayoutBinding> bindings;
const auto layout = get_descriptor_layout();
for (const auto &e : layout)
{
descriptor_pool_sizes.push_back({e.first, e.second});
for (unsigned n = 0; n < e.second; ++n)
{
bindings.push_back
({
u32(bindings.size()),
e.first,
1,
VK_SHADER_STAGE_COMPUTE_BIT,
nullptr
});
}
}
// Reserve descriptor pools
m_descriptor_pool.create(*g_render_device, descriptor_pool_sizes);
m_descriptor_layout = vk::descriptors::create_layout(bindings);
VkPipelineLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layout_info.setLayoutCount = 1;
layout_info.pSetLayouts = &m_descriptor_layout;
VkPushConstantRange push_constants{};
if (use_push_constants)
{
push_constants.size = push_constants_size;
push_constants.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
layout_info.pushConstantRangeCount = 1;
layout_info.pPushConstantRanges = &push_constants;
}
CHECK_RESULT(vkCreatePipelineLayout(*g_render_device, &layout_info, nullptr, &m_pipeline_layout));
}
void compute_task::create()
{
if (!initialized)
{
init_descriptors();
switch (vk::get_driver_vendor())
{
case vk::driver_vendor::unknown:
case vk::driver_vendor::INTEL:
case vk::driver_vendor::ANV:
// Intel hw has 8 threads, but LDS allocation behavior makes optimal group size between 64 and 256
// Based on intel's own OpenCL recommended settings
unroll_loops = true;
optimal_kernel_size = 1;
optimal_group_size = 128;
break;
case vk::driver_vendor::LAVAPIPE:
case vk::driver_vendor::V3DV:
// TODO: Actually bench this. Using 32 for now to match other common configurations.
case vk::driver_vendor::DOZEN:
// Actual optimal size depends on the D3D device. Use 32 since it should work well on both AMD and NVIDIA
case vk::driver_vendor::NVIDIA:
case vk::driver_vendor::NVK:
// Warps are multiples of 32. Increasing kernel depth seems to hurt performance (Nier, Big Duck sample)
unroll_loops = true;
optimal_kernel_size = 1;
optimal_group_size = 32;
break;
case vk::driver_vendor::AMD:
case vk::driver_vendor::RADV:
// Wavefronts are multiples of 64. (RDNA also supports wave32)
unroll_loops = false;
optimal_kernel_size = 1;
optimal_group_size = 64;
break;
case vk::driver_vendor::MVK:
unroll_loops = true;
optimal_kernel_size = 1;
optimal_group_size = 256;
break;
}
const auto& gpu = vk::g_render_device->gpu();
max_invocations_x = gpu.get_limits().maxComputeWorkGroupCount[0];
initialized = true;
}
}
void compute_task::destroy()
{
if (initialized)
{
m_shader.destroy();
m_program.reset();
m_param_buffer.reset();
vkDestroyDescriptorSetLayout(*g_render_device, m_descriptor_layout, nullptr);
vkDestroyPipelineLayout(*g_render_device, m_pipeline_layout, nullptr);
m_descriptor_pool.destroy();
initialized = false;
}
}
void compute_task::load_program(const vk::command_buffer& cmd)
{
if (!m_program)
{
m_shader.create(::glsl::program_domain::glsl_compute_program, m_src);
auto handle = m_shader.compile();
VkPipelineShaderStageCreateInfo shader_stage{};
shader_stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
shader_stage.module = handle;
shader_stage.pName = "main";
VkComputePipelineCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
info.stage = shader_stage;
info.layout = m_pipeline_layout;
info.basePipelineIndex = -1;
info.basePipelineHandle = VK_NULL_HANDLE;
auto compiler = vk::get_pipe_compiler();
m_program = compiler->compile(info, m_pipeline_layout, vk::pipe_compiler::COMPILE_INLINE);
declare_inputs();
}
ensure(m_used_descriptors < VK_MAX_COMPUTE_TASKS);
m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout, VK_TRUE);
bind_resources();
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_program->pipeline);
m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline_layout);
}
void compute_task::run(const vk::command_buffer& cmd, u32 invocations_x, u32 invocations_y, u32 invocations_z)
{
// CmdDispatch is outside renderpass scope only
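		// Note that vkCmdDispatch takes workgroup counts, not thread counts; each
		// group dispatched here runs optimal_group_size threads (the generated
		// shader's local_size_x).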
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
load_program(cmd);
vkCmdDispatch(cmd, invocations_x, invocations_y, invocations_z);
}
void compute_task::run(const vk::command_buffer& cmd, u32 num_invocations)
{
u32 invocations_x, invocations_y;
if (num_invocations > max_invocations_x)
{
			// AMD hw reports an annoyingly small maximum number of invocations in the X dimension
			// Split the 1D job into 2 dimensions to accommodate this
			invocations_x = static_cast<u32>(std::floor(std::sqrt(num_invocations)));
			invocations_y = utils::aligned_div(num_invocations, invocations_x); // Round up so that x * y always covers every workgroup
}
else
{
invocations_x = num_invocations;
invocations_y = 1;
}
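		// Worked example (illustrative): num_invocations = 1'000'000 with a 65535
		// X-dimension limit splits into invocations_x = 1000 and invocations_y = 1000,
		// covering every workgroup exactly.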
run(cmd, invocations_x, invocations_y, 1);
}
cs_shuffle_base::cs_shuffle_base()
{
work_kernel =
" value = data[index];\n"
" data[index] = %f(value);\n";
loop_advance =
" index++;\n";
suffix =
"}\n";
}
void cs_shuffle_base::build(const char* function_name, u32 _kernel_size)
{
// Initialize to allow detecting optimal settings
create();
kernel_size = _kernel_size? _kernel_size : optimal_kernel_size;
m_src =
#include "../Program/GLSLSnippets/ShuffleBytes.glsl"
;
const auto parameters_size = utils::align(push_constants_size, 16) / 16;
const std::pair<std::string_view, std::string> syntax_replace[] =
{
{ "%loc", "0" },
{ "%set", "set = 0"},
{ "%ws", std::to_string(optimal_group_size) },
{ "%ks", std::to_string(kernel_size) },
{ "%vars", variables },
{ "%f", function_name },
{ "%md", method_declarations },
{ "%ub", use_push_constants? "layout(push_constant) uniform ubo{ uvec4 params[" + std::to_string(parameters_size) + "]; };\n" : "" },
};
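		// Example substitution (illustrative): with optimal_group_size = 64 the shared
		// template line "layout(local_size_x = %ws, ...)" becomes
		// "layout(local_size_x = 64, ...)"; with kernel_size = 1 the work kernel is
		// emitted once below, with no loop wrapper at all.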
m_src = fmt::replace_all(m_src, syntax_replace);
work_kernel = fmt::replace_all(work_kernel, syntax_replace);
if (kernel_size <= 1)
{
m_src += " {\n" + work_kernel + " }\n";
}
else if (unroll_loops)
{
work_kernel += loop_advance + "\n";
m_src += std::string
(
" //Unrolled loop\n"
" {\n"
);
			// Assemble body with manual loop unroll to try lowering GPR usage
for (u32 n = 0; n < kernel_size; ++n)
{
m_src += work_kernel;
}
m_src += " }\n";
}
else
{
m_src += " for (int loop = 0; loop < KERNEL_SIZE; ++loop)\n";
m_src += " {\n";
m_src += work_kernel;
m_src += loop_advance;
m_src += " }\n";
}
m_src += suffix;
}
void cs_shuffle_base::bind_resources()
{
m_program->bind_buffer({ m_data->value, m_data_offset, m_data_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
}
void cs_shuffle_base::set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count)
{
ensure(use_push_constants);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
}
void cs_shuffle_base::run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_length, u32 data_offset)
{
m_data = data;
m_data_offset = data_offset;
m_data_length = data_length;
const auto num_bytes_per_invocation = optimal_group_size * kernel_size * 4;
const auto num_bytes_to_process = rsx::align2(data_length, num_bytes_per_invocation);
const auto num_invocations = num_bytes_to_process / num_bytes_per_invocation;
if ((num_bytes_to_process + data_offset) > data->size())
{
// Technically robust buffer access should keep the driver from crashing in OOB situations
rsx_log.error("Inadequate buffer length submitted for a compute operation."
"Required=%d bytes, Available=%d bytes", num_bytes_to_process, data->size());
}
compute_task::run(cmd, num_invocations);
}
cs_interleave_task::cs_interleave_task()
{
use_push_constants = true;
push_constants_size = 16;
variables =
" uint block_length = params[0].x >> 2;\n"
" uint z_offset = params[0].y >> 2;\n"
" uint s_offset = params[0].z >> 2;\n"
" uint depth;\n"
" uint stencil;\n"
" uint stencil_shift;\n"
" uint stencil_offset;\n";
}
void cs_interleave_task::bind_resources()
{
m_program->bind_buffer({ m_data->value, m_data_offset, m_ssbo_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
}
void cs_interleave_task::run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_offset, u32 data_length, u32 zeta_offset, u32 stencil_offset)
{
u32 parameters[4] = { data_length, zeta_offset - data_offset, stencil_offset - data_offset, 0 };
set_parameters(cmd, parameters, 4);
ensure(stencil_offset > data_offset);
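		// The bound range must span the depth words plus the packed stencil block;
		// stencil occupies one byte per pixel, i.e. data_length / 4 bytes.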
m_ssbo_length = stencil_offset + (data_length / 4) - data_offset;
cs_shuffle_base::run(cmd, data, data_length, data_offset);
}
cs_scatter_d24x8::cs_scatter_d24x8()
{
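		// Splits packed D24X8 words into separate planes: the high 24 bits land in
		// the depth plane at z_offset, while the low 8 stencil bits are packed
		// four-per-word into the plane at s_offset via atomicOr.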
work_kernel =
" if (index >= block_length)\n"
" return;\n"
"\n"
" value = data[index];\n"
" data[index + z_offset] = (value >> 8);\n"
" stencil_offset = (index / 4);\n"
" stencil_shift = (index % 4) * 8;\n"
" stencil = (value & 0xFF) << stencil_shift;\n"
" atomicOr(data[stencil_offset + s_offset], stencil);\n";
cs_shuffle_base::build("");
}
cs_aggregator::cs_aggregator()
{
ssbo_count = 2;
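		// Two SSBO bindings: the read-only source words at binding 0 and a single
		// u32 accumulator at binding 1, reduced with one atomicAdd per in-range thread.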
create();
m_src =
"#version 450\n"
"layout(local_size_x = %ws, local_size_y = 1, local_size_z = 1) in;\n\n"
"layout(set=0, binding=0, std430) readonly buffer ssbo0{ uint src[]; };\n"
"layout(set=0, binding=1, std430) writeonly buffer ssbo1{ uint result; };\n\n"
"void main()\n"
"{\n"
" if (gl_GlobalInvocationID.x < src.length())\n"
" {\n"
" atomicAdd(result, src[gl_GlobalInvocationID.x]);\n"
" }\n"
"}\n";
const std::pair<std::string_view, std::string> syntax_replace[] =
{
{ "%ws", std::to_string(optimal_group_size) },
};
m_src = fmt::replace_all(m_src, syntax_replace);
}
void cs_aggregator::bind_resources()
{
m_program->bind_buffer({ src->value, 0, block_length }, 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
m_program->bind_buffer({ dst->value, 0, 4 }, 1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_descriptor_set);
}
void cs_aggregator::run(const vk::command_buffer& cmd, const vk::buffer* dst, const vk::buffer* src, u32 num_words)
{
this->dst = dst;
this->src = src;
word_count = num_words;
block_length = num_words * 4;
const u32 linear_invocations = utils::aligned_div(word_count, optimal_group_size);
compute_task::run(cmd, linear_invocations);
}
}
| 11,329 | C++ | .cpp | 326 | 31.199387 | 155 | 0.684923 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,444 | VKResolveHelper.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKResolveHelper.cpp |
#include "stdafx.h"
#include "VKResolveHelper.h"
#include "VKRenderPass.h"
#include "VKRenderTargets.h"
namespace
{
const char *get_format_prefix(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R5G6B5_UNORM_PACK16:
return "r16";
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
return "rgba8";
case VK_FORMAT_R16G16B16A16_SFLOAT:
return "rgba16f";
case VK_FORMAT_R32G32B32A32_SFLOAT:
return "rgba32f";
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
return "r16";
case VK_FORMAT_R8_UNORM:
return "r8";
case VK_FORMAT_R8G8_UNORM:
return "rg8";
case VK_FORMAT_R32_SFLOAT:
return "r32f";
default:
fmt::throw_exception("Unhandled VkFormat 0x%x", u32(format));
}
}
}
namespace vk
{
std::unordered_map<VkFormat, std::unique_ptr<vk::cs_resolve_task>> g_resolve_helpers;
std::unordered_map<VkFormat, std::unique_ptr<vk::cs_unresolve_task>> g_unresolve_helpers;
std::unique_ptr<vk::depthonly_resolve> g_depth_resolver;
std::unique_ptr<vk::depthonly_unresolve> g_depth_unresolver;
std::unique_ptr<vk::stencilonly_resolve> g_stencil_resolver;
std::unique_ptr<vk::stencilonly_unresolve> g_stencil_unresolver;
std::unique_ptr<vk::depthstencil_resolve_EXT> g_depthstencil_resolver;
std::unique_ptr<vk::depthstencil_unresolve_EXT> g_depthstencil_unresolver;
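	// The helper passes above are created lazily on first use via initialize_pass()
	// and destroyed in clear_resolve_helpers().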
template <typename T, typename ...Args>
void initialize_pass(std::unique_ptr<T>& ptr, vk::render_device& dev, Args&&... extras)
{
if (!ptr)
{
ptr = std::make_unique<T>(std::forward<Args>(extras)...);
ptr->create(dev);
}
}
void resolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src)
{
if (src->aspect() == VK_IMAGE_ASPECT_COLOR_BIT)
{
auto &job = g_resolve_helpers[src->format()];
if (!job)
{
const char* format_prefix = get_format_prefix(src->format());
bool require_bgra_swap = false;
if (vk::get_chip_family() == vk::chip_class::NV_kepler &&
src->format() == VK_FORMAT_B8G8R8A8_UNORM)
{
// Workaround for NVIDIA kepler's broken image_load_store
require_bgra_swap = true;
}
job.reset(new vk::cs_resolve_task(format_prefix, require_bgra_swap));
}
job->run(cmd, src, dst);
}
else
{
std::vector<vk::image*> surface = { dst };
auto& dev = cmd.get_command_pool().get_owner();
const auto key = vk::get_renderpass_key(surface);
auto renderpass = vk::get_renderpass(dev, key);
if (src->aspect() & VK_IMAGE_ASPECT_STENCIL_BIT)
{
if (dev.get_shader_stencil_export_support())
{
initialize_pass(g_depthstencil_resolver, dev);
g_depthstencil_resolver->run(cmd, src, dst, renderpass);
}
else
{
initialize_pass(g_depth_resolver, dev);
g_depth_resolver->run(cmd, src, dst, renderpass);
// Chance for optimization here: If the stencil buffer was not used, simply perform a clear operation
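					// stencil_init_flags: the high byte is set when the stencil buffer has
					// been written with non-KEEP ops (see decode_rsx_state in VKGSRender.cpp);
					// otherwise the low byte holds the value to clear with below.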
const auto stencil_init_flags = vk::as_rtt(src)->stencil_init_flags;
if (stencil_init_flags & 0xFF00)
{
initialize_pass(g_stencil_resolver, dev);
g_stencil_resolver->run(cmd, src, dst, renderpass);
}
else
{
VkClearDepthStencilValue clear{ 1.f, stencil_init_flags & 0xFF };
VkImageSubresourceRange range{ VK_IMAGE_ASPECT_STENCIL_BIT, 0, 1, 0, 1 };
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearDepthStencilImage(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
dst->pop_layout(cmd);
}
}
}
else
{
initialize_pass(g_depth_resolver, dev);
g_depth_resolver->run(cmd, src, dst, renderpass);
}
}
}
void unresolve_image(vk::command_buffer& cmd, vk::viewable_image* dst, vk::viewable_image* src)
{
if (src->aspect() == VK_IMAGE_ASPECT_COLOR_BIT)
{
auto &job = g_unresolve_helpers[src->format()];
if (!job)
{
const char* format_prefix = get_format_prefix(src->format());
bool require_bgra_swap = false;
if (vk::get_chip_family() == vk::chip_class::NV_kepler &&
src->format() == VK_FORMAT_B8G8R8A8_UNORM)
{
// Workaround for NVIDIA kepler's broken image_load_store
require_bgra_swap = true;
}
job.reset(new vk::cs_unresolve_task(format_prefix, require_bgra_swap));
}
job->run(cmd, dst, src);
}
else
{
std::vector<vk::image*> surface = { dst };
auto& dev = cmd.get_command_pool().get_owner();
const auto key = vk::get_renderpass_key(surface);
auto renderpass = vk::get_renderpass(dev, key);
if (src->aspect() & VK_IMAGE_ASPECT_STENCIL_BIT)
{
if (dev.get_shader_stencil_export_support())
{
initialize_pass(g_depthstencil_unresolver, dev);
g_depthstencil_unresolver->run(cmd, dst, src, renderpass);
}
else
{
initialize_pass(g_depth_unresolver, dev);
g_depth_unresolver->run(cmd, dst, src, renderpass);
// Chance for optimization here: If the stencil buffer was not used, simply perform a clear operation
const auto stencil_init_flags = vk::as_rtt(dst)->stencil_init_flags;
if (stencil_init_flags & 0xFF00)
{
initialize_pass(g_stencil_unresolver, dev);
g_stencil_unresolver->run(cmd, dst, src, renderpass);
}
else
{
VkClearDepthStencilValue clear{ 1.f, stencil_init_flags & 0xFF };
VkImageSubresourceRange range{ VK_IMAGE_ASPECT_STENCIL_BIT, 0, 1, 0, 1 };
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearDepthStencilImage(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
dst->pop_layout(cmd);
}
}
}
else
{
initialize_pass(g_depth_unresolver, dev);
g_depth_unresolver->run(cmd, dst, src, renderpass);
}
}
}
void clear_resolve_helpers()
{
for (auto &task : g_resolve_helpers)
{
task.second->destroy();
}
for (auto &task : g_unresolve_helpers)
{
task.second->destroy();
}
g_resolve_helpers.clear();
g_unresolve_helpers.clear();
if (g_depth_resolver)
{
g_depth_resolver->destroy();
g_depth_resolver.reset();
}
if (g_stencil_resolver)
{
g_stencil_resolver->destroy();
g_stencil_resolver.reset();
}
if (g_depthstencil_resolver)
{
g_depthstencil_resolver->destroy();
g_depthstencil_resolver.reset();
}
if (g_depth_unresolver)
{
g_depth_unresolver->destroy();
g_depth_unresolver.reset();
}
if (g_stencil_unresolver)
{
g_stencil_unresolver->destroy();
g_stencil_unresolver.reset();
}
if (g_depthstencil_unresolver)
{
g_depthstencil_unresolver->destroy();
g_depthstencil_unresolver.reset();
}
}
void reset_resolve_resources()
{
if (g_depth_resolver) g_depth_resolver->free_resources();
if (g_depth_unresolver) g_depth_unresolver->free_resources();
if (g_stencil_resolver) g_stencil_resolver->free_resources();
if (g_stencil_unresolver) g_stencil_unresolver->free_resources();
if (g_depthstencil_resolver) g_depthstencil_resolver->free_resources();
if (g_depthstencil_unresolver) g_depthstencil_unresolver->free_resources();
}
}
| 7,068 | C++ | .cpp | 224 | 27.392857 | 108 | 0.680757 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,445 | VKFragmentProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFragmentProgram.cpp |
#include "stdafx.h"
#include "VKFragmentProgram.h"
#include "VKCommonDecompiler.h"
#include "VKHelpers.h"
#include "vkutils/device.h"
#include "Emu/system_config.h"
#include "../Program/GLSLCommon.h"
#include "../GCM.h"
std::string VKFragmentDecompilerThread::getFloatTypeName(usz elementCount)
{
return glsl::getFloatTypeNameImpl(elementCount);
}
std::string VKFragmentDecompilerThread::getHalfTypeName(usz elementCount)
{
return glsl::getHalfTypeNameImpl(elementCount);
}
std::string VKFragmentDecompilerThread::getFunction(FUNCTION f)
{
return glsl::getFunctionImpl(f);
}
std::string VKFragmentDecompilerThread::compareFunction(COMPARE f, const std::string &Op0, const std::string &Op1)
{
return glsl::compareFunctionImpl(f, Op0, Op1);
}
void VKFragmentDecompilerThread::insertHeader(std::stringstream & OS)
{
std::vector<const char*> required_extensions;
if (device_props.has_native_half_support)
{
required_extensions.emplace_back("GL_EXT_shader_explicit_arithmetic_types_float16");
}
if (properties.multisampled_sampler_mask)
{
required_extensions.emplace_back("GL_ARB_shader_texture_image_samples");
}
if (m_prog.ctrl & RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION)
{
required_extensions.emplace_back("GL_EXT_fragment_shader_barycentric");
}
OS << "#version 450\n";
for (const auto ext : required_extensions)
{
OS << "#extension " << ext << ": require\n";
}
OS << "#extension GL_ARB_separate_shader_objects: enable\n\n";
glsl::insert_subheader_block(OS);
}
void VKFragmentDecompilerThread::insertInputs(std::stringstream & OS)
{
glsl::insert_fragment_shader_inputs_block(
OS,
glsl::extension_flavour::EXT,
m_prog,
m_parr.params[PF_PARAM_IN],
{
.two_sided_color = !!(properties.in_register_mask & in_diff_color),
.two_sided_specular = !!(properties.in_register_mask & in_spec_color)
},
vk::get_varying_register_location
);
}
void VKFragmentDecompilerThread::insertOutputs(std::stringstream & OS)
{
const std::pair<std::string, std::string> table[] =
{
{ "ocol0", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r0" : "h0" },
{ "ocol1", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r2" : "h4" },
{ "ocol2", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r3" : "h6" },
{ "ocol3", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r4" : "h8" },
};
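	// Register pairing note (as reflected in the table above): each 128-bit rN
	// register aliases two half registers, so r0/r2/r3/r4 correspond to h0/h4/h6/h8
	// when 16-bit exports are in use.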
//NOTE: We do not skip outputs, the only possible combinations are a(0), b(0), ab(0,1), abc(0,1,2), abcd(0,1,2,3)
u8 output_index = 0;
const bool float_type = (m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS) || !device_props.has_native_half_support;
const auto reg_type = float_type ? "vec4" : getHalfTypeName(4);
for (uint i = 0; i < std::size(table); ++i)
{
if (m_parr.HasParam(PF_PARAM_NONE, reg_type, table[i].second))
{
OS << "layout(location=" << std::to_string(output_index++) << ") " << "out vec4 " << table[i].first << ";\n";
vk_prog->output_color_masks[i] = -1;
}
}
}
void VKFragmentDecompilerThread::insertConstants(std::stringstream & OS)
{
u32 location = m_binding_table.textures_first_bind_slot;
for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM])
{
if (PT.type != "sampler1D" &&
PT.type != "sampler2D" &&
PT.type != "sampler3D" &&
PT.type != "samplerCube")
continue;
for (const ParamItem& PI : PT.items)
{
std::string samplerType = PT.type;
ensure(PI.name.length() > 3);
int index = atoi(&PI.name[3]);
const auto mask = (1 << index);
if (properties.multisampled_sampler_mask & mask)
{
if (samplerType != "sampler1D" && samplerType != "sampler2D")
{
rsx_log.error("Unexpected multisampled image type '%s'", samplerType);
}
samplerType = "sampler2DMS";
}
else if (properties.shadow_sampler_mask & mask)
{
if (properties.common_access_sampler_mask & mask)
{
rsx_log.error("Texture unit %d is sampled as both a shadow texture and a depth texture", index);
}
else
{
samplerType += "Shadow";
}
}
vk::glsl::program_input in;
in.location = location;
in.domain = glsl::glsl_fragment_program;
in.name = PI.name;
in.type = vk::glsl::input_type_texture;
inputs.push_back(in);
OS << "layout(set=0, binding=" << location++ << ") uniform " << samplerType << " " << PI.name << ";\n";
if (properties.redirected_sampler_mask & mask)
{
// Insert stencil mirror declaration
in.name += "_stencil";
in.location = location;
inputs.push_back(in);
OS << "layout(set=0, binding=" << location++ << ") uniform u" << samplerType << " " << in.name << ";\n";
}
}
}
ensure(location <= m_binding_table.vertex_textures_first_bind_slot); // "Too many sampler descriptors!"
std::string constants_block;
for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM])
{
if (PT.type == "sampler1D" ||
PT.type == "sampler2D" ||
PT.type == "sampler3D" ||
PT.type == "samplerCube")
continue;
for (const ParamItem& PI : PT.items)
{
constants_block += " " + PT.type + " " + PI.name + ";\n";
}
}
if (!constants_block.empty())
{
OS << "layout(std140, set = 0, binding = 2) uniform FragmentConstantsBuffer\n";
OS << "{\n";
OS << constants_block;
OS << "};\n\n";
}
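	// Illustrative emitted block, assuming a program that references one constant
	// (decompiled constants are named "fc" + offset):
	//   layout(std140, set = 0, binding = 2) uniform FragmentConstantsBuffer
	//   {
	//       vec4 fc148;
	//   };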
OS << "layout(std140, set = 0, binding = 3) uniform FragmentStateBuffer\n";
OS << "{\n";
OS << " float fog_param0;\n";
OS << " float fog_param1;\n";
OS << " uint rop_control;\n";
OS << " float alpha_ref;\n";
OS << " uint reserved;\n";
OS << " uint fog_mode;\n";
OS << " float wpos_scale;\n";
OS << " float wpos_bias;\n";
OS << "};\n\n";
OS << "layout(std140, set = 0, binding = 4) uniform TextureParametersBuffer\n";
OS << "{\n";
OS << " sampler_info texture_parameters[16];\n";
OS << "};\n\n";
OS << "layout(std140, set = 0, binding = " << std::to_string(m_binding_table.rasterizer_env_bind_slot) << ") uniform RasterizerHeap\n";
OS << "{\n";
OS << " uvec4 stipple_pattern[8];\n";
OS << "};\n\n";
vk::glsl::program_input in;
in.location = m_binding_table.fragment_constant_buffers_bind_slot;
in.domain = glsl::glsl_fragment_program;
in.name = "FragmentConstantsBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
inputs.push_back(in);
in.location = m_binding_table.fragment_state_bind_slot;
in.name = "FragmentStateBuffer";
inputs.push_back(in);
in.location = m_binding_table.fragment_texture_params_bind_slot;
in.name = "TextureParametersBuffer";
inputs.push_back(in);
in.location = m_binding_table.rasterizer_env_bind_slot;
in.name = "RasterizerHeap";
inputs.push_back(in);
}
void VKFragmentDecompilerThread::insertGlobalFunctions(std::stringstream &OS)
{
m_shader_props.domain = glsl::glsl_fragment_program;
m_shader_props.require_lit_emulation = properties.has_lit_op;
m_shader_props.fp32_outputs = !!(m_prog.ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS);
m_shader_props.require_depth_conversion = properties.redirected_sampler_mask != 0;
m_shader_props.require_wpos = !!(properties.in_register_mask & in_wpos);
m_shader_props.require_texture_ops = properties.has_tex_op;
m_shader_props.require_tex_shadow_ops = properties.shadow_sampler_mask != 0;
m_shader_props.require_msaa_ops = m_prog.texture_state.multisampled_textures != 0;
m_shader_props.require_texture_expand = properties.has_exp_tex_op;
m_shader_props.require_srgb_to_linear = properties.has_upg;
m_shader_props.require_linear_to_srgb = properties.has_pkg;
m_shader_props.require_fog_read = properties.in_register_mask & in_fogc;
m_shader_props.emulate_coverage_tests = g_cfg.video.antialiasing_level == msaa_level::none;
m_shader_props.emulate_shadow_compare = device_props.emulate_depth_compare;
m_shader_props.low_precision_tests = device_props.has_low_precision_rounding && !(m_prog.ctrl & RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION);
m_shader_props.disable_early_discard = !vk::is_NVIDIA(vk::get_driver_vendor());
m_shader_props.supports_native_fp16 = device_props.has_native_half_support;
m_shader_props.ROP_output_rounding = g_cfg.video.shader_precision != gpu_preset_level::low;
m_shader_props.require_tex1D_ops = properties.has_tex1D;
m_shader_props.require_tex2D_ops = properties.has_tex2D;
m_shader_props.require_tex3D_ops = properties.has_tex3D;
m_shader_props.require_shadowProj_ops = properties.shadow_sampler_mask != 0 && properties.has_texShadowProj;
glsl::insert_glsl_legacy_function(OS, m_shader_props);
}
void VKFragmentDecompilerThread::insertMainStart(std::stringstream & OS)
{
std::set<std::string> output_registers;
if (m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS)
{
output_registers = { "r0", "r2", "r3", "r4" };
}
else
{
output_registers = { "h0", "h4", "h6", "h8" };
}
if (m_ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
{
output_registers.insert("r1");
}
std::string registers;
std::string reg_type;
const auto half4 = getHalfTypeName(4);
for (auto ®_name : output_registers)
{
const auto type = (reg_name[0] == 'r' || !device_props.has_native_half_support)? "vec4" : half4;
if (reg_type == type) [[likely]]
{
registers += ", " + reg_name + " = " + type + "(0.)";
}
else
{
if (!registers.empty())
registers += ";\n";
registers += type + " " + reg_name + " = " + type + "(0.)";
}
reg_type = type;
}
if (!registers.empty())
{
OS << registers << ";\n";
}
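	// e.g. with 32-bit exports plus depth export this emits:
	//   vec4 r0 = vec4(0.), r1 = vec4(0.), r2 = vec4(0.), r3 = vec4(0.), r4 = vec4(0.);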
OS << "void fs_main()\n";
OS << "{\n";
for (const ParamType& PT : m_parr.params[PF_PARAM_NONE])
{
for (const ParamItem& PI : PT.items)
{
if (output_registers.find(PI.name) != output_registers.end())
continue;
OS << " " << PT.type << " " << PI.name;
if (!PI.value.empty())
OS << " = " << PI.value;
OS << ";\n";
}
}
if (properties.has_w_access)
OS << " float in_w = (1. / gl_FragCoord.w);\n";
if (properties.in_register_mask & in_ssa)
OS << " vec4 ssa = gl_FrontFacing ? vec4(1.) : vec4(-1.);\n";
if (properties.in_register_mask & in_wpos)
OS << " vec4 wpos = get_wpos();\n";
if (properties.in_register_mask & in_fogc)
OS << " vec4 fogc = fetch_fog_value(fog_mode);\n";
if (m_prog.two_sided_lighting)
{
if (properties.in_register_mask & in_diff_color)
OS << " vec4 diff_color = gl_FrontFacing ? diff_color1 : diff_color0;\n";
if (properties.in_register_mask & in_spec_color)
OS << " vec4 spec_color = gl_FrontFacing ? spec_color1 : spec_color0;\n";
}
}
void VKFragmentDecompilerThread::insertMainEnd(std::stringstream & OS)
{
OS << "}\n\n";
OS << "void main()\n";
OS << "{\n";
::glsl::insert_rop_init(OS);
OS << "\n" << " fs_main();\n\n";
glsl::insert_rop(OS, m_shader_props);
if (m_ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
{
if (m_parr.HasParam(PF_PARAM_NONE, "vec4", "r1"))
{
// NOTE: Depth writes are always from a fp32 register. See issues section on nvidia's NV_fragment_program spec
// https://www.khronos.org/registry/OpenGL/extensions/NV/NV_fragment_program.txt
// NOTE: Depth writes in OpenGL (and by extension RSX) are clamped to 0,1 range.
// Indeed, hardware tests on realhw prove that even in depth float mode, values outside this range are clamped.
OS << " gl_FragDepth = _saturate(r1.z);\n";
}
else
{
//Input not declared. Leave commented to assist in debugging the shader
OS << " //gl_FragDepth = r1.z;\n";
}
}
OS << "}\n";
}
void VKFragmentDecompilerThread::Task()
{
m_binding_table = vk::g_render_device->get_pipeline_binding_table();
m_shader = Decompile();
vk_prog->SetInputs(inputs);
}
VKFragmentProgram::VKFragmentProgram() = default;
VKFragmentProgram::~VKFragmentProgram()
{
Delete();
}
void VKFragmentProgram::Decompile(const RSXFragmentProgram& prog)
{
u32 size;
std::string source;
VKFragmentDecompilerThread decompiler(source, parr, prog, size, *this);
const auto pdev = vk::get_current_renderer();
if (g_cfg.video.shader_precision == gpu_preset_level::low)
{
decompiler.device_props.has_native_half_support = pdev->get_shader_types_support().allow_float16;
}
decompiler.device_props.emulate_depth_compare = !pdev->get_formats_support().d24_unorm_s8;
decompiler.device_props.has_low_precision_rounding = vk::is_NVIDIA(vk::get_driver_vendor());
decompiler.Task();
shader.create(::glsl::program_domain::glsl_fragment_program, source);
for (const ParamType& PT : decompiler.m_parr.params[PF_PARAM_UNIFORM])
{
for (const ParamItem& PI : PT.items)
{
if (PT.type == "sampler1D" ||
PT.type == "sampler2D" ||
PT.type == "sampler3D" ||
PT.type == "samplerCube")
continue;
usz offset = atoi(PI.name.c_str() + 2);
FragmentConstantOffsetCache.push_back(offset);
}
}
}
void VKFragmentProgram::Compile()
{
if (g_cfg.video.log_programs)
fs::write_file(fs::get_cache_dir() + "shaderlog/FragmentProgram" + std::to_string(id) + ".spirv", fs::rewrite, shader.get_source());
handle = shader.compile();
}
void VKFragmentProgram::Delete()
{
shader.destroy();
}
void VKFragmentProgram::SetInputs(std::vector<vk::glsl::program_input>& inputs)
{
for (auto &it : inputs)
{
uniforms.push_back(it);
}
}
| 12,995 | C++ | .cpp | 366 | 32.797814 | 141 | 0.688013 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,446 | VKCommandStream.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCommandStream.cpp |
#include "stdafx.h"
#include "VKCommandStream.h"
#include "vkutils/descriptors.h"
#include "vkutils/sync.h"
#include "Emu/IdManager.h"
#include "Emu/RSX/RSXOffload.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/system_config.h"
namespace vk
{
// global submit guard to prevent race condition on queue submit
shared_mutex g_submit_mutex;
void acquire_global_submit_lock()
{
g_submit_mutex.lock();
}
void release_global_submit_lock()
{
g_submit_mutex.unlock();
}
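	// Host access to a VkQueue must be externally synchronized per the Vulkan spec;
	// the global lock above guards every vkQueueSubmit, including submits issued
	// from the offloader thread.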
FORCE_INLINE
static void queue_submit_impl(const queue_submit_t& submit_info)
{
ensure(submit_info.pfence);
acquire_global_submit_lock();
VkSubmitInfo info
{
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = submit_info.wait_semaphores_count,
.pWaitSemaphores = submit_info.wait_semaphores.data(),
.pWaitDstStageMask = submit_info.wait_stages.data(),
.commandBufferCount = 1,
.pCommandBuffers = &submit_info.commands,
.signalSemaphoreCount = submit_info.signal_semaphores_count,
.pSignalSemaphores = submit_info.signal_semaphores.data()
};
vkQueueSubmit(submit_info.queue, 1, &info, submit_info.pfence->handle);
release_global_submit_lock();
// Signal fence
submit_info.pfence->signal_flushed();
}
void queue_submit(const queue_submit_t& submit_info, VkBool32 flush)
{
rsx::get_current_renderer()->get_stats().submit_count++;
// Access to this method must be externally synchronized.
// Offloader is guaranteed to never call this for async flushes.
vk::descriptors::flush();
if (!flush && g_cfg.video.multithreaded_rsx)
{
auto packet = new queue_submit_t(submit_info);
g_fxo->get<rsx::dma_manager>().backend_ctrl(rctrl_queue_submit, packet);
}
else
{
queue_submit_impl(submit_info);
}
}
void queue_submit(const queue_submit_t* packet)
{
// Flush-only version used by asynchronous submit processing (MTRSX)
queue_submit_impl(*packet);
}
}
| 1,941 | C++ | .cpp | 64 | 27.59375 | 75 | 0.741693 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,447 | VKGSRender.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKGSRender.cpp |
#include "stdafx.h"
#include "../Overlays/overlay_compile_notification.h"
#include "../Overlays/Shaders/shader_loading_dialog_native.h"
#include "VKAsyncScheduler.h"
#include "VKCommandStream.h"
#include "VKCommonDecompiler.h"
#include "VKCompute.h"
#include "VKGSRender.h"
#include "VKHelpers.h"
#include "VKRenderPass.h"
#include "VKResourceManager.h"
#include "vkutils/buffer_object.h"
#include "vkutils/scratch.h"
#include "Emu/RSX/rsx_methods.h"
#include "Emu/RSX/Host/RSXDMAWriter.h"
#include "Emu/RSX/NV47/HW/context_accessors.define.h"
#include "Emu/Memory/vm_locking.h"
#include "../Program/SPIRVCommon.h"
#include "util/asm.hpp"
namespace vk
{
VkCompareOp get_compare_func(rsx::comparison_function op, bool reverse_direction = false);
std::pair<VkFormat, VkComponentMapping> get_compatible_surface_format(rsx::surface_color_format color_format)
{
const VkComponentMapping o_rgb = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_ONE };
const VkComponentMapping z_rgb = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_ZERO };
switch (color_format)
{
#ifndef __APPLE__
case rsx::surface_color_format::r5g6b5:
return std::make_pair(VK_FORMAT_R5G6B5_UNORM_PACK16, vk::default_component_map);
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
return std::make_pair(VK_FORMAT_A1R5G5B5_UNORM_PACK16, o_rgb);
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return std::make_pair(VK_FORMAT_A1R5G5B5_UNORM_PACK16, z_rgb);
#else
// assign B8G8R8A8_UNORM to formats that are not supported by Metal
case rsx::surface_color_format::r5g6b5:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, vk::default_component_map);
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, o_rgb);
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, z_rgb);
#endif
case rsx::surface_color_format::a8r8g8b8:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, vk::default_component_map);
case rsx::surface_color_format::a8b8g8r8:
return std::make_pair(VK_FORMAT_R8G8B8A8_UNORM, vk::default_component_map);
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
return std::make_pair(VK_FORMAT_R8G8B8A8_UNORM, o_rgb);
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
return std::make_pair(VK_FORMAT_R8G8B8A8_UNORM, z_rgb);
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, z_rgb);
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, o_rgb);
case rsx::surface_color_format::w16z16y16x16:
return std::make_pair(VK_FORMAT_R16G16B16A16_SFLOAT, vk::default_component_map);
case rsx::surface_color_format::w32z32y32x32:
return std::make_pair(VK_FORMAT_R32G32B32A32_SFLOAT, vk::default_component_map);
case rsx::surface_color_format::b8:
{
const VkComponentMapping no_alpha = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_ONE };
return std::make_pair(VK_FORMAT_R8_UNORM, no_alpha);
}
case rsx::surface_color_format::g8b8:
{
const VkComponentMapping gb_rg = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G };
return std::make_pair(VK_FORMAT_R8G8_UNORM, gb_rg);
}
case rsx::surface_color_format::x32:
{
const VkComponentMapping rrrr = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R };
return std::make_pair(VK_FORMAT_R32_SFLOAT, rrrr);
}
default:
rsx_log.error("Surface color buffer: Unsupported surface color format (0x%x)", static_cast<u32>(color_format));
return std::make_pair(VK_FORMAT_B8G8R8A8_UNORM, vk::default_component_map);
}
}
VkLogicOp get_logic_op(rsx::logic_op op)
{
switch (op)
{
case rsx::logic_op::logic_clear: return VK_LOGIC_OP_CLEAR;
case rsx::logic_op::logic_and: return VK_LOGIC_OP_AND;
case rsx::logic_op::logic_and_reverse: return VK_LOGIC_OP_AND_REVERSE;
case rsx::logic_op::logic_copy: return VK_LOGIC_OP_COPY;
case rsx::logic_op::logic_and_inverted: return VK_LOGIC_OP_AND_INVERTED;
case rsx::logic_op::logic_noop: return VK_LOGIC_OP_NO_OP;
case rsx::logic_op::logic_xor: return VK_LOGIC_OP_XOR;
case rsx::logic_op::logic_or : return VK_LOGIC_OP_OR;
case rsx::logic_op::logic_nor: return VK_LOGIC_OP_NOR;
case rsx::logic_op::logic_equiv: return VK_LOGIC_OP_EQUIVALENT;
case rsx::logic_op::logic_invert: return VK_LOGIC_OP_INVERT;
case rsx::logic_op::logic_or_reverse: return VK_LOGIC_OP_OR_REVERSE;
case rsx::logic_op::logic_copy_inverted: return VK_LOGIC_OP_COPY_INVERTED;
case rsx::logic_op::logic_or_inverted: return VK_LOGIC_OP_OR_INVERTED;
case rsx::logic_op::logic_nand: return VK_LOGIC_OP_NAND;
case rsx::logic_op::logic_set: return VK_LOGIC_OP_SET;
default:
fmt::throw_exception("Unknown logic op 0x%x", static_cast<u32>(op));
}
}
VkBlendFactor get_blend_factor(rsx::blend_factor factor)
{
switch (factor)
{
case rsx::blend_factor::one: return VK_BLEND_FACTOR_ONE;
case rsx::blend_factor::zero: return VK_BLEND_FACTOR_ZERO;
case rsx::blend_factor::src_alpha: return VK_BLEND_FACTOR_SRC_ALPHA;
case rsx::blend_factor::dst_alpha: return VK_BLEND_FACTOR_DST_ALPHA;
case rsx::blend_factor::src_color: return VK_BLEND_FACTOR_SRC_COLOR;
case rsx::blend_factor::dst_color: return VK_BLEND_FACTOR_DST_COLOR;
case rsx::blend_factor::constant_color: return VK_BLEND_FACTOR_CONSTANT_COLOR;
case rsx::blend_factor::constant_alpha: return VK_BLEND_FACTOR_CONSTANT_ALPHA;
case rsx::blend_factor::one_minus_src_color: return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
case rsx::blend_factor::one_minus_dst_color: return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
case rsx::blend_factor::one_minus_src_alpha: return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case rsx::blend_factor::one_minus_dst_alpha: return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
case rsx::blend_factor::one_minus_constant_alpha: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
case rsx::blend_factor::one_minus_constant_color: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
case rsx::blend_factor::src_alpha_saturate: return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
default:
fmt::throw_exception("Unknown blend factor 0x%x", static_cast<u32>(factor));
}
}
VkBlendOp get_blend_op(rsx::blend_equation op)
{
switch (op)
{
case rsx::blend_equation::add_signed:
rsx_log.trace("blend equation add_signed used. Emulating using FUNC_ADD");
[[fallthrough]];
case rsx::blend_equation::add:
return VK_BLEND_OP_ADD;
case rsx::blend_equation::subtract: return VK_BLEND_OP_SUBTRACT;
case rsx::blend_equation::reverse_subtract_signed:
rsx_log.trace("blend equation reverse_subtract_signed used. Emulating using FUNC_REVERSE_SUBTRACT");
[[fallthrough]];
case rsx::blend_equation::reverse_subtract: return VK_BLEND_OP_REVERSE_SUBTRACT;
case rsx::blend_equation::min: return VK_BLEND_OP_MIN;
case rsx::blend_equation::max: return VK_BLEND_OP_MAX;
default:
fmt::throw_exception("Unknown blend op: 0x%x", static_cast<u32>(op));
}
}
VkStencilOp get_stencil_op(rsx::stencil_op op)
{
switch (op)
{
case rsx::stencil_op::keep: return VK_STENCIL_OP_KEEP;
case rsx::stencil_op::zero: return VK_STENCIL_OP_ZERO;
case rsx::stencil_op::replace: return VK_STENCIL_OP_REPLACE;
case rsx::stencil_op::incr: return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
case rsx::stencil_op::decr: return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
case rsx::stencil_op::invert: return VK_STENCIL_OP_INVERT;
case rsx::stencil_op::incr_wrap: return VK_STENCIL_OP_INCREMENT_AND_WRAP;
case rsx::stencil_op::decr_wrap: return VK_STENCIL_OP_DECREMENT_AND_WRAP;
default:
fmt::throw_exception("Unknown stencil op: 0x%x", static_cast<u32>(op));
}
}
VkFrontFace get_front_face(rsx::front_face ffv)
{
switch (ffv)
{
case rsx::front_face::cw: return VK_FRONT_FACE_CLOCKWISE;
case rsx::front_face::ccw: return VK_FRONT_FACE_COUNTER_CLOCKWISE;
default:
fmt::throw_exception("Unknown front face value: 0x%x", static_cast<u32>(ffv));
}
}
VkCullModeFlags get_cull_face(rsx::cull_face cfv)
{
switch (cfv)
{
case rsx::cull_face::back: return VK_CULL_MODE_BACK_BIT;
case rsx::cull_face::front: return VK_CULL_MODE_FRONT_BIT;
case rsx::cull_face::front_and_back: return VK_CULL_MODE_FRONT_AND_BACK;
default:
fmt::throw_exception("Unknown cull face value: 0x%x", static_cast<u32>(cfv));
}
}
struct vertex_input_assembly_state
{
VkPrimitiveTopology primitive;
VkBool32 restart_index_enabled;
};
vertex_input_assembly_state decode_vertex_input_assembly_state()
{
vertex_input_assembly_state state{};
const auto& current_draw = rsx::method_registers.current_draw_clause;
const auto [primitive, emulated_primitive] = vk::get_appropriate_topology(current_draw.primitive);
if (rsx::method_registers.restart_index_enabled() &&
!current_draw.is_disjoint_primitive &&
current_draw.command == rsx::draw_command::indexed &&
!emulated_primitive &&
!vk::emulate_primitive_restart(current_draw.primitive))
{
state.restart_index_enabled = VK_TRUE;
}
state.primitive = primitive;
return state;
}
// TODO: This should be deprecated soon (kd)
vk::pipeline_props decode_rsx_state(
const vertex_input_assembly_state& vertex_input,
vk::render_target* ds,
const rsx::backend_configuration& backend_config,
u8 num_draw_buffers,
u8 num_rasterization_samples,
bool depth_bounds_support)
{
vk::pipeline_props properties{};
// Input assembly
properties.state.set_primitive_type(vertex_input.primitive);
properties.state.enable_primitive_restart(vertex_input.restart_index_enabled);
// Rasterizer state
properties.state.set_attachment_count(num_draw_buffers);
properties.state.set_front_face(vk::get_front_face(rsx::method_registers.front_face_mode()));
properties.state.enable_depth_clamp(rsx::method_registers.depth_clamp_enabled() || !rsx::method_registers.depth_clip_enabled());
properties.state.enable_depth_bias(true);
properties.state.enable_depth_bounds_test(depth_bounds_support);
if (rsx::method_registers.depth_test_enabled())
{
//NOTE: Like stencil, depth write is meaningless without depth test
properties.state.set_depth_mask(rsx::method_registers.depth_write_enabled());
properties.state.enable_depth_test(vk::get_compare_func(rsx::method_registers.depth_func()));
}
if (rsx::method_registers.cull_face_enabled())
{
properties.state.enable_cull_face(vk::get_cull_face(rsx::method_registers.cull_face_mode()));
}
const auto host_write_mask = rsx::get_write_output_mask(rsx::method_registers.surface_color());
for (uint index = 0; index < num_draw_buffers; ++index)
{
bool color_mask_b = rsx::method_registers.color_mask_b(index);
bool color_mask_g = rsx::method_registers.color_mask_g(index);
bool color_mask_r = rsx::method_registers.color_mask_r(index);
bool color_mask_a = rsx::method_registers.color_mask_a(index);
switch (rsx::method_registers.surface_color())
{
case rsx::surface_color_format::b8:
rsx::get_b8_colormask(color_mask_r, color_mask_g, color_mask_b, color_mask_a);
break;
case rsx::surface_color_format::g8b8:
rsx::get_g8b8_r8g8_colormask(color_mask_r, color_mask_g, color_mask_b, color_mask_a);
break;
default:
break;
}
properties.state.set_color_mask(
index,
color_mask_r && host_write_mask[0],
color_mask_g && host_write_mask[1],
color_mask_b && host_write_mask[2],
color_mask_a && host_write_mask[3]);
}
// LogicOp and Blend are mutually exclusive. If both are enabled, LogicOp takes precedence.
if (rsx::method_registers.logic_op_enabled())
{
properties.state.enable_logic_op(vk::get_logic_op(rsx::method_registers.logic_operation()));
}
else
{
bool mrt_blend_enabled[] =
{
rsx::method_registers.blend_enabled(),
rsx::method_registers.blend_enabled_surface_1(),
rsx::method_registers.blend_enabled_surface_2(),
rsx::method_registers.blend_enabled_surface_3()
};
VkBlendFactor sfactor_rgb, sfactor_a, dfactor_rgb, dfactor_a;
VkBlendOp equation_rgb, equation_a;
if (mrt_blend_enabled[0] || mrt_blend_enabled[1] || mrt_blend_enabled[2] || mrt_blend_enabled[3])
{
sfactor_rgb = vk::get_blend_factor(rsx::method_registers.blend_func_sfactor_rgb());
sfactor_a = vk::get_blend_factor(rsx::method_registers.blend_func_sfactor_a());
dfactor_rgb = vk::get_blend_factor(rsx::method_registers.blend_func_dfactor_rgb());
dfactor_a = vk::get_blend_factor(rsx::method_registers.blend_func_dfactor_a());
equation_rgb = vk::get_blend_op(rsx::method_registers.blend_equation_rgb());
equation_a = vk::get_blend_op(rsx::method_registers.blend_equation_a());
for (u8 idx = 0; idx < num_draw_buffers; ++idx)
{
if (mrt_blend_enabled[idx])
{
properties.state.enable_blend(idx, sfactor_rgb, sfactor_a, dfactor_rgb, dfactor_a, equation_rgb, equation_a);
}
}
}
}
if (rsx::method_registers.stencil_test_enabled())
{
if (!rsx::method_registers.two_sided_stencil_test_enabled())
{
properties.state.enable_stencil_test(
vk::get_stencil_op(rsx::method_registers.stencil_op_fail()),
vk::get_stencil_op(rsx::method_registers.stencil_op_zfail()),
vk::get_stencil_op(rsx::method_registers.stencil_op_zpass()),
vk::get_compare_func(rsx::method_registers.stencil_func()),
0xFF, 0xFF); //write mask, func_mask, ref are dynamic
}
else
{
properties.state.enable_stencil_test_separate(0,
vk::get_stencil_op(rsx::method_registers.stencil_op_fail()),
vk::get_stencil_op(rsx::method_registers.stencil_op_zfail()),
vk::get_stencil_op(rsx::method_registers.stencil_op_zpass()),
vk::get_compare_func(rsx::method_registers.stencil_func()),
0xFF, 0xFF); //write mask, func_mask, ref are dynamic
properties.state.enable_stencil_test_separate(1,
vk::get_stencil_op(rsx::method_registers.back_stencil_op_fail()),
vk::get_stencil_op(rsx::method_registers.back_stencil_op_zfail()),
vk::get_stencil_op(rsx::method_registers.back_stencil_op_zpass()),
vk::get_compare_func(rsx::method_registers.back_stencil_func()),
0xFF, 0xFF); //write mask, func_mask, ref are dynamic
}
if (ds && ds->samples() > 1 && !(ds->stencil_init_flags & 0xFF00))
{
if (properties.state.ds.front.failOp != VK_STENCIL_OP_KEEP ||
properties.state.ds.front.depthFailOp != VK_STENCIL_OP_KEEP ||
properties.state.ds.front.passOp != VK_STENCIL_OP_KEEP ||
properties.state.ds.back.failOp != VK_STENCIL_OP_KEEP ||
properties.state.ds.back.depthFailOp != VK_STENCIL_OP_KEEP ||
properties.state.ds.back.passOp != VK_STENCIL_OP_KEEP)
{
					// Set bit 8 to signal that a full bit-wise stencil transfer is required
ds->stencil_init_flags |= (1 << 8);
}
}
}
if (backend_config.supports_hw_a2c || num_rasterization_samples > 1)
{
const bool alpha_to_one_enable = rsx::method_registers.msaa_alpha_to_one_enabled() && backend_config.supports_hw_a2one;
properties.state.set_multisample_state(
num_rasterization_samples,
rsx::method_registers.msaa_sample_mask(),
rsx::method_registers.msaa_enabled(),
rsx::method_registers.msaa_alpha_to_coverage_enabled(),
alpha_to_one_enable);
			// A problem observed on multiple GPUs is that interior geometry edges can resolve 0 samples unless we force a shading rate of 1.
// For whatever reason, the way MSAA images are 'resolved' on PS3 bypasses this issue.
// NOTE: We do not do image resolve at all, the output is merely 'exploded' and the guest application is responsible for doing the resolve in software as it is on real hardware.
properties.state.set_multisample_shading_rate(1.f);
}
return properties;
}
}
namespace
{
std::tuple<VkPipelineLayout, VkDescriptorSetLayout> get_shared_pipeline_layout(VkDevice dev)
{
const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
u32 idx = 0;
		// Vertex streams: one texel buffer each for cacheable data, transient data, and the vertex layout
for (int i = 0; i < 3; i++)
{
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.vertex_buffers_first_bind_slot + i;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
}
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_constant_buffers_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_state_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_texture_params_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.vertex_constant_buffers_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
bindings[idx].binding = binding_table.vertex_params_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.conditional_render_predicate_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.rasterizer_env_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
for (auto binding = binding_table.textures_first_bind_slot;
binding < binding_table.vertex_textures_first_bind_slot;
binding++)
{
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
}
for (int i = 0; i < rsx::limits::vertex_textures_count; i++)
{
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.vertex_textures_first_bind_slot + i;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
}
ensure(idx == binding_table.total_descriptor_bindings);
std::array<VkPushConstantRange, 1> push_constants;
push_constants[0].offset = 0;
push_constants[0].size = 16;
push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
if (vk::emulate_conditional_rendering())
{
// Conditional render toggle
push_constants[0].size = 20;
}
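		// The base 16-byte push-constant range is extended by one dword when
		// conditional rendering is emulated so shaders can read the predicate toggle.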
const auto set_layout = vk::descriptors::create_layout(bindings);
VkPipelineLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layout_info.setLayoutCount = 1;
layout_info.pSetLayouts = &set_layout;
layout_info.pushConstantRangeCount = 1;
layout_info.pPushConstantRanges = push_constants.data();
VkPipelineLayout result;
CHECK_RESULT(vkCreatePipelineLayout(dev, &layout_info, nullptr, &result));
return std::make_tuple(result, set_layout);
}
}
u64 VKGSRender::get_cycles()
{
return thread_ctrl::get_cycles(static_cast<named_thread<VKGSRender>&>(*this));
}
VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
{
// Initialize dependencies
g_fxo->need<rsx::dma_manager>();
if (!m_instance.create("RPCS3"))
{
rsx_log.fatal("Could not find a Vulkan compatible GPU driver. Your GPU(s) may not support Vulkan, or you need to install the Vulkan runtime and drivers");
m_device = VK_NULL_HANDLE;
return;
}
m_instance.bind();
std::vector<vk::physical_device>& gpus = m_instance.enumerate_devices();
//Actually confirm that the loader found at least one compatible device
//This should not happen unless something is wrong with the driver setup on the target system
if (gpus.empty())
{
//We can't throw in Emulator::Load, so we show error and return
rsx_log.fatal("No compatible GPU devices found");
m_device = VK_NULL_HANDLE;
return;
}
bool gpu_found = false;
std::string adapter_name = g_cfg.video.vk.adapter;
display_handle_t display = m_frame->handle();
#ifdef HAVE_X11
std::visit([this](auto&& p) {
using T = std::decay_t<decltype(p)>;
if constexpr (std::is_same_v<T, std::pair<Display*, Window>>)
{
m_display_handle = p.first; XFlush(m_display_handle);
}
}, display);
#endif
for (auto &gpu : gpus)
{
if (gpu.get_name() == adapter_name)
{
m_swapchain.reset(m_instance.create_swapchain(display, gpu));
gpu_found = true;
break;
}
}
if (!gpu_found || adapter_name.empty())
{
m_swapchain.reset(m_instance.create_swapchain(display, gpus[0]));
}
if (!m_swapchain)
{
m_device = VK_NULL_HANDLE;
rsx_log.fatal("Could not successfully initialize a swapchain");
return;
}
m_device = const_cast<vk::render_device*>(&m_swapchain->get_device());
vk::set_current_renderer(m_swapchain->get_device());
m_swapchain_dims.width = m_frame->client_width();
m_swapchain_dims.height = m_frame->client_height();
if (!m_swapchain->init(m_swapchain_dims.width, m_swapchain_dims.height))
{
swapchain_unavailable = true;
}
//create command buffer...
m_command_buffer_pool.create((*m_device), m_device->get_graphics_queue_family());
m_primary_cb_list.create(m_command_buffer_pool, vk::command_buffer::access_type_hint::flush_only);
m_current_command_buffer = m_primary_cb_list.get();
m_current_command_buffer->begin();
//Create secondary command_buffer for parallel operations
m_secondary_command_buffer_pool.create((*m_device), m_device->get_graphics_queue_family());
m_secondary_cb_list.create(m_secondary_command_buffer_pool, vk::command_buffer::access_type_hint::all);
//Precalculated stuff
std::tie(m_pipeline_layout, m_descriptor_layouts) = get_shared_pipeline_layout(*m_device);
//Occlusion
m_occlusion_query_manager = std::make_unique<vk::query_pool_manager>(*m_device, VK_QUERY_TYPE_OCCLUSION, OCCLUSION_MAX_POOL_SIZE);
m_occlusion_map.resize(rsx::reports::occlusion_query_count);
for (u32 n = 0; n < rsx::reports::occlusion_query_count; ++n)
m_occlusion_query_data[n].driver_handle = n;
if (g_cfg.video.precise_zpass_count)
{
m_occlusion_query_manager->set_control_flags(VK_QUERY_CONTROL_PRECISE_BIT, 0);
}
// Generate frame contexts
const u32 max_draw_calls = m_device->get_descriptor_max_draw_calls();
const auto& binding_table = m_device->get_pipeline_binding_table();
const u32 num_fs_samplers = binding_table.vertex_textures_first_bind_slot - binding_table.textures_first_bind_slot;
rsx::simple_array<VkDescriptorPoolSize> descriptor_type_sizes =
{
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 },
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , (num_fs_samplers + 4) },
// Conditional rendering predicate slot; refactor to allow skipping this when not needed
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1 }
};
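	// Per-set descriptor counts; the pool created below is sized to back up to
	// max_draw_calls descriptor sets per frame.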
m_descriptor_pool.create(*m_device, descriptor_type_sizes, max_draw_calls);
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
// VRAM allocation
m_attrib_ring_info.create(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VK_ATTRIB_RING_BUFFER_SIZE_M * 0x100000, "attrib buffer", 0x400000, VK_TRUE);
m_fragment_env_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "fragment env buffer");
m_vertex_env_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "vertex env buffer");
m_fragment_texture_params_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "fragment texture params buffer");
m_vertex_layout_ring_info.create(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "vertex layout buffer", 0x10000, VK_TRUE);
m_fragment_constants_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "fragment constants buffer");
m_transform_constants_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_TRANSFORM_CONSTANTS_BUFFER_SIZE_M * 0x100000, "transform constants buffer");
m_index_buffer_ring_info.create(VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_INDEX_RING_BUFFER_SIZE_M * 0x100000, "index buffer");
m_texture_upload_buffer_ring_info.create(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_TEXTURE_UPLOAD_RING_BUFFER_SIZE_M * 0x100000, "texture upload buffer", 32 * 0x100000);
m_raster_env_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "raster env buffer");
const auto shadermode = g_cfg.video.shadermode.get();
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
{
m_vertex_instructions_buffer.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 64 * 0x100000, "vertex instructions buffer", 512 * 16);
m_fragment_instructions_buffer.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 64 * 0x100000, "fragment instructions buffer", 2048);
}
// Initialize optional allocation information with placeholders
m_vertex_env_buffer_info = { m_vertex_env_ring_info.heap->value, 0, 32 };
m_vertex_constants_buffer_info = { m_transform_constants_ring_info.heap->value, 0, 32 };
m_fragment_env_buffer_info = { m_fragment_env_ring_info.heap->value, 0, 32 };
m_fragment_texture_params_buffer_info = { m_fragment_texture_params_ring_info.heap->value, 0, 32 };
m_raster_env_buffer_info = { m_raster_env_ring_info.heap->value, 0, 128 };
const auto limits = m_device->gpu().get_limits();
m_texbuffer_view_size = std::min(limits.maxTexelBufferElements, VK_ATTRIB_RING_BUFFER_SIZE_M * 0x100000u);
if (m_texbuffer_view_size < 0x800000)
{
		// Warn; in practice this is only expected on macOS
rsx_log.warning("Current driver may crash due to memory limitations (%uk)", m_texbuffer_view_size / 1024);
}
for (auto &ctx : frame_context_storage)
{
vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.present_wait_semaphore);
vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.acquire_signal_semaphore);
}
const auto& memory_map = m_device->get_memory_mapping();
null_buffer = std::make_unique<vk::buffer>(*m_device, 32, memory_map.device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, 0, VMM_ALLOCATION_POOL_UNDEFINED);
null_buffer_view = std::make_unique<vk::buffer_view>(*m_device, null_buffer->value, VK_FORMAT_R8_UINT, 0, 32);
spirv::initialize_compiler_context();
vk::initialize_pipe_compiler(g_cfg.video.shader_compiler_threads_count);
m_prog_buffer = std::make_unique<vk::program_cache>
(
[this](const vk::pipeline_props& props, const RSXVertexProgram& vp, const RSXFragmentProgram& fp)
{
// Program was linked or queued for linking
m_shaders_cache->store(props, vp, fp);
}
);
if (g_cfg.video.disable_vertex_cache)
m_vertex_cache = std::make_unique<vk::null_vertex_cache>();
else
m_vertex_cache = std::make_unique<vk::weak_vertex_cache>();
m_shaders_cache = std::make_unique<vk::shader_cache>(*m_prog_buffer, "vulkan", "v1.94");
for (u32 i = 0; i < m_swapchain->get_swap_image_count(); ++i)
{
const auto target_layout = m_swapchain->get_optimal_present_layout();
const auto target_image = m_swapchain->get_image(i);
VkClearColorValue clear_color{};
VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, target_layout, range);
}
m_current_frame = &frame_context_storage[0];
m_texture_cache.initialize((*m_device), m_device->get_graphics_queue(),
m_texture_upload_buffer_ring_info);
vk::get_overlay_pass<vk::ui_overlay_renderer>()->init(*m_current_command_buffer, m_texture_upload_buffer_ring_info);
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
{
m_shader_interpreter.init(*m_device);
}
backend_config.supports_multidraw = true;
// NVIDIA has broken attribute interpolation
backend_config.supports_normalized_barycentrics = (
!vk::is_NVIDIA(vk::get_driver_vendor()) ||
!m_device->get_barycoords_support() ||
g_cfg.video.shader_precision == gpu_preset_level::low);
// NOTE: We do not actually need multiple sample support for A2C to work
// This is here for visual consistency - will be removed when AA problems due to mipmaps are fixed
if (g_cfg.video.antialiasing_level != msaa_level::none)
{
backend_config.supports_hw_msaa = true;
backend_config.supports_hw_a2c = true;
backend_config.supports_hw_a2one = m_device->get_alpha_to_one_support();
}
// NOTE: On NVIDIA cards going back decades (including the PS3) there is a slight normalization inaccuracy in compressed formats.
// Confirmed in BLES01916 (The Evil Within) which uses RGB565 for some virtual texturing data.
backend_config.supports_hw_renormalization = vk::is_NVIDIA(vk::get_driver_vendor());
// Conditional rendering support
// Do not use on MVK due to a speedhack we rely on (streaming results without stopping the current renderpass)
// If we break the renderpasses, MVK loses around 75% of its performance in troublesome spots compared to just doing a CPU sync
backend_config.supports_hw_conditional_render = (vk::get_driver_vendor() != vk::driver_vendor::MVK);
// Passthrough DMA
backend_config.supports_passthrough_dma = m_device->get_external_memory_host_support();
// Host sync
backend_config.supports_host_gpu_labels = !!g_cfg.video.host_label_synchronization;
// Async compute and related operations
if (g_cfg.video.vk.asynchronous_texture_streaming)
{
// Optimistic, enable async compute
backend_config.supports_asynchronous_compute = true;
if (m_device->get_graphics_queue() == m_device->get_transfer_queue())
{
rsx_log.error("Cannot run graphics and async transfer in the same queue. Async uploads are disabled. This is a limitation of your GPU");
backend_config.supports_asynchronous_compute = false;
}
}
// Sanity checks
switch (vk::get_driver_vendor())
{
case vk::driver_vendor::NVIDIA:
if (backend_config.supports_asynchronous_compute)
{
if (auto chip_family = vk::get_chip_family();
chip_family == vk::chip_class::NV_kepler || chip_family == vk::chip_class::NV_maxwell)
{
rsx_log.warning("Older NVIDIA cards do not meet requirements for true asynchronous compute due to some driver fakery.");
}
rsx_log.notice("Forcing safe async compute for NVIDIA device to avoid crashing.");
g_cfg.video.vk.asynchronous_scheduler.set(vk_gpu_scheduler_mode::safe);
}
break;
case vk::driver_vendor::NVK:
// TODO: Verify if this driver crashes or not
rsx_log.warning("NVK behavior with passthrough DMA is unknown. Proceed with caution.");
break;
#if !defined(_WIN32)
// Anything running on AMDGPU kernel driver will not work due to the check for fd-backed memory allocations
case vk::driver_vendor::RADV:
case vk::driver_vendor::AMD:
#if !defined(__linux__)
// Intel chipsets would fail on BSD in most cases and DRM_IOCTL_i915_GEM_USERPTR unimplemented
case vk::driver_vendor::ANV:
#endif
if (backend_config.supports_passthrough_dma)
{
rsx_log.error("AMDGPU kernel driver on Linux and INTEL driver on some platforms cannot support passthrough DMA buffers.");
backend_config.supports_passthrough_dma = false;
}
break;
#endif
case vk::driver_vendor::MVK:
// Async compute crashes immediately on Apple GPUs
rsx_log.error("Apple GPUs are incompatible with the current implementation of asynchronous texture decoding.");
backend_config.supports_asynchronous_compute = false;
break;
case vk::driver_vendor::INTEL:
// As expected host allocations won't work on INTEL despite the extension being present
if (backend_config.supports_passthrough_dma)
{
rsx_log.error("INTEL driver does not support passthrough DMA buffers");
backend_config.supports_passthrough_dma = false;
}
break;
default: break;
}
if (backend_config.supports_asynchronous_compute)
{
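// Synchronization2 barrier: make async transfer/compute writes visible to vertex and fragment shader reads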
m_async_compute_memory_barrier =
{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR | VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR,
.srcAccessMask = VK_ACCESS_2_MEMORY_WRITE_BIT_KHR,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR | VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR
};
m_async_compute_dependency_info =
{
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR,
.memoryBarrierCount = 1,
.pMemoryBarriers = &m_async_compute_memory_barrier
};
// Run only if async compute can be used.
g_fxo->init<vk::AsyncTaskScheduler>(g_cfg.video.vk.asynchronous_scheduler, m_async_compute_dependency_info);
}
if (backend_config.supports_host_gpu_labels)
{
if (backend_config.supports_passthrough_dma)
{
m_host_object_data = std::make_unique<vk::buffer>(*m_device,
0x10000,
memory_map.host_visible_coherent, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_BUFFER_USAGE_TRANSFER_DST_BIT, 0,
VMM_ALLOCATION_POOL_SYSTEM);
m_host_dma_ctrl = std::make_unique<rsx::RSXDMAWriter>(m_host_object_data->map(0, 0x10000));
}
else
{
rsx_log.error("Your GPU/driver does not support extensions required to enable passthrough DMA emulation. Host GPU labels will be disabled.");
backend_config.supports_host_gpu_labels = false;
}
}
if (!backend_config.supports_host_gpu_labels &&
!backend_config.supports_asynchronous_compute)
{
// Disable passthrough DMA unless we enable a feature that requires it.
// I'm avoiding an explicit checkbox for this until I figure out why host labels don't fix all problems with passthrough.
backend_config.supports_passthrough_dma = false;
}
}
VKGSRender::~VKGSRender()
{
if (m_device == VK_NULL_HANDLE)
{
// Initialization failed
return;
}
// Flush DMA queue
while (!g_fxo->get<rsx::dma_manager>().sync())
{
do_local_task(rsx::FIFO::state::lock_wait);
}
// Wait for device to finish up with resources
vkDeviceWaitIdle(*m_device);
// Globals. TODO: Refactor lifetime management
if (auto async_scheduler = g_fxo->try_get<vk::AsyncTaskScheduler>())
{
async_scheduler->destroy();
}
// GC cleanup
vk::get_resource_manager()->flush();
// Host data
if (m_host_object_data)
{
m_host_object_data->unmap();
m_host_object_data.reset();
}
// Clear flush requests
m_flush_requests.clear_pending_flag();
// Shaders
vk::destroy_pipe_compiler(); // Ensure no pending shaders being compiled
spirv::finalize_compiler_context(); // Shut down the glslang compiler
m_prog_buffer->clear(); // Delete shader objects
m_shader_interpreter.destroy();
m_persistent_attribute_storage.reset();
m_volatile_attribute_storage.reset();
m_vertex_layout_storage.reset();
// Upscaler (references some global resources)
m_upscaler.reset();
// Heaps
m_attrib_ring_info.destroy();
m_fragment_env_ring_info.destroy();
m_vertex_env_ring_info.destroy();
m_fragment_texture_params_ring_info.destroy();
m_vertex_layout_ring_info.destroy();
m_fragment_constants_ring_info.destroy();
m_transform_constants_ring_info.destroy();
m_index_buffer_ring_info.destroy();
m_texture_upload_buffer_ring_info.destroy();
m_vertex_instructions_buffer.destroy();
m_fragment_instructions_buffer.destroy();
m_raster_env_ring_info.destroy();
// Fallback bindables
null_buffer.reset();
null_buffer_view.reset();
if (m_current_frame == &m_aux_frame_context)
{
// Return resources to the owner
m_current_frame = &frame_context_storage[m_current_queue_index];
m_current_frame->swap_storage(m_aux_frame_context);
m_current_frame->grab_resources(m_aux_frame_context);
}
m_aux_frame_context.buffer_views_to_clean.clear();
// NOTE: aux_context uses descriptor pools borrowed from the main queues and any allocations will be automatically freed when the pool is destroyed
for (auto &ctx : frame_context_storage)
{
vkDestroySemaphore((*m_device), ctx.present_wait_semaphore, nullptr);
vkDestroySemaphore((*m_device), ctx.acquire_signal_semaphore, nullptr);
ctx.buffer_views_to_clean.clear();
}
// Textures
m_rtts.destroy();
m_texture_cache.destroy();
m_stencil_mirror_sampler.reset();
// Pipeline descriptors
m_descriptor_pool.destroy();
vkDestroyPipelineLayout(*m_device, m_pipeline_layout, nullptr);
vkDestroyDescriptorSetLayout(*m_device, m_descriptor_layouts, nullptr);
// Queries
m_occlusion_query_manager.reset();
m_cond_render_buffer.reset();
// Command buffer
m_primary_cb_list.destroy();
m_secondary_cb_list.destroy();
m_command_buffer_pool.destroy();
m_secondary_command_buffer_pool.destroy();
// Global resources
vk::destroy_global_resources();
// Device handles/contexts
m_swapchain->destroy();
m_instance.destroy();
#if defined(HAVE_X11) && defined(HAVE_VULKAN)
if (m_display_handle)
XCloseDisplay(m_display_handle);
#endif
}
bool VKGSRender::on_access_violation(u32 address, bool is_writing)
{
vk::texture_cache::thrashed_set result;
{
const rsx::invalidation_cause cause = is_writing ? rsx::invalidation_cause::deferred_write : rsx::invalidation_cause::deferred_read;
result = m_texture_cache.invalidate_address(*m_secondary_cb_list.get(), address, cause);
}
if (result.invalidate_samplers)
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
if (!result.violation_handled)
{
return zcull_ctrl->on_access_violation(address);
}
if (result.num_flushable > 0)
{
if (g_fxo->get<rsx::dma_manager>().is_current_thread())
{
// The offloader thread cannot handle flush requests
ensure(!(m_queue_status & flush_queue_state::deadlock));
m_offloader_fault_range = g_fxo->get<rsx::dma_manager>().get_fault_range(is_writing);
m_offloader_fault_cause = (is_writing) ? rsx::invalidation_cause::write : rsx::invalidation_cause::read;
g_fxo->get<rsx::dma_manager>().set_mem_fault_flag();
m_queue_status |= flush_queue_state::deadlock;
m_eng_interrupt_mask |= rsx::backend_interrupt;
// Wait for deadlock to clear
while (m_queue_status & flush_queue_state::deadlock)
{
utils::pause();
}
g_fxo->get<rsx::dma_manager>().clear_mem_fault_flag();
return true;
}
bool has_queue_ref = false;
std::function<void()> data_transfer_completed_callback{};
if (!is_current_thread()) [[likely]]
{
// Always submit primary cb to ensure state consistency (flush pending changes such as image transitions)
vm::temporary_unlock();
std::lock_guard lock(m_flush_queue_mutex);
m_flush_requests.post(false);
m_eng_interrupt_mask |= rsx::backend_interrupt;
has_queue_ref = true;
}
else
{
if (vk::is_uninterruptible())
{
rsx_log.error("Fault in uninterruptible code!");
}
// Flush primary cb queue to sync pending changes (e.g image transitions!)
flush_command_queue();
}
if (has_queue_ref)
{
// Wait for the RSX thread to process request if it hasn't already
m_flush_requests.producer_wait();
data_transfer_completed_callback = [&]()
{
m_flush_requests.remove_one();
has_queue_ref = false;
};
}
m_texture_cache.flush_all(*m_secondary_cb_list.next(), result, data_transfer_completed_callback);
if (has_queue_ref)
{
// Release RSX thread if it's still locked
m_flush_requests.remove_one();
}
}
return true;
}
void VKGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
{
std::lock_guard lock(m_secondary_cb_guard);
auto data = m_texture_cache.invalidate_range(*m_secondary_cb_list.next(), range, cause);
AUDIT(data.empty());
if (cause == rsx::invalidation_cause::unmap)
{
if (data.violation_handled)
{
m_texture_cache.purge_unreleased_sections();
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
}
vk::unmap_dma(range.start, range.length());
}
}
void VKGSRender::on_semaphore_acquire_wait()
{
if (m_flush_requests.pending() ||
(async_flip_requested & flip_request::emu_requested) ||
(m_queue_status & flush_queue_state::deadlock))
{
do_local_task(rsx::FIFO::state::lock_wait);
}
}
bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
{
ensure(!vk::is_uninterruptible() && rsx::get_current_renderer()->is_current_thread());
bool texture_cache_relieved = false;
if (severity >= rsx::problem_severity::fatal)
{
// Hard sync before trying to evict anything. This guarantees no UAF crashes in the driver.
// As a bonus, we also get a free gc pass
flush_command_queue(true, true);
if (m_texture_cache.is_overallocated())
{
// Evict some unused textures. Do not evict any active references
std::set<u32> exclusion_list;
auto scan_array = [&](const auto& texture_array)
{
for (auto i = 0ull; i < texture_array.size(); ++i)
{
const auto& tex = texture_array[i];
const auto addr = rsx::get_address(tex.offset(), tex.location());
exclusion_list.insert(addr);
}
};
scan_array(rsx::method_registers.fragment_textures);
scan_array(rsx::method_registers.vertex_textures);
// Hold the secondary lock guard to prevent threads from trying to touch access violation handler stuff
std::lock_guard lock(m_secondary_cb_guard);
rsx_log.warning("Texture cache is overallocated. Will evict unnecessary textures.");
texture_cache_relieved = m_texture_cache.evict_unused(exclusion_list);
}
}
texture_cache_relieved |= m_texture_cache.handle_memory_pressure(severity);
if (severity == rsx::problem_severity::low)
{
// Low severity only handles invalidating unused textures
return texture_cache_relieved;
}
bool surface_cache_relieved = false;
const auto mem_info = m_device->get_memory_mapping();
// Check if we need to spill
if (severity >= rsx::problem_severity::fatal && // Only spill for fatal errors
mem_info.device_local != mem_info.host_visible_coherent && // Do not spill if it is an IGP; there is nowhere to spill to
m_rtts.is_overallocated()) // Surface cache must be over-allocated by the design quota
{
// Queue a VRAM spill operation.
m_rtts.spill_unused_memory();
}
// Moderate severity and higher also starts removing stale render target objects
if (m_rtts.handle_memory_pressure(*m_current_command_buffer, severity))
{
surface_cache_relieved = true;
m_rtts.trim(*m_current_command_buffer, severity);
}
const bool any_cache_relieved = (texture_cache_relieved || surface_cache_relieved);
if (severity < rsx::problem_severity::fatal)
{
return any_cache_relieved;
}
if (surface_cache_relieved && !m_samplers_dirty)
{
// If surface cache was modified destructively, then we must reload samplers touching the surface cache.
bool invalidate_samplers = false;
auto scan_array = [&](const auto& texture_array, const auto& sampler_states)
{
if (invalidate_samplers)
{
return;
}
for (auto i = 0ull; i < texture_array.size(); ++i)
{
if (texture_array[i].enabled() &&
sampler_states[i] &&
sampler_states[i]->upload_context == rsx::texture_upload_context::framebuffer_storage)
{
invalidate_samplers = true;
break;
}
}
};
scan_array(rsx::method_registers.fragment_textures, fs_sampler_state);
scan_array(rsx::method_registers.vertex_textures, vs_sampler_state);
if (invalidate_samplers)
{
m_samplers_dirty.store(true);
}
}
// Imminent crash, full GPU sync is the least of our problems
flush_command_queue(true, true);
return any_cache_relieved;
}
void VKGSRender::on_descriptor_pool_fragmentation(bool is_fatal)
{
if (!is_fatal)
{
// It is very likely that the release is simply in progress (enqueued)
m_primary_cb_list.wait_all();
return;
}
// Just flush everything. Unless the hardware is very deficient, this should happen very rarely.
flush_command_queue(true, true);
}
void VKGSRender::notify_tile_unbound(u32 tile)
{
// TODO: Handle texture writeback
if (false)
{
u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
m_rtts.invalidate_surface_address(addr, false);
}
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
}
void VKGSRender::check_heap_status(u32 flags)
{
ensure(flags);
bool heap_critical;
if (flags == VK_HEAP_CHECK_ALL)
{
heap_critical = m_attrib_ring_info.is_critical() ||
m_texture_upload_buffer_ring_info.is_critical() ||
m_fragment_env_ring_info.is_critical() ||
m_vertex_env_ring_info.is_critical() ||
m_fragment_texture_params_ring_info.is_critical() ||
m_vertex_layout_ring_info.is_critical() ||
m_fragment_constants_ring_info.is_critical() ||
m_transform_constants_ring_info.is_critical() ||
m_index_buffer_ring_info.is_critical() ||
m_raster_env_ring_info.is_critical();
}
else
{
heap_critical = false;
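// Walk the requested heap flags starting from the lowest set bit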
u32 test = 1u << std::countr_zero(flags);
do
{
switch (flags & test)
{
case 0:
break;
case VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE:
heap_critical = m_texture_upload_buffer_ring_info.is_critical();
break;
case VK_HEAP_CHECK_VERTEX_STORAGE:
heap_critical = m_attrib_ring_info.is_critical() || m_index_buffer_ring_info.is_critical();
break;
case VK_HEAP_CHECK_VERTEX_ENV_STORAGE:
heap_critical = m_vertex_env_ring_info.is_critical();
break;
case VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE:
heap_critical = m_fragment_env_ring_info.is_critical() || m_raster_env_ring_info.is_critical();
break;
case VK_HEAP_CHECK_TEXTURE_ENV_STORAGE:
heap_critical = m_fragment_texture_params_ring_info.is_critical();
break;
case VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE:
heap_critical = m_vertex_layout_ring_info.is_critical();
break;
case VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE:
heap_critical = m_transform_constants_ring_info.is_critical();
break;
case VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE:
heap_critical = m_fragment_constants_ring_info.is_critical();
break;
default:
fmt::throw_exception("Unexpected heap flag set! (0x%X)", test);
}
flags &= ~test;
test <<= 1;
}
while (flags && !heap_critical);
}
if (heap_critical)
{
m_profiler.start();
vk::frame_context_t *target_frame = nullptr;
if (!m_queued_frames.empty())
{
if (m_current_frame != &m_aux_frame_context)
{
target_frame = m_queued_frames.front();
}
}
if (target_frame == nullptr)
{
flush_command_queue(true);
m_vertex_cache->purge();
m_index_buffer_ring_info.reset_allocation_stats();
m_fragment_env_ring_info.reset_allocation_stats();
m_vertex_env_ring_info.reset_allocation_stats();
m_fragment_texture_params_ring_info.reset_allocation_stats();
m_vertex_layout_ring_info.reset_allocation_stats();
m_fragment_constants_ring_info.reset_allocation_stats();
m_transform_constants_ring_info.reset_allocation_stats();
m_attrib_ring_info.reset_allocation_stats();
m_texture_upload_buffer_ring_info.reset_allocation_stats();
m_raster_env_ring_info.reset_allocation_stats();
m_current_frame->reset_heap_ptrs();
m_last_heap_sync_time = rsx::get_shared_tag();
}
else
{
// Flush the frame context
frame_context_cleanup(target_frame);
}
m_frame_stats.flip_time += m_profiler.duration();
}
}
void VKGSRender::check_present_status()
{
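// Clean up any queued frames whose swap command buffers have completed execution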
while (!m_queued_frames.empty())
{
auto ctx = m_queued_frames.front();
if (!ctx->swap_command_buffer->poke())
{
return;
}
frame_context_cleanup(ctx);
}
}
VkDescriptorSet VKGSRender::allocate_descriptor_set()
{
if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
{
return m_descriptor_pool.allocate(m_descriptor_layouts, VK_TRUE);
}
else
{
return m_shader_interpreter.allocate_descriptor_set();
}
}
void VKGSRender::set_viewport()
{
const auto [clip_width, clip_height] = rsx::apply_resolution_scale<true>(
rsx::method_registers.surface_clip_width(), rsx::method_registers.surface_clip_height());
const auto zclip_near = rsx::method_registers.clip_min();
const auto zclip_far = rsx::method_registers.clip_max();
// NOTE: The scale_offset matrix already has the viewport matrix factored in
m_viewport.x = 0;
m_viewport.y = 0;
m_viewport.width = clip_width;
m_viewport.height = clip_height;
if (m_device->get_unrestricted_depth_range_support())
{
m_viewport.minDepth = zclip_near;
m_viewport.maxDepth = zclip_far;
}
else
{
m_viewport.minDepth = 0.f;
m_viewport.maxDepth = 1.f;
}
m_current_command_buffer->flags |= vk::command_buffer::cb_reload_dynamic_state;
m_graphics_state.clear(rsx::pipeline_state::zclip_config_state_dirty);
}
void VKGSRender::set_scissor(bool clip_viewport)
{
areau scissor;
if (get_scissor(scissor, clip_viewport))
{
m_scissor.extent.height = scissor.height();
m_scissor.extent.width = scissor.width();
m_scissor.offset.x = scissor.x1;
m_scissor.offset.y = scissor.y1;
m_current_command_buffer->flags |= vk::command_buffer::cb_reload_dynamic_state;
}
}
void VKGSRender::bind_viewport()
{
if (m_graphics_state & rsx::pipeline_state::zclip_config_state_dirty)
{
if (m_device->get_unrestricted_depth_range_support())
{
m_viewport.minDepth = rsx::method_registers.clip_min();
m_viewport.maxDepth = rsx::method_registers.clip_max();
}
m_graphics_state.clear(rsx::pipeline_state::zclip_config_state_dirty);
}
vkCmdSetViewport(*m_current_command_buffer, 0, 1, &m_viewport);
vkCmdSetScissor(*m_current_command_buffer, 0, 1, &m_scissor);
}
void VKGSRender::on_init_thread()
{
if (m_device == VK_NULL_HANDLE)
{
fmt::throw_exception("No Vulkan device was created");
}
GSRender::on_init_thread();
zcull_ctrl.reset(static_cast<::rsx::reports::ZCULL_control*>(this));
if (!m_overlay_manager)
{
m_frame->hide();
m_shaders_cache->load(nullptr, m_pipeline_layout);
m_frame->show();
}
else
{
rsx::shader_loading_dialog_native dlg(this);
// TODO: Handle window resize messages during loading on GPUs without OUT_OF_DATE_KHR support
m_shaders_cache->load(&dlg, m_pipeline_layout);
}
}
void VKGSRender::on_exit()
{
GSRender::on_exit();
vk::destroy_pipe_compiler(); // Ensure no pending shaders being compiled
zcull_ctrl.release();
}
void VKGSRender::clear_surface(u32 mask)
{
if (skip_current_frame || swapchain_unavailable) return;
// If stencil write mask is disabled, remove clear_stencil bit
if (!rsx::method_registers.stencil_mask()) mask &= ~RSX_GCM_CLEAR_STENCIL_BIT;
// Ignore invalid clear flags
if (!(mask & RSX_GCM_CLEAR_ANY_MASK)) return;
u8 ctx = rsx::framebuffer_creation_context::context_draw;
if (mask & RSX_GCM_CLEAR_COLOR_RGBA_MASK) ctx |= rsx::framebuffer_creation_context::context_clear_color;
if (mask & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK) ctx |= rsx::framebuffer_creation_context::context_clear_depth;
init_buffers(rsx::framebuffer_creation_context{ctx});
if (!m_graphics_state.test(rsx::rtt_config_valid))
{
return;
}
u32 stencil_clear = 0;
u32 depth_stencil_mask = 0;
std::vector<VkClearAttachment> clear_descriptors;
VkClearValue depth_stencil_clear_values = {}, color_clear_values = {};
u16 scissor_x = static_cast<u16>(m_scissor.offset.x);
u16 scissor_w = static_cast<u16>(m_scissor.extent.width);
u16 scissor_y = static_cast<u16>(m_scissor.offset.y);
u16 scissor_h = static_cast<u16>(m_scissor.extent.height);
const u16 fb_width = m_draw_fbo->width();
const u16 fb_height = m_draw_fbo->height();
// Clip region
std::tie(scissor_x, scissor_y, scissor_w, scissor_h) = rsx::clip_region<u16>(fb_width, fb_height, scissor_x, scissor_y, scissor_w, scissor_h, true);
VkClearRect region = { { { scissor_x, scissor_y }, { scissor_w, scissor_h } }, 0, 1 };
const bool full_frame = (scissor_w == fb_width && scissor_h == fb_height);
bool update_color = false, update_z = false;
auto surface_depth_format = rsx::method_registers.surface_depth_fmt();
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil); mask & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK)
{
if (mask & RSX_GCM_CLEAR_DEPTH_BIT)
{
u32 max_depth_value = get_max_depth_value(surface_depth_format);
u32 clear_depth = rsx::method_registers.z_clear_value(is_depth_stencil_format(surface_depth_format));
float depth_clear = static_cast<float>(clear_depth) / max_depth_value;
depth_stencil_clear_values.depthStencil.depth = depth_clear;
depth_stencil_clear_values.depthStencil.stencil = stencil_clear;
depth_stencil_mask |= VK_IMAGE_ASPECT_DEPTH_BIT;
}
if (is_depth_stencil_format(surface_depth_format))
{
if (mask & RSX_GCM_CLEAR_STENCIL_BIT)
{
u8 clear_stencil = rsx::method_registers.stencil_clear_value();
depth_stencil_clear_values.depthStencil.stencil = clear_stencil;
depth_stencil_mask |= VK_IMAGE_ASPECT_STENCIL_BIT;
if (ds->samples() > 1)
{
if (full_frame) ds->stencil_init_flags &= 0xFF;
ds->stencil_init_flags |= clear_stencil;
}
}
}
if ((depth_stencil_mask && depth_stencil_mask != ds->aspect()) || !full_frame)
{
// At least one aspect is not being cleared or the clear does not cover the full frame
// Steps to initialize memory are required
if (ds->state_flags & rsx::surface_state_flags::erase_bkgnd && // Needs initialization
ds->old_contents.empty() && !g_cfg.video.read_depth_buffer) // No way to load data from memory, so no initialization given
{
// Only one aspect was cleared. Make sure to memory-initialize the other before removing the dirty flag
const auto ds_mask = (mask & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK);
if (ds_mask == RSX_GCM_CLEAR_DEPTH_BIT && (ds->aspect() & VK_IMAGE_ASPECT_STENCIL_BIT))
{
// Depth was cleared, initialize stencil
depth_stencil_clear_values.depthStencil.stencil = 0xFF;
depth_stencil_mask |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
else if (ds_mask == RSX_GCM_CLEAR_STENCIL_BIT)
{
// Stencil was cleared, initialize depth
depth_stencil_clear_values.depthStencil.depth = 1.f;
depth_stencil_mask |= VK_IMAGE_ASPECT_DEPTH_BIT;
}
}
else
{
// Barrier required before any writes
ds->write_barrier(*m_current_command_buffer);
}
}
}
if (auto colormask = (mask & RSX_GCM_CLEAR_COLOR_RGBA_MASK))
{
if (!m_draw_buffers.empty())
{
bool use_fast_clear = (colormask == RSX_GCM_CLEAR_COLOR_RGBA_MASK);
u8 clear_a = rsx::method_registers.clear_color_a();
u8 clear_r = rsx::method_registers.clear_color_r();
u8 clear_g = rsx::method_registers.clear_color_g();
u8 clear_b = rsx::method_registers.clear_color_b();
switch (rsx::method_registers.surface_color())
{
case rsx::surface_color_format::x32:
case rsx::surface_color_format::w16z16y16x16:
case rsx::surface_color_format::w32z32y32x32:
{
// NOP
colormask = 0;
break;
}
case rsx::surface_color_format::b8:
{
rsx::get_b8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_b8_clearmask(colormask);
use_fast_clear = (colormask & RSX_GCM_CLEAR_RED_BIT);
break;
}
case rsx::surface_color_format::g8b8:
{
rsx::get_g8b8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_g8b8_r8g8_clearmask(colormask);
use_fast_clear = ((colormask & RSX_GCM_CLEAR_COLOR_RG_MASK) == RSX_GCM_CLEAR_COLOR_RG_MASK);
break;
}
case rsx::surface_color_format::r5g6b5:
{
rsx::get_rgb565_clear_color(clear_r, clear_g, clear_b, clear_a);
use_fast_clear = ((colormask & RSX_GCM_CLEAR_COLOR_RGB_MASK) == RSX_GCM_CLEAR_COLOR_RGB_MASK);
break;
}
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
{
rsx::get_a1rgb555_clear_color(clear_r, clear_g, clear_b, clear_a, 255);
break;
}
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
{
rsx::get_a1rgb555_clear_color(clear_r, clear_g, clear_b, clear_a, 0);
break;
}
case rsx::surface_color_format::a8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
{
rsx::get_abgr8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_abgr8_clearmask(colormask);
break;
}
default:
{
break;
}
}
if (colormask)
{
if (!use_fast_clear || !full_frame)
{
// If we're not clobbering all of the memory, a barrier is required
for (const auto& index : m_rtts.m_bound_render_target_ids)
{
m_rtts.m_bound_render_targets[index].second->write_barrier(*m_current_command_buffer);
}
}
color_clear_values.color.float32[0] = static_cast<float>(clear_r) / 255;
color_clear_values.color.float32[1] = static_cast<float>(clear_g) / 255;
color_clear_values.color.float32[2] = static_cast<float>(clear_b) / 255;
color_clear_values.color.float32[3] = static_cast<float>(clear_a) / 255;
if (use_fast_clear)
{
for (u32 index = 0; index < m_draw_buffers.size(); ++index)
{
clear_descriptors.push_back({ VK_IMAGE_ASPECT_COLOR_BIT, index, color_clear_values });
}
}
else
{
color4f clear_color =
{
color_clear_values.color.float32[0],
color_clear_values.color.float32[1],
color_clear_values.color.float32[2],
color_clear_values.color.float32[3]
};
auto attachment_clear_pass = vk::get_overlay_pass<vk::attachment_clear_pass>();
attachment_clear_pass->run(*m_current_command_buffer, m_draw_fbo, region.rect, colormask, clear_color, get_render_pass());
}
update_color = true;
}
}
}
if (depth_stencil_mask)
{
if ((depth_stencil_mask & VK_IMAGE_ASPECT_STENCIL_BIT) &&
rsx::method_registers.stencil_mask() != 0xff)
{
// Partial stencil clear. Disables fast stencil clear
auto ds = std::get<1>(m_rtts.m_bound_depth_stencil);
auto key = vk::get_renderpass_key({ ds });
auto renderpass = vk::get_renderpass(*m_device, key);
vk::get_overlay_pass<vk::stencil_clear_pass>()->run(
*m_current_command_buffer, ds, region.rect,
depth_stencil_clear_values.depthStencil.stencil,
rsx::method_registers.stencil_mask(), renderpass);
depth_stencil_mask &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
}
if (depth_stencil_mask)
{
clear_descriptors.push_back({ static_cast<VkImageAspectFlags>(depth_stencil_mask), 0, depth_stencil_clear_values });
}
update_z = true;
}
if (update_color || update_z)
{
m_rtts.on_write({ update_color, update_color, update_color, update_color }, update_z);
}
if (!clear_descriptors.empty())
{
begin_render_pass();
vkCmdClearAttachments(*m_current_command_buffer, ::size32(clear_descriptors), clear_descriptors.data(), 1, &region);
}
}
void VKGSRender::flush_command_queue(bool hard_sync, bool do_not_switch)
{
close_and_submit_command_buffer();
if (hard_sync)
{
// wait for the latest instruction to execute
m_current_command_buffer->reset();
// Clear all command buffer statuses
m_primary_cb_list.poke_all();
// Drain present queue
while (!m_queued_frames.empty())
{
check_present_status();
}
m_flush_requests.clear_pending_flag();
}
if (!do_not_switch)
{
// Grab next cb in line and make it usable
// NOTE: Even in the case of a hard sync, this is required to free any waiters on the CB (ZCULL)
m_current_command_buffer = m_primary_cb_list.next();
m_current_command_buffer->reset();
}
else
{
// Special hard-sync where we must preserve the CB. This can happen when an emergency event handler is invoked and needs to flush to hw.
ensure(hard_sync);
}
// Just in case a queued frame holds a ref to this cb, drain the present queue
check_present_status();
if (m_occlusion_query_active)
{
m_current_command_buffer->flags |= vk::command_buffer::cb_load_occluson_task;
}
m_current_command_buffer->begin();
}
std::pair<volatile vk::host_data_t*, VkBuffer> VKGSRender::map_host_object_data() const
{
return { m_host_dma_ctrl->host_ctx(), m_host_object_data->value };
}
bool VKGSRender::release_GCM_label(u32 address, u32 args)
{
if (!backend_config.supports_host_gpu_labels)
{
return false;
}
auto host_ctx = ensure(m_host_dma_ctrl->host_ctx());
if (host_ctx->texture_loads_completed())
{
// All texture loads already seen by the host GPU
// Wait for all previously submitted labels to be flushed
m_host_dma_ctrl->drain_label_queue();
return false;
}
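// Map the guest label address and convert the value to guest (big-endian) byte order for the raw write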
const auto mapping = vk::map_dma(address, 4);
const auto write_data = std::bit_cast<u32, be_t<u32>>(args);
if (!dynamic_cast<vk::memory_block_host*>(mapping.second->memory.get()))
{
// NVIDIA GPUs can disappoint when DMA blocks straddle VirtualAlloc boundaries.
// Take the L and try the fallback.
rsx_log.warning("Host label update at 0x%x was not possible.", address);
m_host_dma_ctrl->drain_label_queue();
return false;
}
const auto release_event_id = host_ctx->on_label_acquire();
if (host_ctx->has_unflushed_texture_loads())
{
if (vk::is_renderpass_open(*m_current_command_buffer))
{
vk::end_renderpass(*m_current_command_buffer);
}
vkCmdUpdateBuffer(*m_current_command_buffer, mapping.second->value, mapping.first, 4, &write_data);
flush_command_queue();
}
else
{
auto cmd = m_secondary_cb_list.next();
cmd->begin();
vkCmdUpdateBuffer(*cmd, mapping.second->value, mapping.first, 4, &write_data);
vkCmdUpdateBuffer(*cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::commands_complete_event), 8, &release_event_id);
cmd->end();
vk::queue_submit_t submit_info = { m_device->get_graphics_queue(), nullptr };
cmd->submit(submit_info);
host_ctx->on_label_release();
}
return true;
}
void VKGSRender::on_guest_texture_read(const vk::command_buffer& cmd)
{
if (!backend_config.supports_host_gpu_labels)
{
return;
}
// Queue a sync update on the CB doing the load
auto host_ctx = ensure(m_host_dma_ctrl->host_ctx());
const auto event_id = host_ctx->on_texture_load_acquire();
vkCmdUpdateBuffer(cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::texture_load_complete_event), sizeof(u64), &event_id);
}
void VKGSRender::sync_hint(rsx::FIFO::interrupt_hint hint, rsx::reports::sync_hint_payload_t payload)
{
rsx::thread::sync_hint(hint, payload);
if (!(m_current_command_buffer->flags & vk::command_buffer::cb_has_occlusion_task))
{
// Occlusion queries not enabled, do nothing
return;
}
// Occlusion test result evaluation is coming up, avoid a hard sync
switch (hint)
{
case rsx::FIFO::interrupt_hint::conditional_render_eval:
{
// If a flush request is already enqueued, do nothing
if (m_flush_requests.pending())
{
return;
}
// If the result is not going to be read by CELL, do nothing
const auto ref_addr = static_cast<u32>(payload.address);
if (!zcull_ctrl->is_query_result_urgent(ref_addr))
{
// No effect on CELL behaviour, it will be faster to handle this in RSX code
return;
}
// OK, cell will be accessing the results, probably.
// Try to avoid flush spam, it is more costly to flush the CB than it is to just upload the vertex data
// This is supposed to be an optimization, after all.
const auto now = get_system_time();
if ((now - m_last_cond_render_eval_hint) > 50)
{
// Schedule a sync on the next loop iteration
m_flush_requests.post(false);
m_flush_requests.remove_one();
}
m_last_cond_render_eval_hint = now;
break;
}
case rsx::FIFO::interrupt_hint::zcull_sync:
{
// Check if the required report is synced to this CB
auto& data = m_occlusion_map[payload.query->driver_handle];
// NOTE: Currently, a special condition exists where the indices can be empty even with an active draw count.
// This is caused by async compiler and should be removed when ubershaders are added in
if (!data.is_current(m_current_command_buffer) || data.indices.empty())
{
return;
}
// Unavoidable hard sync coming up, flush immediately
// This heavyweight hint should be used with caution
std::lock_guard lock(m_flush_queue_mutex);
flush_command_queue();
if (m_flush_requests.pending())
{
// Clear without wait
m_flush_requests.clear_pending_flag();
}
break;
}
}
}
void VKGSRender::do_local_task(rsx::FIFO::state state)
{
if (m_queue_status & flush_queue_state::deadlock)
{
// Clear offloader deadlock
// NOTE: It is not possible to handle regular flush requests before this is cleared
// NOTE: This may cause graphics corruption due to unsynchronized modification
on_invalidate_memory_range(m_offloader_fault_range, m_offloader_fault_cause);
m_queue_status.clear(flush_queue_state::deadlock);
}
if (m_queue_status & flush_queue_state::flushing)
{
// Abort recursive CB submit requests.
// When flushing flag is already set, only deadlock events may be processed.
return;
}
else if (m_flush_requests.pending())
{
if (m_flush_queue_mutex.try_lock())
{
// TODO: Determine if a hard sync is necessary
// Pipeline barriers later may do a better job synchronizing than wholly stalling the pipeline
flush_command_queue();
m_flush_requests.clear_pending_flag();
m_flush_requests.consumer_wait();
m_flush_queue_mutex.unlock();
}
}
else if (!in_begin_end && state != rsx::FIFO::state::lock_wait)
{
if (m_graphics_state & rsx::pipeline_state::framebuffer_reads_dirty)
{
// This will re-engage locks and break the texture cache if another thread is waiting in the access violation handler!
// Only call when there are no waiters
m_texture_cache.do_update();
m_graphics_state.clear(rsx::pipeline_state::framebuffer_reads_dirty);
}
}
rsx::thread::do_local_task(state);
switch (state)
{
case rsx::FIFO::state::lock_wait:
// Critical check finished
return;
//case rsx::FIFO::state::spinning:
//case rsx::FIFO::state::empty:
// We have some time, check the present queue
//check_present_status();
//break;
default:
break;
}
if (m_overlay_manager)
{
const auto should_ignore = in_begin_end && state != rsx::FIFO::state::empty;
if ((async_flip_requested & flip_request::native_ui) && !should_ignore && !is_stopped())
{
flush_command_queue(true);
rsx::display_flip_info_t info{};
info.buffer = current_display_buffer;
flip(info);
}
}
}
bool VKGSRender::load_program()
{
const auto shadermode = g_cfg.video.shadermode.get();
// TODO: EXT_dynamic_state should get rid of this silliness soon (kd)
const auto vertex_state = vk::decode_vertex_input_assembly_state();
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
get_current_fragment_program(fs_sampler_state);
ensure(current_fragment_program.valid);
get_current_vertex_program(vs_sampler_state);
m_graphics_state.clear(rsx::pipeline_state::invalidate_pipeline_bits);
}
else if (!(m_graphics_state & rsx::pipeline_state::pipeline_config_dirty) &&
m_program &&
m_pipeline_properties.state.ia.topology == vertex_state.primitive &&
m_pipeline_properties.state.ia.primitiveRestartEnable == vertex_state.restart_index_enabled)
{
if (!m_shader_interpreter.is_interpreter(m_program)) [[ likely ]]
{
return true;
}
if (shadermode == shader_mode::interpreter_only)
{
m_program = m_shader_interpreter.get(m_pipeline_properties, current_fp_metadata);
return true;
}
}
auto &vertex_program = current_vertex_program;
auto &fragment_program = current_fragment_program;
if (m_graphics_state & rsx::pipeline_state::pipeline_config_dirty)
{
vk::pipeline_props properties = vk::decode_rsx_state(
vertex_state,
m_rtts.m_bound_depth_stencil.second,
backend_config,
static_cast<u8>(m_draw_buffers.size()),
u8((m_current_renderpass_key >> 16) & 0xF),
m_device->get_depth_bounds_support()
);
properties.renderpass_key = m_current_renderpass_key;
if (m_program &&
!m_shader_interpreter.is_interpreter(m_program) &&
m_pipeline_properties == properties)
{
// Nothing changed
return true;
}
// Fallthrough
m_pipeline_properties = properties;
m_graphics_state.clear(rsx::pipeline_state::pipeline_config_dirty);
}
else
{
// Update primitive type and restart index. Note that this is not needed with EXT_dynamic_state
m_pipeline_properties.state.set_primitive_type(vertex_state.primitive);
m_pipeline_properties.state.enable_primitive_restart(vertex_state.restart_index_enabled);
m_pipeline_properties.renderpass_key = m_current_renderpass_key;
}
m_vertex_prog = nullptr;
m_fragment_prog = nullptr;
if (shadermode != shader_mode::interpreter_only) [[likely]]
{
vk::enter_uninterruptible();
// Load current program from cache
std::tie(m_program, m_vertex_prog, m_fragment_prog) = m_prog_buffer->get_graphics_pipeline(vertex_program, fragment_program, m_pipeline_properties,
shadermode != shader_mode::recompiler, true, m_pipeline_layout);
vk::leave_uninterruptible();
if (m_prog_buffer->check_cache_missed())
{
// Notify the user with HUD notification
if (g_cfg.misc.show_shader_compilation_hint)
{
if (m_overlay_manager)
{
rsx::overlays::show_shader_compile_notification();
}
}
}
}
else
{
m_program = nullptr;
}
if (!m_program && (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only))
{
if (!m_shader_interpreter.is_interpreter(m_prev_program))
{
m_interpreter_state = rsx::invalidate_pipeline_bits;
}
m_program = m_shader_interpreter.get(m_pipeline_properties, current_fp_metadata);
}
return m_program != nullptr;
}
void VKGSRender::load_program_env()
{
if (!m_program)
{
fmt::throw_exception("Unreachable right now");
}
const u32 fragment_constants_size = current_fp_metadata.program_constants_buffer_length;
const bool update_transform_constants = !!(m_graphics_state & rsx::pipeline_state::transform_constants_dirty);
const bool update_fragment_constants = !!(m_graphics_state & rsx::pipeline_state::fragment_constants_dirty);
const bool update_vertex_env = !!(m_graphics_state & rsx::pipeline_state::vertex_state_dirty);
const bool update_fragment_env = !!(m_graphics_state & rsx::pipeline_state::fragment_state_dirty);
const bool update_fragment_texture_env = !!(m_graphics_state & rsx::pipeline_state::fragment_texture_state_dirty);
const bool update_instruction_buffers = (!!m_interpreter_state && m_shader_interpreter.is_interpreter(m_program));
const bool update_raster_env = (rsx::method_registers.polygon_stipple_enabled() && !!(m_graphics_state & rsx::pipeline_state::polygon_stipple_pattern_dirty));
if (update_vertex_env)
{
check_heap_status(VK_HEAP_CHECK_VERTEX_ENV_STORAGE);
// Vertex state
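// Buffer layout: scale-offset matrix (offset 0), user clip planes (64), transform branch bits (128), point size (132), zclip near/far (136/140)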
const auto mem = m_vertex_env_ring_info.alloc<256>(256);
auto buf = static_cast<u8*>(m_vertex_env_ring_info.map(mem, 148));
fill_scale_offset_data(buf, false);
fill_user_clip_data(buf + 64);
*(reinterpret_cast<u32*>(buf + 128)) = rsx::method_registers.transform_branch_bits();
*(reinterpret_cast<f32*>(buf + 132)) = rsx::method_registers.point_size() * rsx::get_resolution_scale();
*(reinterpret_cast<f32*>(buf + 136)) = rsx::method_registers.clip_min();
*(reinterpret_cast<f32*>(buf + 140)) = rsx::method_registers.clip_max();
m_vertex_env_ring_info.unmap();
m_vertex_env_buffer_info = { m_vertex_env_ring_info.heap->value, mem, 144 };
}
if (update_transform_constants)
{
// Transform constants
usz mem_offset = 0;
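// Allocator callback: reserves aligned space in the transform constants ring and maps it for writing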
auto alloc_storage = [&](usz size) -> std::pair<void*, usz>
{
const auto alignment = m_device->gpu().get_limits().minUniformBufferOffsetAlignment;
mem_offset = m_transform_constants_ring_info.alloc<1>(utils::align(size, alignment));
return std::make_pair(m_transform_constants_ring_info.map(mem_offset, size), size);
};
auto io_buf = rsx::io_buffer(alloc_storage);
upload_transform_constants(io_buf);
if (!io_buf.empty())
{
m_transform_constants_ring_info.unmap();
m_vertex_constants_buffer_info = { m_transform_constants_ring_info.heap->value, mem_offset, io_buf.size() };
}
}
if (update_fragment_constants && !update_instruction_buffers)
{
check_heap_status(VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE);
// Fragment constants
if (fragment_constants_size)
{
auto mem = m_fragment_constants_ring_info.alloc<256>(fragment_constants_size);
auto buf = m_fragment_constants_ring_info.map(mem, fragment_constants_size);
m_prog_buffer->fill_fragment_constants_buffer({ reinterpret_cast<float*>(buf), fragment_constants_size },
*ensure(m_fragment_prog), current_fragment_program, true);
m_fragment_constants_ring_info.unmap();
m_fragment_constants_buffer_info = { m_fragment_constants_ring_info.heap->value, mem, fragment_constants_size };
}
else
{
m_fragment_constants_buffer_info = { m_fragment_constants_ring_info.heap->value, 0, 32 };
}
}
if (update_fragment_env)
{
check_heap_status(VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE);
auto mem = m_fragment_env_ring_info.alloc<256>(256);
auto buf = m_fragment_env_ring_info.map(mem, 32);
fill_fragment_state_buffer(buf, current_fragment_program);
m_fragment_env_ring_info.unmap();
m_fragment_env_buffer_info = { m_fragment_env_ring_info.heap->value, mem, 32 };
}
if (update_fragment_texture_env)
{
check_heap_status(VK_HEAP_CHECK_TEXTURE_ENV_STORAGE);
auto mem = m_fragment_texture_params_ring_info.alloc<256>(768);
auto buf = m_fragment_texture_params_ring_info.map(mem, 768);
current_fragment_program.texture_params.write_to(buf, current_fp_metadata.referenced_textures_mask);
m_fragment_texture_params_ring_info.unmap();
m_fragment_texture_params_buffer_info = { m_fragment_texture_params_ring_info.heap->value, mem, 768 };
}
if (update_raster_env)
{
check_heap_status(VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE);
auto mem = m_raster_env_ring_info.alloc<256>(256);
auto buf = m_raster_env_ring_info.map(mem, 128);
std::memcpy(buf, rsx::method_registers.polygon_stipple_pattern(), 128);
m_raster_env_ring_info.unmap();
m_raster_env_buffer_info = { m_raster_env_ring_info.heap->value, mem, 128 };
m_graphics_state.clear(rsx::pipeline_state::polygon_stipple_pattern_dirty);
}
if (update_instruction_buffers)
{
if (m_interpreter_state & rsx::vertex_program_dirty)
{
// Attach vertex buffer data
const auto vp_block_length = current_vp_metadata.ucode_length + 16;
auto vp_mapping = m_vertex_instructions_buffer.alloc<256>(vp_block_length);
auto vp_buf = static_cast<u8*>(m_vertex_instructions_buffer.map(vp_mapping, vp_block_length));
auto vp_config = reinterpret_cast<u32*>(vp_buf);
vp_config[0] = current_vertex_program.base_address;
vp_config[1] = current_vertex_program.entry;
vp_config[2] = current_vertex_program.output_mask;
vp_config[3] = rsx::method_registers.two_side_light_en() ? 1u : 0u;
std::memcpy(vp_buf + 16, current_vertex_program.data.data(), current_vp_metadata.ucode_length);
m_vertex_instructions_buffer.unmap();
m_vertex_instructions_buffer_info = { m_vertex_instructions_buffer.heap->value, vp_mapping, vp_block_length };
}
if (m_interpreter_state & rsx::fragment_program_dirty)
{
// Attach fragment buffer data
const auto fp_block_length = current_fp_metadata.program_ucode_length + 16;
auto fp_mapping = m_fragment_instructions_buffer.alloc<256>(fp_block_length);
auto fp_buf = static_cast<u8*>(m_fragment_instructions_buffer.map(fp_mapping, fp_block_length));
// Control mask
const auto control_masks = reinterpret_cast<u32*>(fp_buf);
control_masks[0] = rsx::method_registers.shader_control();
control_masks[1] = current_fragment_program.texture_state.texture_dimensions;
std::memcpy(fp_buf + 16, current_fragment_program.get_data(), current_fragment_program.ucode_length);
m_fragment_instructions_buffer.unmap();
m_fragment_instructions_buffer_info = { m_fragment_instructions_buffer.heap->value, fp_mapping, fp_block_length };
}
}
const auto& binding_table = m_device->get_pipeline_binding_table();
m_program->bind_uniform(m_vertex_env_buffer_info, binding_table.vertex_params_bind_slot, m_current_frame->descriptor_set);
m_program->bind_uniform(m_vertex_constants_buffer_info, binding_table.vertex_constant_buffers_bind_slot, m_current_frame->descriptor_set);
m_program->bind_uniform(m_fragment_env_buffer_info, binding_table.fragment_state_bind_slot, m_current_frame->descriptor_set);
m_program->bind_uniform(m_fragment_texture_params_buffer_info, binding_table.fragment_texture_params_bind_slot, m_current_frame->descriptor_set);
m_program->bind_uniform(m_raster_env_buffer_info, binding_table.rasterizer_env_bind_slot, m_current_frame->descriptor_set);
if (!m_shader_interpreter.is_interpreter(m_program))
{
m_program->bind_uniform(m_fragment_constants_buffer_info, binding_table.fragment_constant_buffers_bind_slot, m_current_frame->descriptor_set);
}
else
{
m_program->bind_buffer(m_vertex_instructions_buffer_info, m_shader_interpreter.get_vertex_instruction_location(), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_current_frame->descriptor_set);
m_program->bind_buffer(m_fragment_instructions_buffer_info, m_shader_interpreter.get_fragment_instruction_location(), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_current_frame->descriptor_set);
}
if (vk::emulate_conditional_rendering())
{
auto predicate = m_cond_render_buffer ? m_cond_render_buffer->value : vk::get_scratch_buffer(*m_current_command_buffer, 4)->value;
m_program->bind_buffer({ predicate, 0, 4 }, binding_table.conditional_render_predicate_slot, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_current_frame->descriptor_set);
}
// Clear flags
m_graphics_state.clear(
rsx::pipeline_state::fragment_state_dirty |
rsx::pipeline_state::vertex_state_dirty |
rsx::pipeline_state::transform_constants_dirty |
rsx::pipeline_state::fragment_constants_dirty |
rsx::pipeline_state::fragment_texture_state_dirty);
}
void VKGSRender::upload_transform_constants(const rsx::io_buffer& buffer)
{
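// Programs with dynamically indexed constants need the full 8 KiB block; otherwise only the referenced constants are uploaded (16 bytes each)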
const usz transform_constants_size = (!m_vertex_prog || m_vertex_prog->has_indexed_constants) ? 8192 : m_vertex_prog->constant_ids.size() * 16;
if (transform_constants_size)
{
check_heap_status(VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE);
buffer.reserve(transform_constants_size);
auto buf = buffer.data();
const auto constant_ids = (transform_constants_size == 8192)
? std::span<const u16>{}
: std::span<const u16>(m_vertex_prog->constant_ids);
fill_vertex_program_constants_data(buf, constant_ids);
}
}
void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_info)
{
// Actual allocation must have been done previously
u32 base_offset;
const u32 offset32 = static_cast<u32>(m_vertex_layout_stream_info.offset);
const u32 range32 = static_cast<u32>(m_vertex_layout_stream_info.range);
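// Rebuild the texel buffer view if the current stream range falls outside the region covered by the existing view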
if (!m_vertex_layout_storage || !m_vertex_layout_storage->in_range(offset32, range32, base_offset))
{
ensure(m_texbuffer_view_size >= m_vertex_layout_stream_info.range);
if (m_vertex_layout_storage)
m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage));
const usz alloc_addr = m_vertex_layout_stream_info.offset;
const usz view_size = (alloc_addr + m_texbuffer_view_size) > m_vertex_layout_ring_info.size() ? m_vertex_layout_ring_info.size() - alloc_addr : m_texbuffer_view_size;
m_vertex_layout_storage = std::make_unique<vk::buffer_view>(*m_device, m_vertex_layout_ring_info.heap->value, VK_FORMAT_R32G32_UINT, alloc_addr, view_size);
base_offset = 0;
}
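// Push constants: vertex index base, index offset, draw id and packed layout offset; a fifth word carries the conditional render state when emulated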
u8 data_size = 16;
u32 draw_info[5];
draw_info[0] = vertex_info.vertex_index_base;
draw_info[1] = vertex_info.vertex_index_offset;
draw_info[2] = id;
draw_info[3] = (id * 16) + (base_offset / 8);
if (vk::emulate_conditional_rendering())
{
draw_info[4] = cond_render_ctrl.hw_cond_active ? 1 : 0;
data_size = 20;
}
vkCmdPushConstants(*m_current_command_buffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
const usz data_offset = (id * 128) + m_vertex_layout_stream_info.offset;
auto dst = m_vertex_layout_ring_info.map(data_offset, 128);
fill_vertex_layout_state(m_vertex_layout, vertex_info.first_vertex, vertex_info.allocated_vertex_count, static_cast<s32*>(dst),
vertex_info.persistent_window_offset, vertex_info.volatile_window_offset);
m_vertex_layout_ring_info.unmap();
}
void VKGSRender::patch_transform_constants(rsx::context* ctx, u32 index, u32 count)
{
if (!m_vertex_prog)
{
// Shouldn't be reachable, but handle it correctly anyway
m_graphics_state |= rsx::pipeline_state::transform_constants_dirty;
return;
}
// Hot-patching transform constants mid-draw (instanced draw)
std::pair<VkDeviceSize, VkDeviceSize> data_range;
void* data_source = nullptr;
if (m_vertex_prog->has_indexed_constants)
{
// We're working with a full range. We can do a direct patch in this case since no index translation is required.
const auto byte_count = count * 16;
const auto byte_offset = index * 16;
data_range = { m_vertex_constants_buffer_info.offset + byte_offset, byte_count };
data_source = &REGS(ctx)->transform_constants[index];
}
else if (auto xform_id = m_vertex_prog->TranslateConstantsRange(index, count); xform_id >= 0)
{
const auto write_offset = xform_id * 16;
const auto byte_count = count * 16;
data_range = { m_vertex_constants_buffer_info.offset + write_offset, byte_count };
data_source = &REGS(ctx)->transform_constants[index];
}
else
{
// Indexed. This is a bit trickier. Use scratchpad to avoid UAF
auto allocate_mem = [&](usz size) -> std::pair<void*, usz>
{
m_scratch_mem.resize(size);
return { m_scratch_mem.data(), size };
};
rsx::io_buffer iobuf(allocate_mem);
upload_transform_constants(iobuf);
ensure(iobuf.size() >= m_vertex_constants_buffer_info.range);
data_range = { m_vertex_constants_buffer_info.offset, m_vertex_constants_buffer_info.range };
data_source = iobuf.data();
}
// Preserving an active renderpass across a transfer operation is illegal in Vulkan. However, splitting up the CB into thousands of renderpasses incurs an overhead.
// We cheat here for specific cases where we already know the driver can let us get away with this.
static const std::set<vk::driver_vendor> s_allowed_vendors =
{
vk::driver_vendor::AMD,
vk::driver_vendor::RADV,
vk::driver_vendor::LAVAPIPE,
vk::driver_vendor::NVIDIA,
vk::driver_vendor::NVK
};
const auto driver_vendor = vk::get_driver_vendor();
const bool preserve_renderpass = !g_cfg.video.strict_rendering_mode && s_allowed_vendors.contains(driver_vendor);
vk::insert_buffer_memory_barrier(
*m_current_command_buffer,
m_vertex_constants_buffer_info.buffer,
data_range.first,
data_range.second,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_UNIFORM_READ_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
preserve_renderpass);
// FIXME: This is illegal during a renderpass
vkCmdUpdateBuffer(
*m_current_command_buffer,
m_vertex_constants_buffer_info.buffer,
data_range.first,
data_range.second,
data_source);
vk::insert_buffer_memory_barrier(
*m_current_command_buffer,
m_vertex_constants_buffer_info.buffer,
data_range.first,
data_range.second,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_UNIFORM_READ_BIT,
preserve_renderpass);
}
void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool)
{
prepare_rtts(context);
}
void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkPipelineStageFlags pipeline_stage_flags)
{
ensure(!m_queue_status.test_and_set(flush_queue_state::flushing));
// Workaround for deadlock occurring during RSX offloader fault
// TODO: Restructure command submission infrastructure to avoid this condition
const bool sync_success = g_fxo->get<rsx::dma_manager>().sync();
const VkBool32 force_flush = !sync_success;
if (vk::test_status_interrupt(vk::heap_dirty))
{
if (m_attrib_ring_info.is_dirty() ||
m_fragment_env_ring_info.is_dirty() ||
m_vertex_env_ring_info.is_dirty() ||
m_fragment_texture_params_ring_info.is_dirty() ||
m_vertex_layout_ring_info.is_dirty() ||
m_fragment_constants_ring_info.is_dirty() ||
m_index_buffer_ring_info.is_dirty() ||
m_transform_constants_ring_info.is_dirty() ||
m_texture_upload_buffer_ring_info.is_dirty() ||
m_raster_env_ring_info.is_dirty())
{
auto secondary_command_buffer = m_secondary_cb_list.next();
secondary_command_buffer->begin();
m_attrib_ring_info.sync(*secondary_command_buffer);
m_fragment_env_ring_info.sync(*secondary_command_buffer);
m_vertex_env_ring_info.sync(*secondary_command_buffer);
m_fragment_texture_params_ring_info.sync(*secondary_command_buffer);
m_vertex_layout_ring_info.sync(*secondary_command_buffer);
m_fragment_constants_ring_info.sync(*secondary_command_buffer);
m_index_buffer_ring_info.sync(*secondary_command_buffer);
m_transform_constants_ring_info.sync(*secondary_command_buffer);
m_texture_upload_buffer_ring_info.sync(*secondary_command_buffer);
m_raster_env_ring_info.sync(*secondary_command_buffer);
secondary_command_buffer->end();
vk::queue_submit_t submit_info{ m_device->get_graphics_queue(), nullptr };
secondary_command_buffer->submit(submit_info, force_flush);
}
vk::clear_status_interrupt(vk::heap_dirty);
}
#if 0 // Currently unreachable
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
{
ensure(m_render_pass_open);
m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
}
#endif
// End any active renderpasses; the caller should handle reopening
if (vk::is_renderpass_open(*m_current_command_buffer))
{
close_render_pass();
}
// End open queries. Flags will be automatically reset by the submit routine
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query)
{
auto open_query = m_occlusion_map[m_active_query_info->driver_handle].indices.back();
m_occlusion_query_manager->end_query(*m_current_command_buffer, open_query);
m_current_command_buffer->flags &= ~vk::command_buffer::cb_has_open_query;
}
if (m_host_dma_ctrl && m_host_dma_ctrl->host_ctx()->needs_label_release())
{
vkCmdUpdateBuffer(*m_current_command_buffer,
m_host_object_data->value,
::offset32(&vk::host_data_t::commands_complete_event),
sizeof(u64),
const_cast<u64*>(&m_host_dma_ctrl->host_ctx()->last_label_acquire_event));
m_host_dma_ctrl->host_ctx()->on_label_release();
}
m_current_command_buffer->end();
m_current_command_buffer->tag();
// Supporting concurrent access vastly simplifies this logic.
// Instead of doing CB slice injection, we can just chain these together logically with the async stream going first
vk::queue_submit_t primary_submit_info{ m_device->get_graphics_queue(), pFence };
vk::queue_submit_t secondary_submit_info{};
if (wait_semaphore)
{
primary_submit_info.wait_on(wait_semaphore, pipeline_stage_flags);
}
if (auto async_scheduler = g_fxo->try_get<vk::AsyncTaskScheduler>();
async_scheduler && async_scheduler->is_recording())
{
if (async_scheduler->is_host_mode())
{
const VkSemaphore async_sema = *async_scheduler->get_sema();
secondary_submit_info.queue_signal(async_sema);
primary_submit_info.wait_on(async_sema, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
// Delay object destruction by one cycle
vk::get_resource_manager()->push_down_current_scope();
}
async_scheduler->flush(secondary_submit_info, force_flush);
}
if (signal_semaphore)
{
primary_submit_info.queue_signal(signal_semaphore);
}
m_current_command_buffer->submit(primary_submit_info, force_flush);
m_queue_status.clear(flush_queue_state::flushing);
}
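// [Illustrative sketch, not part of the original file]
// The submit path above orders work with semaphores instead of splicing
// command buffers: the async transfer stream signals a semaphore that the
// primary submission waits on before fragment shading. A minimal sketch of
// the same pattern in raw Vulkan; all names below are hypothetical.
#if 0
void submit_chained(VkQueue queue, VkCommandBuffer async_cb, VkCommandBuffer primary_cb,
	VkSemaphore async_done, VkFence fence)
{
	// 1. The async stream goes first and signals 'async_done' on completion
	VkSubmitInfo async_submit{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	async_submit.commandBufferCount = 1;
	async_submit.pCommandBuffers = &async_cb;
	async_submit.signalSemaphoreCount = 1;
	async_submit.pSignalSemaphores = &async_done;
	vkQueueSubmit(queue, 1, &async_submit, VK_NULL_HANDLE);

	// 2. The primary submission waits on that semaphore before fragment shading,
	//    matching the VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT wait used above
	const VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
	VkSubmitInfo primary_submit{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	primary_submit.waitSemaphoreCount = 1;
	primary_submit.pWaitSemaphores = &async_done;
	primary_submit.pWaitDstStageMask = &wait_stage;
	primary_submit.commandBufferCount = 1;
	primary_submit.pCommandBuffers = &primary_cb;
	vkQueueSubmit(queue, 1, &primary_submit, fence);
}
#endif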
void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
const bool clipped_scissor = (context == rsx::framebuffer_creation_context::context_draw);
if (m_current_framebuffer_context == context && !m_graphics_state.test(rsx::rtt_config_dirty) && m_draw_fbo)
{
// Fast path
// Framebuffer usage has not changed, framebuffer exists and config regs have not changed
set_scissor(clipped_scissor);
return;
}
m_graphics_state.clear(
rsx::rtt_config_dirty |
rsx::rtt_config_contested |
rsx::rtt_config_valid |
rsx::rtt_cache_state_dirty);
get_framebuffer_layout(context, m_framebuffer_layout);
if (!m_graphics_state.test(rsx::rtt_config_valid))
{
return;
}
if (m_draw_fbo && m_framebuffer_layout.ignore_change)
{
// Nothing has changed, we're still using the same framebuffer
// Update flags to match current
set_scissor(clipped_scissor);
return;
}
m_rtts.prepare_render_target(*m_current_command_buffer,
m_framebuffer_layout.color_format, m_framebuffer_layout.depth_format,
m_framebuffer_layout.width, m_framebuffer_layout.height,
m_framebuffer_layout.target, m_framebuffer_layout.aa_mode, m_framebuffer_layout.raster_type,
m_framebuffer_layout.color_addresses, m_framebuffer_layout.zeta_address,
m_framebuffer_layout.actual_color_pitch, m_framebuffer_layout.actual_zeta_pitch,
(*m_device), *m_current_command_buffer);
// Reset framebuffer information
const auto color_bpp = get_format_block_size_in_bytes(m_framebuffer_layout.color_format);
const auto samples = get_format_sample_count(m_framebuffer_layout.aa_mode);
for (u8 i = 0; i < rsx::limits::color_buffers_count; ++i)
{
// Flush old address if we keep missing it
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
const utils::address_range rsx_range = m_surface_info[i].get_memory_range();
m_texture_cache.set_memory_read_flags(rsx_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, rsx_range);
}
m_surface_info[i].address = m_surface_info[i].pitch = 0;
m_surface_info[i].width = m_framebuffer_layout.width;
m_surface_info[i].height = m_framebuffer_layout.height;
m_surface_info[i].color_format = m_framebuffer_layout.color_format;
m_surface_info[i].bpp = color_bpp;
m_surface_info[i].samples = samples;
}
// Process depth surface as well
{
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
m_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, surface_range);
}
m_depth_surface_info.address = m_depth_surface_info.pitch = 0;
m_depth_surface_info.width = m_framebuffer_layout.width;
m_depth_surface_info.height = m_framebuffer_layout.height;
m_depth_surface_info.depth_format = m_framebuffer_layout.depth_format;
m_depth_surface_info.bpp = get_format_block_size_in_bytes(m_framebuffer_layout.depth_format);
m_depth_surface_info.samples = samples;
}
// Bind created rtts as current fbo...
const auto draw_buffers = rsx::utility::get_rtt_indexes(m_framebuffer_layout.target);
m_draw_buffers.clear();
m_fbo_images.clear();
for (u8 index : draw_buffers)
{
if (auto surface = std::get<1>(m_rtts.m_bound_render_targets[index]))
{
m_fbo_images.push_back(surface);
m_surface_info[index].address = m_framebuffer_layout.color_addresses[index];
m_surface_info[index].pitch = m_framebuffer_layout.actual_color_pitch[index];
ensure(surface->rsx_pitch == m_framebuffer_layout.actual_color_pitch[index]);
m_texture_cache.notify_surface_changed(m_surface_info[index].get_memory_range(m_framebuffer_layout.aa_factors));
m_draw_buffers.push_back(index);
}
}
if (std::get<0>(m_rtts.m_bound_depth_stencil) != 0)
{
auto ds = std::get<1>(m_rtts.m_bound_depth_stencil);
m_fbo_images.push_back(ds);
m_depth_surface_info.address = m_framebuffer_layout.zeta_address;
m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch;
ensure(ds->rsx_pitch == m_framebuffer_layout.actual_zeta_pitch);
m_texture_cache.notify_surface_changed(m_depth_surface_info.get_memory_range(m_framebuffer_layout.aa_factors));
}
// Before messing with memory properties, flush command queue if there are dma transfers queued up
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_dma_transfer)
{
flush_command_queue();
}
if (!m_rtts.superseded_surfaces.empty())
{
for (auto& surface : m_rtts.superseded_surfaces)
{
m_texture_cache.discard_framebuffer_memory_region(*m_current_command_buffer, surface->get_memory_range());
}
m_rtts.superseded_surfaces.clear();
}
if (!m_rtts.orphaned_surfaces.empty())
{
u32 gcm_format;
bool swap_bytes;
for (auto& [base_addr, surface] : m_rtts.orphaned_surfaces)
{
bool lock = surface->is_depth_surface() ? !!g_cfg.video.write_depth_buffer :
!!g_cfg.video.write_color_buffers;
if (lock &&
#ifdef TEXTURE_CACHE_DEBUG
!m_texture_cache.is_protected(
base_addr,
surface->get_memory_range(),
rsx::texture_upload_context::framebuffer_storage)
#else
!surface->is_locked()
#endif
)
{
lock = false;
}
if (!lock) [[likely]]
{
m_texture_cache.commit_framebuffer_memory_region(*m_current_command_buffer, surface->get_memory_range());
continue;
}
if (surface->is_depth_surface())
{
gcm_format = (surface->get_surface_depth_format() == rsx::surface_depth_format::z16) ? CELL_GCM_TEXTURE_DEPTH16 : CELL_GCM_TEXTURE_DEPTH24_D8;
swap_bytes = true;
}
else
{
auto info = get_compatible_gcm_format(surface->get_surface_color_format());
gcm_format = info.first;
swap_bytes = info.second;
}
m_texture_cache.lock_memory_region(
*m_current_command_buffer, surface, surface->get_memory_range(), false,
surface->get_surface_width<rsx::surface_metrics::pixels>(), surface->get_surface_height<rsx::surface_metrics::pixels>(), surface->get_rsx_pitch(),
gcm_format, swap_bytes);
}
m_rtts.orphaned_surfaces.clear();
}
const auto color_fmt_info = get_compatible_gcm_format(m_framebuffer_layout.color_format);
for (u8 index : m_draw_buffers)
{
if (!m_surface_info[index].address || !m_surface_info[index].pitch) continue;
const utils::address_range surface_range = m_surface_info[index].get_memory_range();
if (g_cfg.video.write_color_buffers)
{
m_texture_cache.lock_memory_region(
*m_current_command_buffer, m_rtts.m_bound_render_targets[index].second, surface_range, true,
m_surface_info[index].width, m_surface_info[index].height, m_framebuffer_layout.actual_color_pitch[index],
color_fmt_info.first, color_fmt_info.second);
}
else
{
m_texture_cache.commit_framebuffer_memory_region(*m_current_command_buffer, surface_range);
}
}
if (m_depth_surface_info.address && m_depth_surface_info.pitch)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
if (g_cfg.video.write_depth_buffer)
{
const u32 gcm_format = (m_depth_surface_info.depth_format == rsx::surface_depth_format::z16) ? CELL_GCM_TEXTURE_DEPTH16 : CELL_GCM_TEXTURE_DEPTH24_D8;
m_texture_cache.lock_memory_region(
*m_current_command_buffer, m_rtts.m_bound_depth_stencil.second, surface_range, true,
m_depth_surface_info.width, m_depth_surface_info.height, m_framebuffer_layout.actual_zeta_pitch, gcm_format, true);
}
else
{
m_texture_cache.commit_framebuffer_memory_region(*m_current_command_buffer, surface_range);
}
}
m_current_renderpass_key = vk::get_renderpass_key(m_fbo_images);
m_cached_renderpass = vk::get_renderpass(*m_device, m_current_renderpass_key);
// Search old framebuffers for this same configuration
const auto [fbo_width, fbo_height] = rsx::apply_resolution_scale<true>(m_framebuffer_layout.width, m_framebuffer_layout.height);
if (m_draw_fbo)
{
// Release old ref
m_draw_fbo->release();
}
m_draw_fbo = vk::get_framebuffer(*m_device, fbo_width, fbo_height, VK_FALSE, m_cached_renderpass, m_fbo_images);
m_draw_fbo->add_ref();
set_viewport();
set_scissor(clipped_scissor);
check_zcull_status(true);
}
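// [Illustrative sketch, not part of the original file]
// The tail of prepare_rtts() applies one policy per surface: when the
// corresponding write-back option is enabled, the surface range is locked so
// CPU reads can be serviced from the render target; otherwise the range is
// merely committed (ownership recorded, no access tracking). Hypothetical
// condensation of that policy:
#if 0
const bool want_write_back = is_depth ? !!g_cfg.video.write_depth_buffer
                                      : !!g_cfg.video.write_color_buffers;
if (want_write_back)
	m_texture_cache.lock_memory_region(cmd, surface, range, /* is_active */ true,
		width, height, pitch, gcm_format, swap_bytes);
else
	m_texture_cache.commit_framebuffer_memory_region(cmd, range);
#endif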
void VKGSRender::renderctl(u32 request_code, void* args)
{
switch (request_code)
{
case vk::rctrl_queue_submit:
{
const auto packet = reinterpret_cast<vk::queue_submit_t*>(args);
vk::queue_submit(packet);
free(packet);
break;
}
case vk::rctrl_run_gc:
{
auto eid = reinterpret_cast<u64>(args);
vk::on_event_completed(eid, true);
break;
}
default:
fmt::throw_exception("Unhandled request code 0x%x", request_code);
}
}
bool VKGSRender::scaled_image_from_memory(const rsx::blit_src_info& src, const rsx::blit_dst_info& dst, bool interpolate)
{
if (swapchain_unavailable)
return false;
// Verify enough memory exists before attempting to handle data transfer
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
if (m_texture_cache.blit(src, dst, interpolate, m_rtts, *m_current_command_buffer))
{
m_samplers_dirty.store(true);
m_current_command_buffer->set_flag(vk::command_buffer::cb_has_blit_transfer);
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_dma_transfer)
{
// A dma transfer has been queued onto this cb
// This likely means that we're done with the transfers to the target (writes_likely_completed=1)
flush_command_queue();
}
return true;
}
return false;
}
void VKGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query)
{
ensure(!m_occlusion_query_active);
query->result = 0;
//query->sync_timestamp = get_system_time();
m_active_query_info = query;
m_occlusion_query_active = true;
m_current_command_buffer->flags |= vk::command_buffer::cb_load_occluson_task;
}
void VKGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query)
{
ensure(query == m_active_query_info);
// NOTE: flushing the queue is very expensive; do not flush just because a query stopped
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query)
{
// End query
auto open_query = m_occlusion_map[m_active_query_info->driver_handle].indices.back();
m_occlusion_query_manager->end_query(*m_current_command_buffer, open_query);
m_current_command_buffer->flags &= ~vk::command_buffer::cb_has_open_query;
}
// Clear occlusion load flag
m_current_command_buffer->flags &= ~vk::command_buffer::cb_load_occluson_task;
m_occlusion_query_active = false;
m_active_query_info = nullptr;
}
bool VKGSRender::check_occlusion_query_status(rsx::reports::occlusion_query_info* query)
{
if (!query->num_draws)
return true;
auto &data = m_occlusion_map[query->driver_handle];
if (data.indices.empty())
return true;
if (data.is_current(m_current_command_buffer))
return false;
const u32 oldest = data.indices.front();
return m_occlusion_query_manager->check_query_status(oldest);
}
void VKGSRender::get_occlusion_query_result(rsx::reports::occlusion_query_info* query)
{
auto &data = m_occlusion_map[query->driver_handle];
if (data.indices.empty())
return;
if (query->num_draws)
{
if (data.is_current(m_current_command_buffer))
{
std::lock_guard lock(m_flush_queue_mutex);
flush_command_queue();
if (m_flush_requests.pending())
{
m_flush_requests.clear_pending_flag();
}
rsx_log.warning("[Performance warning] Unexpected ZCULL read caused a hard sync");
busy_wait();
}
data.sync();
// Gather data
for (const auto occlusion_id : data.indices)
{
query->result += m_occlusion_query_manager->get_query_result(occlusion_id);
if (query->result && !g_cfg.video.precise_zpass_count)
{
// We only need one hit unless precise zcull is requested
break;
}
}
}
m_occlusion_query_manager->free_queries(*m_current_command_buffer, data.indices);
data.indices.clear();
}
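// [Illustrative usage, not part of the original file]
// check_occlusion_query_status() returns true once the result can be read
// without stalling; a hypothetical caller polls it before collecting so that
// get_occlusion_query_result() does not trigger the hard-sync path above.
#if 0
while (!renderer->check_occlusion_query_status(query))
{
	do_other_work(); // hypothetical: give the GPU time to finish the pass
}
renderer->get_occlusion_query_result(query); // accumulates into query->result
#endif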
void VKGSRender::discard_occlusion_query(rsx::reports::occlusion_query_info* query)
{
if (m_active_query_info == query)
{
end_occlusion_query(query);
}
auto &data = m_occlusion_map[query->driver_handle];
if (data.indices.empty())
return;
m_occlusion_query_manager->free_queries(*m_current_command_buffer, data.indices);
data.indices.clear();
}
void VKGSRender::emergency_query_cleanup(vk::command_buffer* commands)
{
ensure(commands == static_cast<vk::command_buffer*>(m_current_command_buffer));
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query)
{
auto open_query = m_occlusion_map[m_active_query_info->driver_handle].indices.back();
m_occlusion_query_manager->end_query(*m_current_command_buffer, open_query);
m_current_command_buffer->flags &= ~vk::command_buffer::cb_has_open_query;
}
}
void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources)
{
ensure(!sources.empty());
// Flag indicating whether to calculate all entries or only one
bool partial_eval;
// Try to avoid regenerating the data if it's a repeat/spam
// NOTE: The incoming list is reversed with the first entry being the newest
if (m_cond_render_sync_tag == sources.front()->sync_tag)
{
// Already synced; check for a subdraw, which is possible if the last sync happened while a query was active
if (!m_active_query_info || m_active_query_info != sources.front())
{
rsx::thread::begin_conditional_rendering(sources);
return;
}
// Partial evaluation only
partial_eval = true;
}
else
{
m_cond_render_sync_tag = sources.front()->sync_tag;
partial_eval = false;
}
// Time to aggregate
if (!m_cond_render_buffer)
{
auto& memory_props = m_device->get_memory_mapping();
auto usage_flags = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
if (m_device->get_conditional_render_support())
{
usage_flags |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;
}
m_cond_render_buffer = std::make_unique<vk::buffer>(
*m_device, 4,
memory_props.device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
usage_flags, 0, VMM_ALLOCATION_POOL_UNDEFINED);
}
VkPipelineStageFlags dst_stage;
VkAccessFlags dst_access;
u32 dst_offset = 0;
u32 num_hw_queries = 0;
usz first = 0;
usz last = (!partial_eval) ? sources.size() : 1;
// Count the number of queries available. This is an "opening" evaluation: if there is only one source, read it as-is.
// The idea is to avoid scheduling a compute task unless we have to.
for (usz i = first; i < last; ++i)
{
auto& query_info = m_occlusion_map[sources[i]->driver_handle];
num_hw_queries += ::size32(query_info.indices);
}
if (m_device->get_conditional_render_support())
{
dst_stage = VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT;
dst_access = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT;
}
else
{
dst_stage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
dst_access = VK_ACCESS_SHADER_READ_BIT;
}
if (num_hw_queries == 1 && !partial_eval) [[ likely ]]
{
// Accept the first available query handle as the source of truth. No aggregation is required.
for (usz i = first; i < last; ++i)
{
auto& query_info = m_occlusion_map[sources[i]->driver_handle];
if (!query_info.indices.empty())
{
const auto& index = query_info.indices.front();
m_occlusion_query_manager->get_query_result_indirect(*m_current_command_buffer, index, 1, m_cond_render_buffer->value, 0);
vk::insert_buffer_memory_barrier(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4,
VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage,
VK_ACCESS_TRANSFER_WRITE_BIT, dst_access);
rsx::thread::begin_conditional_rendering(sources);
return;
}
}
// This is unreachable unless something went horribly wrong
fmt::throw_exception("Unreachable");
}
else if (num_hw_queries > 0)
{
// We'll need to do some result aggregation using a compute shader.
auto scratch = vk::get_scratch_buffer(*m_current_command_buffer, num_hw_queries * 4);
// Range latching. Because of how the query pool manages allocations using a stack, we get an inverse sequential set of handles/indices that we can easily group together.
// This drastically boosts performance on some drivers like the NVIDIA proprietary one, which seems to have a rather high cost for every individual query transfer command.
struct { u32 first, last; } query_range = { umax, 0 };
auto copy_query_range_impl = [&]()
{
const auto count = (query_range.last - query_range.first + 1);
m_occlusion_query_manager->get_query_result_indirect(*m_current_command_buffer, query_range.first, count, scratch->value, dst_offset);
dst_offset += count * 4;
};
for (usz i = first; i < last; ++i)
{
auto& query_info = m_occlusion_map[sources[i]->driver_handle];
for (const auto& index : query_info.indices)
{
// First iteration?
if (query_range.first == umax)
{
query_range = { index, index };
continue;
}
// Head?
if ((query_range.first - 1) == index)
{
query_range.first = index;
continue;
}
// Tail?
if ((query_range.last + 1) == index)
{
query_range.last = index;
continue;
}
// Flush pending queue. In practice, this is never reached and we fall out to the spill block outside the loops
copy_query_range_impl();
// Start a new range for the current index
query_range = { index, index };
}
}
if (query_range.first != umax)
{
// Dangling queries, flush
copy_query_range_impl();
}
// Sanity check
ensure(dst_offset <= scratch->size());
if (!partial_eval)
{
// Fast path should have been caught above
ensure(dst_offset > 4);
// Clear result to zero
vkCmdFillBuffer(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4, 0);
vk::insert_buffer_memory_barrier(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT);
}
vk::insert_buffer_memory_barrier(*m_current_command_buffer, scratch->value, 0, dst_offset,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
vk::get_compute_task<vk::cs_aggregator>()->run(*m_current_command_buffer, m_cond_render_buffer.get(), scratch, dst_offset / 4);
vk::insert_buffer_memory_barrier(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, dst_stage,
VK_ACCESS_SHADER_WRITE_BIT, dst_access);
}
else if (m_program)
{
// This can sometimes happen while shaders are compiling; only log if there is a program hit
rsx_log.warning("Dubious query data pushed to cond render! Please report to developers (q.pending=%d)", sources.front()->pending);
}
rsx::thread::begin_conditional_rendering(sources);
}
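// [Illustrative sketch, not part of the original file]
// The "range latching" above exploits the stack-like allocation order of the
// query pool: indices usually arrive in descending, contiguous runs, so they
// can be coalesced into [first, last] ranges and copied with one transfer
// command per run. A standalone version of the same coalescing:
#if 0
#include <cstdint>
#include <vector>

struct range { uint32_t first, last; };

std::vector<range> coalesce(const std::vector<uint32_t>& indices)
{
	std::vector<range> out;
	constexpr uint32_t invalid = UINT32_MAX;
	range cur{ invalid, 0 };
	for (uint32_t idx : indices)
	{
		if (cur.first == invalid) { cur = { idx, idx }; continue; } // first iteration
		if (cur.first - 1 == idx) { cur.first = idx; continue; }    // grow head
		if (cur.last + 1 == idx)  { cur.last = idx; continue; }     // grow tail
		out.push_back(cur);                                         // discontinuity: flush
		cur = { idx, idx };                                         // restart range
	}
	if (cur.first != invalid) out.push_back(cur); // flush dangling range
	return out;
}
// coalesce({ 7, 6, 5, 2, 1 }) -> { {5, 7}, {1, 2} }
#endif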
void VKGSRender::end_conditional_rendering()
{
thread::end_conditional_rendering();
}
size: 108,482 | language: C++ | extension: .cpp | total_lines: 2,652 | avg_line_length: 37.758673 | max_line_length: 197 | alphanum_fraction: 0.72939
repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,448 | file_name: VKOverlays.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKOverlays.cpp
content:
#include "VKOverlays.h"
#include "VKRenderTargets.h"
#include "VKFramebuffer.h"
#include "VKResourceManager.h"
#include "VKRenderPass.h"
#include "VKPipelineCompiler.h"
#include "vkutils/image.h"
#include "vkutils/image_helpers.h"
#include "vkutils/sampler.h"
#include "vkutils/scratch.h"
#include "../Overlays/overlays.h"
#include "../Program/RSXOverlay.h"
#include "util/fnv_hash.hpp"
namespace vk
{
overlay_pass::overlay_pass()
{
// Override-able defaults
renderpass_config.set_primitive_type(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
}
overlay_pass::~overlay_pass()
{
m_vao.destroy();
m_ubo.destroy();
}
u64 overlay_pass::get_pipeline_key(VkRenderPass pass)
{
u64 key = rpcs3::hash_struct(renderpass_config);
key ^= reinterpret_cast<uptr>(pass);
return key;
}
void overlay_pass::check_heap()
{
if (!m_vao.heap)
{
m_vao.create(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, 1 * 0x100000, "overlays VAO", 128);
m_ubo.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, 8 * 0x100000, "overlays UBO", 128);
}
}
void overlay_pass::init_descriptors()
{
rsx::simple_array<VkDescriptorPoolSize> descriptor_pool_sizes =
{
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1 }
};
if (m_num_usable_samplers)
{
descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_num_usable_samplers });
}
if (m_num_input_attachments)
{
descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, m_num_input_attachments });
}
// Reserve descriptor pools
m_descriptor_pool.create(*m_device, descriptor_pool_sizes);
const auto num_bindings = 1 + m_num_usable_samplers + m_num_input_attachments;
rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(num_bindings);
bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[0].descriptorCount = 1;
bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[0].binding = 0;
bindings[0].pImmutableSamplers = nullptr;
u32 descriptor_index = 1;
for (u32 n = 0; n < m_num_usable_samplers; ++n, ++descriptor_index)
{
bindings[descriptor_index].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[descriptor_index].descriptorCount = 1;
bindings[descriptor_index].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[descriptor_index].binding = descriptor_index;
bindings[descriptor_index].pImmutableSamplers = nullptr;
}
for (u32 n = 0; n < m_num_input_attachments; ++n, ++descriptor_index)
{
bindings[descriptor_index].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
bindings[descriptor_index].descriptorCount = 1;
bindings[descriptor_index].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[descriptor_index].binding = descriptor_index;
bindings[descriptor_index].pImmutableSamplers = nullptr;
}
ensure(descriptor_index == num_bindings);
m_descriptor_layout = vk::descriptors::create_layout(bindings);
VkPipelineLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layout_info.setLayoutCount = 1;
layout_info.pSetLayouts = &m_descriptor_layout;
std::vector<VkPushConstantRange> push_constants = get_push_constants();
if (!push_constants.empty())
{
layout_info.pushConstantRangeCount = u32(push_constants.size());
layout_info.pPushConstantRanges = push_constants.data();
}
CHECK_RESULT(vkCreatePipelineLayout(*m_device, &layout_info, nullptr, &m_pipeline_layout));
}
std::vector<vk::glsl::program_input> overlay_pass::get_vertex_inputs()
{
check_heap();
return{};
}
std::vector<vk::glsl::program_input> overlay_pass::get_fragment_inputs()
{
std::vector<vk::glsl::program_input> fs_inputs;
fs_inputs.push_back({ ::glsl::program_domain::glsl_fragment_program, vk::glsl::program_input_type::input_type_uniform_buffer,{},{}, 0, "static_data" });
u32 binding = 1;
for (u32 n = 0; n < m_num_usable_samplers; ++n, ++binding)
{
fs_inputs.push_back({ ::glsl::program_domain::glsl_fragment_program, vk::glsl::program_input_type::input_type_texture,{},{}, binding, "fs" + std::to_string(n) });
}
for (u32 n = 0; n < m_num_input_attachments; ++n, ++binding)
{
fs_inputs.push_back({ ::glsl::program_domain::glsl_fragment_program, vk::glsl::program_input_type::input_type_texture,{},{}, binding, "sp" + std::to_string(n) });
}
return fs_inputs;
}
vk::glsl::program* overlay_pass::build_pipeline(u64 storage_key, VkRenderPass render_pass)
{
if (!compiled)
{
m_vertex_shader.create(::glsl::program_domain::glsl_vertex_program, vs_src);
m_vertex_shader.compile();
m_fragment_shader.create(::glsl::program_domain::glsl_fragment_program, fs_src);
m_fragment_shader.compile();
compiled = true;
}
VkPipelineShaderStageCreateInfo shader_stages[2] = {};
shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shader_stages[0].module = m_vertex_shader.get_handle();
shader_stages[0].pName = "main";
shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shader_stages[1].module = m_fragment_shader.get_handle();
shader_stages[1].pName = "main";
std::vector<VkDynamicState> dynamic_state_descriptors;
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_VIEWPORT);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_SCISSOR);
get_dynamic_state_entries(dynamic_state_descriptors);
VkPipelineDynamicStateCreateInfo dynamic_state_info = {};
dynamic_state_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state_info.dynamicStateCount = ::size32(dynamic_state_descriptors);
dynamic_state_info.pDynamicStates = dynamic_state_descriptors.data();
VkVertexInputBindingDescription vb = { 0, 16, VK_VERTEX_INPUT_RATE_VERTEX };
VkVertexInputAttributeDescription via = { 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0 };
VkPipelineVertexInputStateCreateInfo vi = {};
vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vi.vertexBindingDescriptionCount = 1;
vi.pVertexBindingDescriptions = &vb;
vi.vertexAttributeDescriptionCount = 1;
vi.pVertexAttributeDescriptions = &via;
VkPipelineViewportStateCreateInfo vp = {};
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.scissorCount = 1;
vp.viewportCount = 1;
VkGraphicsPipelineCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
info.pVertexInputState = &vi;
info.pInputAssemblyState = &renderpass_config.ia;
info.pRasterizationState = &renderpass_config.rs;
info.pColorBlendState = &renderpass_config.cs;
info.pMultisampleState = &renderpass_config.ms;
info.pViewportState = &vp;
info.pDepthStencilState = &renderpass_config.ds;
info.stageCount = 2;
info.pStages = shader_stages;
info.pDynamicState = &dynamic_state_info;
info.layout = m_pipeline_layout;
info.basePipelineIndex = -1;
info.basePipelineHandle = VK_NULL_HANDLE;
info.renderPass = render_pass;
auto compiler = vk::get_pipe_compiler();
auto program = compiler->compile(info, m_pipeline_layout, vk::pipe_compiler::COMPILE_INLINE, {}, get_vertex_inputs(), get_fragment_inputs());
auto result = program.get();
m_program_cache[storage_key] = std::move(program);
return result;
}
void overlay_pass::load_program(vk::command_buffer& cmd, VkRenderPass pass, const std::vector<vk::image_view*>& src)
{
vk::glsl::program *program = nullptr;
const auto key = get_pipeline_key(pass);
auto found = m_program_cache.find(key);
if (found != m_program_cache.end())
program = found->second.get();
else
program = build_pipeline(key, pass);
m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout);
if (!m_sampler && !src.empty())
{
m_sampler = std::make_unique<vk::sampler>(*m_device,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
VK_FALSE, 0.f, 1.f, 0.f, 0.f, m_sampler_filter, m_sampler_filter, VK_SAMPLER_MIPMAP_MODE_NEAREST, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
}
update_uniforms(cmd, program);
program->bind_uniform({ m_ubo.heap->value, m_ubo_offset, std::max(m_ubo_length, 4u) }, 0, m_descriptor_set);
for (uint n = 0; n < src.size(); ++n)
{
VkDescriptorImageInfo info = { m_sampler->value, src[n]->value, src[n]->image()->current_layout };
program->bind_uniform(info, "fs" + std::to_string(n), VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_descriptor_set);
}
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, program->pipeline);
m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout);
VkBuffer buffers = m_vao.heap->value;
VkDeviceSize offsets = m_vao_offset;
vkCmdBindVertexBuffers(cmd, 0, 1, &buffers, &offsets);
}
void overlay_pass::create(const vk::render_device& dev)
{
if (!initialized)
{
m_device = &dev;
init_descriptors();
initialized = true;
}
}
void overlay_pass::destroy()
{
if (initialized)
{
m_vertex_shader.destroy();
m_fragment_shader.destroy();
m_program_cache.clear();
m_sampler.reset();
vkDestroyDescriptorSetLayout(*m_device, m_descriptor_layout, nullptr);
vkDestroyPipelineLayout(*m_device, m_pipeline_layout, nullptr);
m_descriptor_pool.destroy();
initialized = false;
}
}
void overlay_pass::free_resources()
{
// FIXME: Allocation sizes are known, we don't need to use a data_heap structure
m_vao.reset_allocation_stats();
m_ubo.reset_allocation_stats();
}
vk::framebuffer* overlay_pass::get_framebuffer(vk::image* target, VkRenderPass render_pass)
{
VkDevice dev = (*vk::get_current_renderer());
return vk::get_framebuffer(dev, target->width(), target->height(), m_num_input_attachments > 0, render_pass, { target });
}
void overlay_pass::emit_geometry(vk::command_buffer& cmd)
{
vkCmdDraw(cmd, num_drawable_elements, 1, first_vertex, 0);
}
void overlay_pass::set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h)
{
VkViewport vp{};
vp.x = static_cast<f32>(x);
vp.y = static_cast<f32>(y);
vp.width = static_cast<f32>(w);
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
VkRect2D vs = { { static_cast<s32>(x), static_cast<s32>(y) }, { w, h } };
vkCmdSetScissor(cmd, 0, 1, &vs);
}
void overlay_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* fbo, const std::vector<vk::image_view*>& src, VkRenderPass render_pass)
{
// This call clobbers dynamic state
cmd.flags |= vk::command_buffer::cb_reload_dynamic_state;
load_program(cmd, render_pass, src);
set_up_viewport(cmd, viewport.x1, viewport.y1, viewport.width(), viewport.height());
vk::begin_renderpass(cmd, render_pass, fbo->value, { positionu{0u, 0u}, sizeu{fbo->width(), fbo->height()} });
emit_geometry(cmd);
}
void overlay_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::image* target, const std::vector<vk::image_view*>& src, VkRenderPass render_pass)
{
auto fbo = static_cast<vk::framebuffer_holder*>(get_framebuffer(target, render_pass));
fbo->add_ref();
run(cmd, viewport, fbo, src, render_pass);
fbo->release();
}
void overlay_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::image* target, vk::image_view* src, VkRenderPass render_pass)
{
std::vector<vk::image_view*> views = { src };
run(cmd, viewport, target, views, render_pass);
}
ui_overlay_renderer::ui_overlay_renderer()
: m_texture_type(rsx::overlays::texture_sampling_mode::none)
{
vs_src =
#include "../Program/GLSLSnippets/OverlayRenderVS.glsl"
;
fs_src =
#include "../Program/GLSLSnippets/OverlayRenderFS.glsl"
;
vs_src = fmt::replace_all(vs_src,
{
{ "%preprocessor", "// %preprocessor" },
{ "%push_block", "push_constant" }
});
fs_src = fmt::replace_all(fs_src,
{
{ "%preprocessor", "// %preprocessor" },
{ "%push_block_offset", "layout(offset=68)" },
{ "%push_block", "push_constant" }
});
// 2 input textures
m_num_usable_samplers = 2;
renderpass_config.set_attachment_count(1);
renderpass_config.set_color_mask(0, true, true, true, true);
renderpass_config.set_depth_mask(false);
renderpass_config.enable_blend(0,
VK_BLEND_FACTOR_SRC_ALPHA, VK_BLEND_FACTOR_ZERO,
VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, VK_BLEND_FACTOR_ONE,
VK_BLEND_OP_ADD, VK_BLEND_OP_ADD);
}
vk::image_view* ui_overlay_renderer::upload_simple_texture(vk::render_device& dev, vk::command_buffer& cmd,
vk::data_heap& upload_heap, u64 key, u32 w, u32 h, u32 layers, bool font, bool temp, const void* pixel_src, u32 owner_uid)
{
const VkFormat format = (font) ? VK_FORMAT_R8_UNORM : VK_FORMAT_B8G8R8A8_UNORM;
const u32 pitch = (font) ? w : w * 4;
const u32 data_size = pitch * h * layers;
const auto offset = upload_heap.alloc<512>(data_size);
const auto addr = upload_heap.map(offset, data_size);
const VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, layers };
auto tex = std::make_unique<vk::image>(dev, dev.get_memory_mapping().device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D, format, std::max(w, 1u), std::max(h, 1u), 1, 1, layers, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
0, VMM_ALLOCATION_POOL_UNDEFINED);
if (pixel_src && data_size)
std::memcpy(addr, pixel_src, data_size);
else if (data_size)
std::memset(addr, 0, data_size);
upload_heap.unmap();
VkBufferImageCopy region;
region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, layers };
region.bufferOffset = offset;
region.bufferRowLength = w;
region.bufferImageHeight = h;
region.imageOffset = {};
region.imageExtent = { static_cast<u32>(w), static_cast<u32>(h), 1u };
change_image_layout(cmd, tex.get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdCopyBufferToImage(cmd, upload_heap.heap->value, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
change_image_layout(cmd, tex.get(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, range);
auto view = std::make_unique<vk::image_view>(dev, tex.get());
auto result = view.get();
if (!temp || font)
view_cache[key] = std::move(view);
else
temp_view_cache[key] = std::move(view);
if (font)
font_cache[key] = std::move(tex);
else if (!temp)
resources.push_back(std::move(tex));
else
temp_image_cache[key] = std::make_pair(owner_uid, std::move(tex));
return result;
}
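// [Illustrative sketch, not part of the original file]
// upload_simple_texture() follows the canonical Vulkan staging upload:
// write pixels into a host-visible heap, transition the image to
// TRANSFER_DST, copy, then transition to SHADER_READ_ONLY for sampling.
// The skeleton of that sequence, stripped of the caching logic; the
// staging_heap/staging_buffer/copy_region names here are hypothetical.
#if 0
void* dst = staging_heap.map(offset, data_size);
std::memcpy(dst, pixels, data_size);
staging_heap.unmap();

change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdCopyBufferToImage(cmd, staging_buffer, image_handle,
	VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, range);
#endif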
void ui_overlay_renderer::init(vk::command_buffer& cmd, vk::data_heap& upload_heap)
{
rsx::overlays::resource_config configuration;
configuration.load_files();
auto& dev = cmd.get_command_pool().get_owner();
u64 storage_key = 1;
for (const auto &res : configuration.texture_raw_data)
{
upload_simple_texture(dev, cmd, upload_heap, storage_key++, res->w, res->h, 1, false, false, res->data, -1);
}
configuration.free_resources();
}
void ui_overlay_renderer::destroy()
{
temp_image_cache.clear();
temp_view_cache.clear();
resources.clear();
font_cache.clear();
view_cache.clear();
overlay_pass::destroy();
}
void ui_overlay_renderer::remove_temp_resources(u32 key)
{
std::vector<u64> keys_to_remove;
for (const auto& temp_image : temp_image_cache)
{
if (temp_image.second.first == key)
{
keys_to_remove.push_back(temp_image.first);
}
}
for (const auto& _key : keys_to_remove)
{
auto& img_data = temp_image_cache[_key];
auto& view_data = temp_view_cache[_key];
auto gc = vk::get_resource_manager();
gc->dispose(img_data.second);
gc->dispose(view_data);
temp_image_cache.erase(_key);
temp_view_cache.erase(_key);
}
}
vk::image_view* ui_overlay_renderer::find_font(rsx::overlays::font* font, vk::command_buffer& cmd, vk::data_heap& upload_heap)
{
const auto image_size = font->get_glyph_data_dimensions();
u64 key = reinterpret_cast<u64>(font);
auto found = view_cache.find(key);
if (found != view_cache.end())
{
if (const auto raw = found->second->image();
image_size.width == raw->width() &&
image_size.height == raw->height() &&
image_size.depth == raw->layers())
{
return found->second.get();
}
else
{
auto gc = vk::get_resource_manager();
gc->dispose(font_cache[key]);
gc->dispose(view_cache[key]);
}
}
// Create font resource
const std::vector<u8> bytes = font->get_glyph_data();
return upload_simple_texture(cmd.get_command_pool().get_owner(), cmd, upload_heap, key, image_size.width, image_size.height, image_size.depth,
true, false, bytes.data(), -1);
}
vk::image_view* ui_overlay_renderer::find_temp_image(rsx::overlays::image_info* desc, vk::command_buffer& cmd, vk::data_heap& upload_heap, u32 owner_uid)
{
u64 key = reinterpret_cast<u64>(desc);
auto found = temp_view_cache.find(key);
if (found != temp_view_cache.end())
return found->second.get();
return upload_simple_texture(cmd.get_command_pool().get_owner(), cmd, upload_heap, key, desc->w, desc->h, 1,
false, true, desc->data, owner_uid);
}
std::vector<VkPushConstantRange> ui_overlay_renderer::get_push_constants()
{
return
{
{
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
.offset = 0,
.size = 68
},
{
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.offset = 68,
.size = 12
}
};
}
void ui_overlay_renderer::update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/)
{
// Byte Layout
// 00: vec4 ui_scale;
// 16: vec4 albedo;
// 32: vec4 viewport;
// 48: vec4 clip_bounds;
// 64: uint vertex_config;
// 68: uint fragment_config;
// 72: float timestamp;
// 76: float blur_intensity;
f32 push_buf[32];
// 1. Vertex config (00 - 63)
std::memcpy(push_buf, m_scale_offset.rgba, 16);
std::memcpy(push_buf + 4, m_color.rgba, 16);
push_buf[8] = m_viewport.width;
push_buf[9] = m_viewport.height;
push_buf[10] = m_viewport.x;
push_buf[11] = m_viewport.y;
push_buf[12] = m_clip_region.x1;
push_buf[13] = m_clip_region.y1;
push_buf[14] = m_clip_region.x2;
push_buf[15] = m_clip_region.y2;
rsx::overlays::vertex_options vert_opts;
const auto vert_config = vert_opts
.disable_vertex_snap(m_disable_vertex_snap)
.get();
push_buf[16] = std::bit_cast<f32>(vert_config);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 68, push_buf);
// 2. Fragment stuff
rsx::overlays::fragment_options frag_opts;
const auto frag_config = frag_opts
.texture_mode(m_texture_type)
.clip_fragments(m_clip_enabled)
.pulse_glow(m_pulse_glow)
.get();
push_buf[0] = std::bit_cast<f32>(frag_config);
push_buf[1] = m_time;
push_buf[2] = m_blur_strength;
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 68, 12, push_buf);
}
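// [Illustrative sketch, not part of the original file]
// The byte layout documented above can be expressed as a struct whose field
// offsets are verified at compile time; a sketch (struct name hypothetical,
// requires <cstddef> for offsetof):
#if 0
struct overlay_push_constants
{
	f32 ui_scale[4];     // offset  0
	f32 albedo[4];       // offset 16
	f32 viewport[4];     // offset 32
	f32 clip_bounds[4];  // offset 48
	u32 vertex_config;   // offset 64 -> vertex-stage range is [0, 68)
	u32 fragment_config; // offset 68 -> fragment-stage range is [68, 80)
	f32 timestamp;       // offset 72
	f32 blur_intensity;  // offset 76
};
static_assert(offsetof(overlay_push_constants, fragment_config) == 68);
static_assert(sizeof(overlay_push_constants) == 80);
#endif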
void ui_overlay_renderer::set_primitive_type(rsx::overlays::primitive_type type)
{
m_current_primitive_type = type;
switch (type)
{
case rsx::overlays::primitive_type::quad_list:
case rsx::overlays::primitive_type::triangle_strip:
renderpass_config.set_primitive_type(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
break;
case rsx::overlays::primitive_type::line_list:
renderpass_config.set_primitive_type(VK_PRIMITIVE_TOPOLOGY_LINE_LIST);
break;
case rsx::overlays::primitive_type::line_strip:
renderpass_config.set_primitive_type(VK_PRIMITIVE_TOPOLOGY_LINE_STRIP);
break;
case rsx::overlays::primitive_type::triangle_fan:
renderpass_config.set_primitive_type(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN);
break;
default:
fmt::throw_exception("Unexpected primitive type %d", static_cast<s32>(type));
}
}
void ui_overlay_renderer::emit_geometry(vk::command_buffer& cmd)
{
if (m_current_primitive_type == rsx::overlays::primitive_type::quad_list)
{
// Emulate quads with disjointed triangle strips
u32 first = 0;
u32 num_quads = num_drawable_elements / 4;
for (u32 n = 0; n < num_quads; ++n)
{
vkCmdDraw(cmd, 4, 1, first, 0);
first += 4;
}
}
else
{
overlay_pass::emit_geometry(cmd);
}
}
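// [Illustrative note, not part of the original file]
// Vulkan has no native quad primitive, so each 4-vertex quad is issued as an
// independent triangle strip (vertex sets {0,1,2} and {1,2,3} of each group
// of four). The loop above is equivalent to the following, with hypothetical
// names; no index buffer is required.
#if 0
for (u32 quad = 0; quad < num_quads; ++quad)
{
	// one disjoint 4-vertex strip per quad
	vkCmdDraw(cmd, 4, 1, quad * 4, 0);
}
#endif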
void ui_overlay_renderer::run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* target, VkRenderPass render_pass,
vk::data_heap& upload_heap, rsx::overlays::overlay& ui)
{
m_scale_offset = color4f(ui.virtual_width, ui.virtual_height, 1.f, 1.f);
m_viewport = { { static_cast<f32>(viewport.x1), static_cast<f32>(viewport.y1) }, { static_cast<f32>(viewport.width()), static_cast<f32>(viewport.height()) } };
std::vector<vk::image_view*> image_views
{
vk::null_image_view(cmd, VK_IMAGE_VIEW_TYPE_2D),
vk::null_image_view(cmd, VK_IMAGE_VIEW_TYPE_2D_ARRAY)
};
if (ui.status_flags & rsx::overlays::status_bits::invalidate_image_cache)
{
remove_temp_resources(ui.uid);
ui.status_flags.clear(rsx::overlays::status_bits::invalidate_image_cache);
}
for (auto& command : ui.get_compiled().draw_commands)
{
num_drawable_elements = static_cast<u32>(command.verts.size());
upload_vertex_data(command.verts.data(), num_drawable_elements);
set_primitive_type(command.config.primitives);
m_time = command.config.get_sinus_value();
m_texture_type = rsx::overlays::texture_sampling_mode::texture2D;
m_color = command.config.color;
m_pulse_glow = command.config.pulse_glow;
m_blur_strength = static_cast<f32>(command.config.blur_strength) * 0.01f;
m_clip_enabled = command.config.clip_region;
m_clip_region = command.config.clip_rect;
m_disable_vertex_snap = command.config.disable_vertex_snap;
vk::image_view* src = nullptr;
switch (command.config.texture_ref)
{
case rsx::overlays::image_resource_id::game_icon:
case rsx::overlays::image_resource_id::backbuffer:
// TODO
case rsx::overlays::image_resource_id::none:
m_texture_type = rsx::overlays::texture_sampling_mode::none;
break;
case rsx::overlays::image_resource_id::font_file:
src = find_font(command.config.font_ref, cmd, upload_heap);
m_texture_type = src->image()->layers() == 1
? rsx::overlays::texture_sampling_mode::font2D
: rsx::overlays::texture_sampling_mode::font3D;
break;
case rsx::overlays::image_resource_id::raw_image:
src = find_temp_image(static_cast<rsx::overlays::image_info*>(command.config.external_data_ref), cmd, upload_heap, ui.uid);
break;
default:
src = view_cache[command.config.texture_ref].get();
break;
}
if (src)
{
const int res_id = src->image()->layers() > 1 ? 1 : 0;
image_views[res_id] = src;
}
overlay_pass::run(cmd, viewport, target, image_views, render_pass);
}
ui.update(get_system_time());
}
attachment_clear_pass::attachment_clear_pass()
{
vs_src =
"#version 450\n"
"#extension GL_ARB_separate_shader_objects : enable\n"
"layout(push_constant) uniform static_data{ vec4 regs[2]; };\n"
"layout(location=0) out vec4 color;\n"
"\n"
"void main()\n"
"{\n"
" vec2 positions[] = {vec2(-1., -1.), vec2(1., -1.), vec2(-1., 1.), vec2(1., 1.)};\n"
" color = regs[0];\n"
" gl_Position = vec4(positions[gl_VertexIndex % 4], 0., 1.);\n"
"}\n";
fs_src =
"#version 420\n"
"#extension GL_ARB_separate_shader_objects : enable\n"
"layout(location=0) in vec4 color;\n"
"layout(location=0) out vec4 out_color;\n"
"\n"
"void main()\n"
"{\n"
" out_color = color;\n"
"}\n";
// Disable samplers
m_num_usable_samplers = 0;
renderpass_config.set_depth_mask(false);
renderpass_config.set_color_mask(0, true, true, true, true);
renderpass_config.set_attachment_count(1);
}
std::vector<VkPushConstantRange> attachment_clear_pass::get_push_constants()
{
VkPushConstantRange constant;
constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
constant.offset = 0;
constant.size = 32;
return { constant };
}
void attachment_clear_pass::update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/)
{
f32 data[8];
data[0] = clear_color.r;
data[1] = clear_color.g;
data[2] = clear_color.b;
data[3] = clear_color.a;
data[4] = colormask.r;
data[5] = colormask.g;
data[6] = colormask.b;
data[7] = colormask.a;
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 32, data);
}
void attachment_clear_pass::set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h)
{
VkViewport vp{};
vp.x = static_cast<f32>(x);
vp.y = static_cast<f32>(y);
vp.width = static_cast<f32>(w);
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
vkCmdSetScissor(cmd, 0, 1, &region);
}
void attachment_clear_pass::run(vk::command_buffer& cmd, vk::framebuffer* target, VkRect2D rect, u32 clearmask, color4f color, VkRenderPass render_pass)
{
region = rect;
color4f mask = { 0.f, 0.f, 0.f, 0.f };
if (clearmask & 0x10) mask.r = 1.f;
if (clearmask & 0x20) mask.g = 1.f;
if (clearmask & 0x40) mask.b = 1.f;
if (clearmask & 0x80) mask.a = 1.f;
if (mask != colormask || color != clear_color)
{
colormask = mask;
clear_color = color;
// Update color mask to match request
renderpass_config.set_color_mask(0, colormask.r, colormask.g, colormask.b, colormask.a);
}
// Update renderpass configuration with the real number of samples
renderpass_config.set_multisample_state(target->samples(), 0xFFFF, false, false, false);
// Render fullscreen quad
overlay_pass::run(cmd, { 0, 0, target->width(), target->height() }, target, std::vector<vk::image_view*>{}, render_pass);
}
stencil_clear_pass::stencil_clear_pass()
{
vs_src =
"#version 450\n"
"#extension GL_ARB_separate_shader_objects : enable\n"
"\n"
"void main()\n"
"{\n"
" vec2 positions[] = {vec2(-1., -1.), vec2(1., -1.), vec2(-1., 1.), vec2(1., 1.)};\n"
" gl_Position = vec4(positions[gl_VertexIndex % 4], 0., 1.);\n"
"}\n";
fs_src =
"#version 420\n"
"#extension GL_ARB_separate_shader_objects : enable\n"
"layout(location=0) out vec4 out_color;\n"
"\n"
"void main()\n"
"{\n"
" out_color = vec4(0.);\n"
"}\n";
}
void stencil_clear_pass::set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h)
{
VkViewport vp{};
vp.x = static_cast<f32>(x);
vp.y = static_cast<f32>(y);
vp.width = static_cast<f32>(w);
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
vkCmdSetScissor(cmd, 0, 1, &region);
}
void stencil_clear_pass::run(vk::command_buffer& cmd, vk::render_target* target, VkRect2D rect, u32 stencil_clear, u32 stencil_write_mask, VkRenderPass render_pass)
{
region = rect;
// Stencil setup: replace all pixels in the scissor region with stencil_clear, applying the correct write mask.
renderpass_config.enable_stencil_test(
VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, VK_STENCIL_OP_REPLACE, // Always replace
VK_COMPARE_OP_ALWAYS, // Always pass
0xFF, // Full write-through
stencil_clear); // Write active bit
renderpass_config.set_stencil_mask(stencil_write_mask);
renderpass_config.set_depth_mask(false);
// Coverage sampling disabled, but actually report correct number of samples
renderpass_config.set_multisample_state(target->samples(), 0xFFFF, false, false, false);
overlay_pass::run(cmd, { 0, 0, target->width(), target->height() }, target, std::vector<vk::image_view*>{}, render_pass);
}
video_out_calibration_pass::video_out_calibration_pass()
{
vs_src =
#include "../Program/GLSLSnippets/GenericVSPassthrough.glsl"
;
fs_src =
#include "../Program/GLSLSnippets/VideoOutCalibrationPass.glsl"
;
std::pair<std::string_view, std::string> repl_list[] =
{
{ "%sampler_binding", fmt::format("(%d + x)", sampler_location(0)) },
{ "%set_decorator", "set=0" },
};
fs_src = fmt::replace_all(fs_src, repl_list);
renderpass_config.set_depth_mask(false);
renderpass_config.set_color_mask(0, true, true, true, true);
renderpass_config.set_attachment_count(1);
m_num_usable_samplers = 2;
}
std::vector<VkPushConstantRange> video_out_calibration_pass::get_push_constants()
{
VkPushConstantRange constant;
constant.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
constant.offset = 0;
constant.size = 16;
return { constant };
}
void video_out_calibration_pass::update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/)
{
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, config.data);
}
void video_out_calibration_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* target,
const rsx::simple_array<vk::viewable_image*>& src, f32 gamma, bool limited_rgb, stereo_render_mode_options stereo_mode, VkRenderPass render_pass)
{
config.gamma = gamma;
config.limit_range = limited_rgb ? 1 : 0;
config.stereo_display_mode = static_cast<u8>(stereo_mode);
config.stereo_image_count = std::min(::size32(src), 2u);
std::vector<vk::image_view*> views;
views.reserve(2);
for (auto& img : src)
{
img->push_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
views.push_back(img->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY)));
}
if (views.size() < 2)
{
views.push_back(vk::null_image_view(cmd, VK_IMAGE_VIEW_TYPE_2D));
}
overlay_pass::run(cmd, viewport, target, views, render_pass);
for (auto& img : src)
{
img->pop_layout(cmd);
}
}
}
size: 29,824 | language: C++ | extension: .cpp | total_lines: 767 | avg_line_length: 35.53455 | max_line_length: 165 | alphanum_fraction: 0.69773
repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,449 | file_name: VKMemAlloc.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKMemAlloc.cpp
content:
#define VMA_IMPLEMENTATION
#include "util/atomic.hpp"
#include "Utilities/mutex.h"
// Protect some STL headers from macro (add more if it fails to compile)
#include <atomic>
#include <thread>
#include <memory>
#include <mutex>
// Replace VMA atomics with atomic_t
#define VMA_ATOMIC_UINT32 atomic_t<u32>
#define VMA_ATOMIC_UINT64 atomic_t<u64>
#define compare_exchange_strong compare_exchange
#define compare_exchange_weak compare_exchange
// Replace VMA mutex with shared_mutex
class VmaRWMutex
{
public:
void LockRead() { m_mutex.lock_shared(); }
void UnlockRead() { m_mutex.unlock_shared(); }
bool TryLockRead() { return m_mutex.try_lock_shared(); }
void LockWrite() { m_mutex.lock(); }
void UnlockWrite() { m_mutex.unlock(); }
bool TryLockWrite() { return m_mutex.try_lock(); }
void Lock() { m_mutex.lock(); }
void Unlock() { m_mutex.unlock(); }
bool TryLock() { return m_mutex.try_lock(); }
private:
shared_mutex m_mutex;
};
#define VMA_RW_MUTEX VmaRWMutex
#define VMA_MUTEX VmaRWMutex
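// [Illustrative sketch, not part of the original file]
// VMA only requires that the type named by VMA_RW_MUTEX expose the member
// functions used above; the adapter forwards them to RPCS3's shared_mutex.
// The same pattern works for any reader/writer lock, e.g. std::shared_mutex:
#if 0
#include <shared_mutex>

class VmaStdRWMutex // hypothetical alternative adapter
{
public:
	void LockRead()     { m_mutex.lock_shared(); }
	void UnlockRead()   { m_mutex.unlock_shared(); }
	bool TryLockRead()  { return m_mutex.try_lock_shared(); }
	void LockWrite()    { m_mutex.lock(); }
	void UnlockWrite()  { m_mutex.unlock(); }
	bool TryLockWrite() { return m_mutex.try_lock(); }

private:
	std::shared_mutex m_mutex;
};
#endif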
#ifdef _MSC_VER
#pragma warning(push, 0)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wsuggest-override"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#ifdef __clang__
#pragma clang diagnostic ignored "-Winconsistent-missing-override"
#else
#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn"
#endif
#endif
#include "3rdparty/GPUOpen/include/vk_mem_alloc.h"
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
size: 1,768 | language: C++ | extension: .cpp | total_lines: 55 | avg_line_length: 30.854545 | max_line_length: 72 | alphanum_fraction: 0.773872
repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,450 | file_name: VKCommonDecompiler.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKCommonDecompiler.cpp
content:
#include "stdafx.h"
#include "VKCommonDecompiler.h"
namespace vk
{
static constexpr std::array<std::pair<std::string_view, int>, 18> varying_registers =
{ {
{ "tc0", 0 },
{ "tc1", 1 },
{ "tc2", 2 },
{ "tc3", 3 },
{ "tc4", 4 },
{ "tc5", 5 },
{ "tc6", 6 },
{ "tc7", 7 },
{ "tc8", 8 },
{ "tc9", 9 },
{ "diff_color", 10 },
{ "diff_color1", 11 },
{ "spec_color", 12 },
{ "spec_color1", 13 },
{ "fog_c", 14 },
{ "fogc", 14 }
} };
int get_varying_register_location(std::string_view varying_register_name)
{
for (const auto& varying_register : varying_registers)
{
if (varying_register.first == varying_register_name)
{
return varying_register.second;
}
}
fmt::throw_exception("Unknown register name: %s", varying_register_name);
}
}
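// [Illustrative usage, not part of the original file]
// Callers map RSX varying names to fixed location slots when emitting GLSL.
// A hypothetical snippet:
#if 0
const int loc = vk::get_varying_register_location("tc3"); // -> 3
shader << "layout(location=" << loc << ") in vec4 tc3;\n";
#endif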
size: 788 | language: C++ | extension: .cpp | total_lines: 35 | avg_line_length: 19.742857 | max_line_length: 86 | alphanum_fraction: 0.588
repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 5,451 | file_name: VKFormats.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKFormats.cpp
content:
#include "stdafx.h"
#include "VKFormats.h"
#include "vkutils/device.h"
#include "vkutils/image.h"
namespace vk
{
VkFormat get_compatible_depth_surface_format(const gpu_formats_support& support, rsx::surface_depth_format2 format)
{
switch (format)
{
case rsx::surface_depth_format2::z16_uint:
return VK_FORMAT_D16_UNORM;
case rsx::surface_depth_format2::z16_float:
return VK_FORMAT_D32_SFLOAT;
case rsx::surface_depth_format2::z24s8_uint:
{
if (support.d24_unorm_s8) return VK_FORMAT_D24_UNORM_S8_UINT;
if (support.d32_sfloat_s8) return VK_FORMAT_D32_SFLOAT_S8_UINT;
fmt::throw_exception("No hardware support for z24s8");
}
case rsx::surface_depth_format2::z24s8_float:
{
if (support.d32_sfloat_s8) return VK_FORMAT_D32_SFLOAT_S8_UINT;
fmt::throw_exception("No hardware support for z24s8_float");
}
default:
break;
}
fmt::throw_exception("Invalid format (0x%x)", static_cast<u32>(format));
}
minification_filter get_min_filter(rsx::texture_minify_filter min_filter)
{
switch (min_filter)
{
case rsx::texture_minify_filter::nearest: return { VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, false };
case rsx::texture_minify_filter::linear: return { VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST, false };
case rsx::texture_minify_filter::nearest_nearest: return { VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, true };
case rsx::texture_minify_filter::linear_nearest: return { VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST, true };
case rsx::texture_minify_filter::nearest_linear: return { VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_LINEAR, true };
case rsx::texture_minify_filter::linear_linear: return { VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_LINEAR, true };
case rsx::texture_minify_filter::convolution_min: return { VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST, false };
default:
fmt::throw_exception("Invalid min filter");
}
}
VkFilter get_mag_filter(rsx::texture_magnify_filter mag_filter)
{
switch (mag_filter)
{
case rsx::texture_magnify_filter::nearest: return VK_FILTER_NEAREST;
case rsx::texture_magnify_filter::linear: return VK_FILTER_LINEAR;
case rsx::texture_magnify_filter::convolution_mag: return VK_FILTER_LINEAR;
default:
break;
}
fmt::throw_exception("Invalid mag filter (0x%x)", static_cast<u32>(mag_filter));
}
VkBorderColor get_border_color(u32 color)
{
switch (color)
{
case 0x00000000:
{
return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
}
case 0xFFFFFFFF:
{
return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
}
case 0xFF000000:
{
return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
}
default:
{
return VK_BORDER_COLOR_FLOAT_CUSTOM_EXT;
}
}
}
VkSamplerAddressMode vk_wrap_mode(rsx::texture_wrap_mode gcm_wrap)
{
switch (gcm_wrap)
{
case rsx::texture_wrap_mode::wrap: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
case rsx::texture_wrap_mode::mirror: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
case rsx::texture_wrap_mode::clamp_to_edge: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case rsx::texture_wrap_mode::border: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
case rsx::texture_wrap_mode::clamp: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case rsx::texture_wrap_mode::mirror_once_clamp_to_edge: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
case rsx::texture_wrap_mode::mirror_once_border: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
case rsx::texture_wrap_mode::mirror_once_clamp: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
default:
fmt::throw_exception("Unhandled texture clamp mode");
}
}
float max_aniso(rsx::texture_max_anisotropy gcm_aniso)
{
switch (gcm_aniso)
{
case rsx::texture_max_anisotropy::x1: return 1.0f;
case rsx::texture_max_anisotropy::x2: return 2.0f;
case rsx::texture_max_anisotropy::x4: return 4.0f;
case rsx::texture_max_anisotropy::x6: return 6.0f;
case rsx::texture_max_anisotropy::x8: return 8.0f;
case rsx::texture_max_anisotropy::x10: return 10.0f;
case rsx::texture_max_anisotropy::x12: return 12.0f;
case rsx::texture_max_anisotropy::x16: return 16.0f;
default:
break;
}
fmt::throw_exception("Texture anisotropy error: bad max aniso (%d)", static_cast<u32>(gcm_aniso));
}
std::array<VkComponentSwizzle, 4> get_component_mapping(u32 format)
{
// Component map in ARGB format
std::array<VkComponentSwizzle, 4> mapping = {};
switch (format)
{
case CELL_GCM_TEXTURE_A1R5G5B5:
case CELL_GCM_TEXTURE_R5G5B5A1:
case CELL_GCM_TEXTURE_R6G5B5:
case CELL_GCM_TEXTURE_R5G6B5:
case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
mapping = { VK_COMPONENT_SWIZZLE_A, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R }; break;
case CELL_GCM_TEXTURE_A4R4G4B4:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; break;
case CELL_GCM_TEXTURE_G8B8:
mapping = { VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R }; break;
case CELL_GCM_TEXTURE_B8:
mapping = { VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R }; break;
case CELL_GCM_TEXTURE_X16:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_ONE }; break;
case CELL_GCM_TEXTURE_X32_FLOAT:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R }; break;
case CELL_GCM_TEXTURE_Y16_X16:
mapping = { VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R }; break;
case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G }; break;
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
mapping = { VK_COMPONENT_SWIZZLE_A, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
case CELL_GCM_TEXTURE_D8R8G8B8:
mapping = { VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
case CELL_GCM_TEXTURE_D1R5G5B5:
mapping = { VK_COMPONENT_SWIZZLE_ONE, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G }; break;
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
mapping = { VK_COMPONENT_SWIZZLE_A, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
case CELL_GCM_TEXTURE_A8R8G8B8:
mapping = { VK_COMPONENT_SWIZZLE_A, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B }; break;
default:
fmt::throw_exception("Invalid or unsupported component mapping for texture format (0x%x)", format);
}
return mapping;
}
VkFormat get_compatible_sampler_format(const gpu_formats_support& support, u32 format)
{
switch (format)
{
#ifndef __APPLE__
case CELL_GCM_TEXTURE_R5G6B5: return VK_FORMAT_R5G6B5_UNORM_PACK16;
case CELL_GCM_TEXTURE_R6G5B5: return VK_FORMAT_R5G6B5_UNORM_PACK16; // Expand, discard high bit?
case CELL_GCM_TEXTURE_R5G5B5A1: return VK_FORMAT_R5G5B5A1_UNORM_PACK16;
case CELL_GCM_TEXTURE_D1R5G5B5: return VK_FORMAT_A1R5G5B5_UNORM_PACK16;
case CELL_GCM_TEXTURE_A1R5G5B5: return VK_FORMAT_A1R5G5B5_UNORM_PACK16;
case CELL_GCM_TEXTURE_A4R4G4B4: return VK_FORMAT_R4G4B4A4_UNORM_PACK16;
#else
// assign B8G8R8A8_UNORM to formats that are not supported by Metal
case CELL_GCM_TEXTURE_R6G5B5: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_R5G6B5: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_R5G5B5A1: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_D1R5G5B5: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_A1R5G5B5: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_A4R4G4B4: return VK_FORMAT_B8G8R8A8_UNORM;
#endif
case CELL_GCM_TEXTURE_B8: return VK_FORMAT_R8_UNORM;
case CELL_GCM_TEXTURE_A8R8G8B8: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_COMPRESSED_DXT1: return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
case CELL_GCM_TEXTURE_COMPRESSED_DXT23: return VK_FORMAT_BC2_UNORM_BLOCK;
case CELL_GCM_TEXTURE_COMPRESSED_DXT45: return VK_FORMAT_BC3_UNORM_BLOCK;
case CELL_GCM_TEXTURE_G8B8: return VK_FORMAT_R8G8_UNORM;
case CELL_GCM_TEXTURE_DEPTH24_D8: return support.d24_unorm_s8 ? VK_FORMAT_D24_UNORM_S8_UINT : VK_FORMAT_D32_SFLOAT_S8_UINT;
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: return VK_FORMAT_D32_SFLOAT_S8_UINT;
case CELL_GCM_TEXTURE_DEPTH16: return VK_FORMAT_D16_UNORM;
case CELL_GCM_TEXTURE_DEPTH16_FLOAT: return VK_FORMAT_D32_SFLOAT;
case CELL_GCM_TEXTURE_X16: return VK_FORMAT_R16_UNORM;
case CELL_GCM_TEXTURE_Y16_X16: return VK_FORMAT_R16G16_UNORM;
case CELL_GCM_TEXTURE_Y16_X16_FLOAT: return VK_FORMAT_R16G16_SFLOAT;
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT: return VK_FORMAT_R16G16B16A16_SFLOAT;
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT: return VK_FORMAT_R32G32B32A32_SFLOAT;
case CELL_GCM_TEXTURE_X32_FLOAT: return VK_FORMAT_R32_SFLOAT;
case CELL_GCM_TEXTURE_D8R8G8B8: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_COMPRESSED_HILO8: return VK_FORMAT_R8G8_UNORM;
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: return VK_FORMAT_R8G8_SNORM;
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8: return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: return VK_FORMAT_B8G8R8A8_UNORM;
default:
break;
}
fmt::throw_exception("Invalid or unsupported sampler format for texture format (0x%x)", format);
}
VkFormat get_compatible_srgb_format(VkFormat rgb_format)
{
switch (rgb_format)
{
case VK_FORMAT_B8G8R8A8_UNORM:
return VK_FORMAT_B8G8R8A8_SRGB;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
case VK_FORMAT_BC2_UNORM_BLOCK:
return VK_FORMAT_BC2_SRGB_BLOCK;
case VK_FORMAT_BC3_UNORM_BLOCK:
return VK_FORMAT_BC3_SRGB_BLOCK;
default:
return rgb_format;
}
}
u8 get_format_texel_width(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_UNORM:
return 1;
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SFLOAT:
case VK_FORMAT_R16_UNORM:
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
return 2;
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16_SFLOAT:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
return 4;
case VK_FORMAT_R16G16B16A16_SFLOAT:
return 8;
case VK_FORMAT_R32G32B32A32_SFLOAT:
return 16;
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
return 2;
case VK_FORMAT_D32_SFLOAT_S8_UINT: //TODO: Translate to D24S8
case VK_FORMAT_D24_UNORM_S8_UINT:
return 4;
default:
break;
}
fmt::throw_exception("Unexpected vkFormat 0x%X", static_cast<u32>(format));
}
std::pair<u8, u8> get_format_element_size(VkFormat format)
{
// Return value is {ELEMENT_SIZE, NUM_ELEMENTS_PER_TEXEL}
// NOTE: Due to endianness issues, coalescing into larger types is preferred
// e.g. one 32-bit UINT holding 4x1 bytes instead of four 8-bit UBYTEs holding 1 byte each
switch (format)
{
//8-bit
case VK_FORMAT_R8_UNORM:
return{ 1, 1 };
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_SNORM:
return{ 2, 1 }; //UNSIGNED_SHORT_8_8
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
return{ 4, 1 }; //UNSIGNED_INT_8_8_8_8
//16-bit
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SFLOAT:
case VK_FORMAT_R16_UNORM:
return{ 2, 1 }; //UNSIGNED_SHORT and HALF_FLOAT
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16_SFLOAT:
return{ 2, 2 }; //HALF_FLOAT
case VK_FORMAT_R16G16B16A16_SFLOAT:
return{ 2, 4 }; //HALF_FLOAT
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
return{ 2, 1 }; //UNSIGNED_SHORT_X_Y_Z_W
//32-bit
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SFLOAT:
return{ 4, 1 }; //FLOAT
case VK_FORMAT_R32G32B32A32_SFLOAT:
return{ 4, 4 }; //FLOAT
//DXT
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
return{ 4, 1 };
//Depth
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
return{ 2, 1 };
case VK_FORMAT_D32_SFLOAT_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
return{ 4, 1 };
default:
break;
}
fmt::throw_exception("Unexpected vkFormat 0x%X", static_cast<u32>(format));
}
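// Worked example (illustrative): VK_FORMAT_R16G16_SFLOAT yields {2, 2} - two
// 2-byte elements per texel - consistent with get_format_texel_width()
// returning 4 for the same format. The element size is the unit the
// upload/download byteswap operates on:
//
//   const auto [elem_size, elem_count] = get_format_element_size(VK_FORMAT_R16G16_SFLOAT);
//   const u32 texel_bytes = elem_size * elem_count; // == 4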
std::pair<bool, u32> get_format_convert_flags(VkFormat format)
{
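// Return value is {REQUIRES_BYTESWAP, SWAP_ELEMENT_SIZE}: whether transfers
// need an endianness transform, and the width in bytes of each swapped element
// (inferred from the element sizes above and the call sites).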
switch (format)
{
//8-bit
case VK_FORMAT_R8_UNORM:
return{ false, 1 };
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_R8G8B8A8_SRGB:
return{ true, 4 };
//16-bit
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SFLOAT:
case VK_FORMAT_R16_UNORM:
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16_SFLOAT:
case VK_FORMAT_R16G16B16A16_SFLOAT:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
return{ true, 2 };
//32-bit
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_R32G32B32A32_SFLOAT:
return{ true, 4 };
//DXT
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
return{ false, 1 };
//Depth
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
return{ true, 2 };
case VK_FORMAT_D32_SFLOAT_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
return{ true, 4 };
default:
break;
}
fmt::throw_exception("Unknown vkFormat 0x%x", static_cast<u32>(format));
}
bool formats_are_bitcast_compatible(VkFormat format1, VkFormat format2)
{
if (format1 == format2) [[likely]]
{
return true;
}
// Formats are compatible if the following conditions are met:
// 1. Texel sizes must match
// 2. Both formats require no transforms (basic memcpy) or...
// 3. Both formats have the same transform (e.g. RG16_UNORM to RG16_SFLOAT, both are downloaded and uploaded with a 2-byte byteswap)
if (get_format_texel_width(format1) != get_format_texel_width(format2))
{
return false;
}
const auto transform_a = get_format_convert_flags(format1);
const auto transform_b = get_format_convert_flags(format2);
if (transform_a.first == transform_b.first)
{
return !transform_a.first || (transform_a.second == transform_b.second);
}
return false;
}
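// Illustrative examples (hypothetical calls) of the three rules above:
//
//   formats_are_bitcast_compatible(VK_FORMAT_R16G16_UNORM, VK_FORMAT_R16G16_SFLOAT); // true: 4-byte texels, same 2-byte swap
//   formats_are_bitcast_compatible(VK_FORMAT_R32_SFLOAT, VK_FORMAT_R16G16_UNORM);    // false: 4-byte swap vs 2-byte swap
//   formats_are_bitcast_compatible(VK_FORMAT_R8_UNORM, VK_FORMAT_R16_UNORM);         // false: texel sizes differ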
bool formats_are_bitcast_compatible(image* image1, image* image2)
{
if (const u32 transfer_class = image1->format_class() | image2->format_class();
transfer_class & RSX_FORMAT_CLASS_DEPTH_FLOAT_MASK)
{
// If any one of the two images is a depth float, the other must match exactly or bust
return (image1->format_class() == image2->format_class());
}
return formats_are_bitcast_compatible(image1->format(), image2->format());
}
}
// ==== File: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKQueryPool.cpp ====
#include "stdafx.h"
#include "VKQueryPool.h"
#include "VKRenderPass.h"
#include "VKResourceManager.h"
#include "util/asm.hpp"
namespace vk
{
inline bool query_pool_manager::poke_query(query_slot_info& query, u32 index, VkQueryResultFlags flags)
{
// Query is ready if:
// 1. Any sample has been determined to have passed the Z test
// 2. The backend has fully processed the query and found no hits
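// With VK_QUERY_RESULT_WITH_AVAILABILITY_BIT and a stride of 8, each query
// writes two consecutive u32s: result[0] = sample count, result[1] = availability.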
u32 result[2] = { 0, 0 };
switch (const auto error = vkGetQueryPoolResults(*owner, *query.pool, index, 1, 8, result, 8, flags | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))
{
case VK_SUCCESS:
{
if (result[0])
{
query.any_passed = true;
query.ready = true;
query.data = result[0];
return true;
}
else if (result[1])
{
query.any_passed = false;
query.ready = true;
query.data = 0;
return true;
}
return false;
}
case VK_NOT_READY:
{
query.any_passed = !!result[0];
query.ready = query.any_passed && !!(flags & VK_QUERY_RESULT_PARTIAL_BIT);
query.data = result[0];
return query.ready;
}
default:
die_with_error(error);
return false;
}
}
query_pool_manager::query_pool_manager(vk::render_device& dev, VkQueryType type, u32 num_entries)
{
ensure(num_entries > 0);
owner = &dev;
query_type = type;
query_slot_status.resize(num_entries, {});
for (unsigned i = 0; i < num_entries; ++i)
{
m_available_slots.push_back(i);
}
}
query_pool_manager::~query_pool_manager()
{
if (m_current_query_pool)
{
m_current_query_pool.reset();
owner = nullptr;
}
}
void query_pool_manager::allocate_new_pool(vk::command_buffer& cmd)
{
ensure(!m_current_query_pool);
const u32 count = ::size32(query_slot_status);
m_current_query_pool = std::make_unique<query_pool>(*owner, query_type, count);
// From spec: "After query pool creation, each query must be reset before it is used."
vkCmdResetQueryPool(cmd, *m_current_query_pool.get(), 0, count);
m_pool_lifetime_counter = count;
}
void query_pool_manager::reallocate_pool(vk::command_buffer& cmd)
{
if (m_current_query_pool)
{
if (!m_current_query_pool->has_refs())
{
vk::get_resource_manager()->dispose(m_current_query_pool);
}
else
{
m_consumed_pools.emplace_back(std::move(m_current_query_pool));
// Sanity check
if (m_consumed_pools.size() > 3)
{
rsx_log.error("[Robustness warning] Query pool discard pile size is now %llu. Are we leaking??", m_consumed_pools.size());
}
}
}
allocate_new_pool(cmd);
}
void query_pool_manager::run_pool_cleanup()
{
for (auto It = m_consumed_pools.begin(); It != m_consumed_pools.end();)
{
if (!(*It)->has_refs())
{
vk::get_resource_manager()->dispose(*It);
It = m_consumed_pools.erase(It);
}
else
{
It++;
}
}
}
void query_pool_manager::set_control_flags(VkQueryControlFlags control_, VkQueryResultFlags result_)
{
control_flags = control_;
result_flags = result_;
}
void query_pool_manager::begin_query(vk::command_buffer& cmd, u32 index)
{
ensure(query_slot_status[index].active == false);
auto& query_info = query_slot_status[index];
query_info.pool = m_current_query_pool.get();
query_info.active = true;
vkCmdBeginQuery(cmd, *query_info.pool, index, control_flags);
}
void query_pool_manager::end_query(vk::command_buffer& cmd, u32 index)
{
vkCmdEndQuery(cmd, *query_slot_status[index].pool, index);
}
bool query_pool_manager::check_query_status(u32 index)
{
return poke_query(query_slot_status[index], index, result_flags);
}
u32 query_pool_manager::get_query_result(u32 index)
{
// Check for cached result
auto& query_info = query_slot_status[index];
if (!query_info.ready)
{
poke_query(query_info, index, result_flags);
while (!query_info.ready)
{
utils::pause();
poke_query(query_info, index, result_flags);
}
}
return query_info.data;
}
void query_pool_manager::get_query_result_indirect(vk::command_buffer& cmd, u32 index, u32 count, VkBuffer dst, VkDeviceSize dst_offset)
{
// We're technically supposed to stop any active renderpasses before streaming the results out, but that doesn't matter on IMR hw
// On TBDR setups like the Apple M series, the stop is required (results are all 0 if you don't flush the RP), but this introduces a very heavy performance loss.
vkCmdCopyQueryPoolResults(cmd, *query_slot_status[index].pool, index, count, dst, dst_offset, 4, VK_QUERY_RESULT_WAIT_BIT);
}
void query_pool_manager::free_query(vk::command_buffer&/*cmd*/, u32 index)
{
// Release reference and discard
auto& query = query_slot_status[index];
ensure(query.active);
query.pool->release();
if (!query.pool->has_refs())
{
// No more refs held, remove if in discard pile
run_pool_cleanup();
}
query = {};
m_available_slots.push_back(index);
}
u32 query_pool_manager::allocate_query(vk::command_buffer& cmd)
{
if (!m_pool_lifetime_counter)
{
// Pool is exhausted, create a new one
// This is basically a driver-level pool reset without synchronization
// TODO: Alternatively, use VK_EXT_host_query_reset to reset an old pool with no references and swap that in
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
reallocate_pool(cmd);
}
if (!m_available_slots.empty())
{
m_pool_lifetime_counter--;
const auto result = m_available_slots.front();
m_available_slots.pop_front();
return result;
}
return ~0u;
}
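// Typical lifecycle sketch (hypothetical 'pool_mgr' instance, for illustration):
//
//   const u32 slot = pool_mgr.allocate_query(cmd);   // returns ~0u if no slots are free
//   pool_mgr.begin_query(cmd, slot);
//   // ... draw calls ...
//   pool_mgr.end_query(cmd, slot);
//   // after submission:
//   if (pool_mgr.check_query_status(slot)) { /* result is now cached */ }
//   const u32 samples = pool_mgr.get_query_result(slot); // spins until ready
//   pool_mgr.free_query(cmd, slot);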
}
// ==== File: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKHelpers.cpp ====
#include "stdafx.h"
#include "VKHelpers.h"
#include "VKGSRender.h"
#include "VKCompute.h"
#include "VKRenderPass.h"
#include "VKFramebuffer.h"
#include "VKResolveHelper.h"
#include "VKResourceManager.h"
#include "VKDMA.h"
#include "VKCommandStream.h"
#include "VKRenderPass.h"
#include "vkutils/scratch.h"
#include "vkutils/device.h"
#include "Emu/RSX/rsx_methods.h"
#include <unordered_map>
namespace vk
{
extern chip_class g_chip_class;
std::unordered_map<u32, std::unique_ptr<vk::compute_task>> g_compute_tasks;
std::unordered_map<u32, std::unique_ptr<vk::overlay_pass>> g_overlay_passes;
rsx::atomic_bitmask_t<runtime_state, u64> g_runtime_state;
// Driver compatibility workarounds
VkFlags g_heap_compatible_buffer_types = 0;
driver_vendor g_driver_vendor = driver_vendor::unknown;
bool g_drv_no_primitive_restart = false;
bool g_drv_sanitize_fp_values = false;
bool g_drv_disable_fence_reset = false;
bool g_drv_emulate_cond_render = false;
u64 g_num_processed_frames = 0;
u64 g_num_total_frames = 0;
void reset_overlay_passes()
{
for (const auto& p : g_overlay_passes)
{
p.second->free_resources();
}
}
void reset_global_resources()
{
// FIXME: These two shouldn't exist
vk::reset_resolve_resources();
vk::reset_overlay_passes();
get_upload_heap()->reset_allocation_stats();
}
void destroy_global_resources()
{
VkDevice dev = *g_render_device;
vk::clear_renderpass_cache(dev);
vk::clear_framebuffer_cache();
vk::clear_resolve_helpers();
vk::clear_dma_resources();
vk::clear_scratch_resources();
vk::get_upload_heap()->destroy();
g_compute_tasks.clear();
for (const auto& p : g_overlay_passes)
{
p.second->destroy();
}
g_overlay_passes.clear();
// This must be the last item destroyed
vk::get_resource_manager()->destroy();
// Statistics counter reset. Also verifies that everything was deleted.
vk::vmm_reset();
}
const vk::render_device *get_current_renderer()
{
return g_render_device;
}
void set_current_renderer(const vk::render_device &device)
{
g_render_device = &device;
g_runtime_state.clear();
g_drv_no_primitive_restart = false;
g_drv_sanitize_fp_values = false;
g_drv_disable_fence_reset = false;
g_drv_emulate_cond_render = (g_cfg.video.relaxed_zcull_sync && !g_render_device->get_conditional_render_support());
g_num_processed_frames = 0;
g_num_total_frames = 0;
g_heap_compatible_buffer_types = 0;
const auto& gpu = g_render_device->gpu();
const auto gpu_name = gpu.get_name();
g_driver_vendor = gpu.get_driver_vendor();
g_chip_class = gpu.get_chip_class();
switch (g_driver_vendor)
{
case driver_vendor::AMD:
// Primitive restart on older GCN is still broken
g_drv_no_primitive_restart = (g_chip_class == vk::chip_class::AMD_gcn_generic);
break;
case driver_vendor::RADV:
// Previous bugs with fence reset and primitive restart seem to have been fixed with newer drivers
break;
case driver_vendor::NVIDIA:
// Nvidia cards are easily susceptible to NaN poisoning
g_drv_sanitize_fp_values = true;
break;
case driver_vendor::INTEL:
case driver_vendor::ANV:
// INTEL Vulkan drivers are mostly OK, workarounds are applied when creating the device
break;
case driver_vendor::MVK:
// Apple GPUs / moltenVK need more testing
break;
case driver_vendor::LAVAPIPE:
// This software device works well, with poor performance as the only downside
break;
case driver_vendor::V3DV:
// Broadcom GPUs need more testing, driver currently largely unstable
break;
case driver_vendor::DOZEN:
// This driver is often picked by mistake when the user meant to select something else. Complain loudly.
#ifdef _WIN32
MessageBox(NULL,
L"You're attempting to run rpcs3 on Microsoft's Dozen driver that emulates vulkan on top of Direct3D12.\n"
"This driver is unsupported. You should use your vendor's vulkan driver whenever possible.",
L"Unsupported Driver",
MB_ICONWARNING | MB_OK);
#else
rsx_log.error("Dozen is currently unsupported. How did you even get this to run outside windows?");
#endif
break;
default:
rsx_log.warning("Unsupported device: %s", gpu_name);
}
rsx_log.notice("Vulkan: Renderer initialized on device '%s'", gpu_name);
{
// Buffer memory tests, only useful for portability on macOS
VkBufferUsageFlags types[] =
{
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
};
VkFlags memory_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VkBuffer tmp;
VkMemoryRequirements memory_reqs;
VkBufferCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
info.size = 4096;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
info.flags = 0;
for (const auto &usage : types)
{
info.usage = usage;
CHECK_RESULT(vkCreateBuffer(*g_render_device, &info, nullptr, &tmp));
vkGetBufferMemoryRequirements(*g_render_device, tmp, &memory_reqs);
if (g_render_device->get_compatible_memory_type(memory_reqs.memoryTypeBits, memory_flags, nullptr))
{
g_heap_compatible_buffer_types |= usage;
}
vkDestroyBuffer(*g_render_device, tmp, nullptr);
}
}
descriptors::init();
}
VkFlags get_heap_compatible_buffer_types()
{
return g_heap_compatible_buffer_types;
}
driver_vendor get_driver_vendor()
{
return g_driver_vendor;
}
bool emulate_primitive_restart(rsx::primitive_type type)
{
if (g_drv_no_primitive_restart)
{
switch (type)
{
case rsx::primitive_type::triangle_strip:
case rsx::primitive_type::quad_strip:
return true;
default:
break;
}
}
return false;
}
bool sanitize_fp_values()
{
return g_drv_sanitize_fp_values;
}
bool fence_reset_disabled()
{
return g_drv_disable_fence_reset;
}
bool emulate_conditional_rendering()
{
return g_drv_emulate_cond_render;
}
void raise_status_interrupt(runtime_state status)
{
g_runtime_state |= status;
}
void clear_status_interrupt(runtime_state status)
{
g_runtime_state.clear(status);
}
bool test_status_interrupt(runtime_state status)
{
return g_runtime_state & status;
}
void enter_uninterruptible()
{
raise_status_interrupt(runtime_state::uninterruptible);
}
void leave_uninterruptible()
{
clear_status_interrupt(runtime_state::uninterruptible);
}
bool is_uninterruptible()
{
return test_status_interrupt(runtime_state::uninterruptible);
}
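// Usage sketch (illustrative): the flag brackets critical sections so other
// code paths can test it before intervening.
//
//   vk::enter_uninterruptible();
//   // ... work that must not be preempted ...
//   vk::leave_uninterruptible();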
void advance_completed_frame_counter()
{
g_num_processed_frames++;
}
void advance_frame_counter()
{
ensure(g_num_processed_frames <= g_num_total_frames);
g_num_total_frames++;
}
u64 get_current_frame_id()
{
return g_num_total_frames;
}
u64 get_last_completed_frame_id()
{
return (g_num_processed_frames > 0) ? g_num_processed_frames - 1 : 0;
}
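// Note: advance_frame_counter() runs at submission and
// advance_completed_frame_counter() at completion, so processed <= total always
// holds and the last completed frame id is processed - 1 (or 0 before any
// frame completes).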
void do_query_cleanup(vk::command_buffer& cmd)
{
auto renderer = dynamic_cast<VKGSRender*>(rsx::get_current_renderer());
ensure(renderer);
renderer->emergency_query_cleanup(&cmd);
}
void on_descriptor_pool_fragmentation(bool is_fatal)
{
if (auto vkthr = dynamic_cast<VKGSRender*>(rsx::get_current_renderer()))
{
vkthr->on_descriptor_pool_fragmentation(is_fatal);
}
}
}
// ==== File: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp ====
#include "stdafx.h"
#include "VKShaderInterpreter.h"
#include "VKVertexProgram.h"
#include "VKFragmentProgram.h"
#include "VKGSRender.h"
#include "../Program/GLSLCommon.h"
#include "../Program/ShaderInterpreter.h"
#include "../rsx_methods.h"
namespace vk
{
void shader_interpreter::build_vs()
{
::glsl::shader_properties properties{};
properties.domain = ::glsl::program_domain::glsl_vertex_program;
properties.require_lit_emulation = true;
// TODO: Extend decompiler thread
// TODO: Rename decompiler thread, it no longer spawns a thread
RSXVertexProgram null_prog;
std::string shader_str;
ParamArray arr;
VKVertexProgram vk_prog;
VKVertexDecompilerThread comp(null_prog, shader_str, arr, vk_prog);
ParamType uniforms = { PF_PARAM_UNIFORM, "vec4" };
uniforms.items.emplace_back("vc[468]", -1);
std::stringstream builder;
comp.insertHeader(builder);
comp.insertConstants(builder, { uniforms });
comp.insertInputs(builder, {});
// Insert vp stream input
builder << "\n"
"layout(std140, set=0, binding=" << m_vertex_instruction_start << ") readonly restrict buffer VertexInstructionBlock\n"
"{\n"
" uint base_address;\n"
" uint entry;\n"
" uint output_mask;\n"
" uint control;\n"
" uvec4 vp_instructions[];\n"
"};\n\n";
::glsl::insert_glsl_legacy_function(builder, properties);
::glsl::insert_vertex_input_fetch(builder, ::glsl::glsl_rules::glsl_rules_vulkan);
builder << program_common::interpreter::get_vertex_interpreter();
const std::string s = builder.str();
m_vs.create(::glsl::program_domain::glsl_vertex_program, s);
m_vs.compile();
// Prepare input table
const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
vk::glsl::program_input in;
in.location = binding_table.vertex_params_bind_slot;
in.domain = ::glsl::glsl_vertex_program;
in.name = "VertexContextBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
m_vs_inputs.push_back(in);
in.location = binding_table.vertex_buffers_first_bind_slot;
in.name = "persistent_input_stream";
in.type = vk::glsl::input_type_texel_buffer;
m_vs_inputs.push_back(in);
in.location = binding_table.vertex_buffers_first_bind_slot + 1;
in.name = "volatile_input_stream";
in.type = vk::glsl::input_type_texel_buffer;
m_vs_inputs.push_back(in);
in.location = binding_table.vertex_buffers_first_bind_slot + 2;
in.name = "vertex_layout_stream";
in.type = vk::glsl::input_type_texel_buffer;
m_vs_inputs.push_back(in);
in.location = binding_table.vertex_constant_buffers_bind_slot;
in.name = "VertexConstantsBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
m_vs_inputs.push_back(in);
// TODO: Bind textures if needed
}
glsl::shader* shader_interpreter::build_fs(u64 compiler_options)
{
[[maybe_unused]] ::glsl::shader_properties properties{};
properties.domain = ::glsl::program_domain::glsl_fragment_program;
properties.require_depth_conversion = true;
properties.require_wpos = true;
u32 len;
ParamArray arr;
std::string shader_str;
RSXFragmentProgram frag;
VKFragmentProgram vk_prog;
VKFragmentDecompilerThread comp(shader_str, arr, frag, len, vk_prog);
const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
std::stringstream builder;
builder <<
"#version 450\n"
"#extension GL_ARB_separate_shader_objects : enable\n\n";
::glsl::insert_subheader_block(builder);
comp.insertConstants(builder);
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_GE)
{
builder << "#define ALPHA_TEST_GEQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_G)
{
builder << "#define ALPHA_TEST_GREATER\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_LE)
{
builder << "#define ALPHA_TEST_LEQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_L)
{
builder << "#define ALPHA_TEST_LESS\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_EQ)
{
builder << "#define ALPHA_TEST_EQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_NE)
{
builder << "#define ALPHA_TEST_NEQUAL\n";
}
if (!(compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_F32_EXPORT))
{
builder << "#define WITH_HALF_OUTPUT_REGISTER\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_DEPTH_EXPORT)
{
builder << "#define WITH_DEPTH_EXPORT\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_FLOW_CTRL)
{
builder << "#define WITH_FLOW_CTRL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_PACKING)
{
builder << "#define WITH_PACKING\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_KIL)
{
builder << "#define WITH_KIL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_STIPPLING)
{
builder << "#define WITH_STIPPLING\n";
}
const char* type_names[] = { "sampler1D", "sampler2D", "sampler3D", "samplerCube" };
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES)
{
builder << "#define WITH_TEXTURES\n\n";
for (int i = 0, bind_location = m_fragment_textures_start; i < 4; ++i)
{
builder << "layout(set=0, binding=" << bind_location++ << ") " << "uniform " << type_names[i] << " " << type_names[i] << "_array[16];\n";
}
builder << "\n"
"#define IS_TEXTURE_RESIDENT(index) true\n"
"#define SAMPLER1D(index) sampler1D_array[index]\n"
"#define SAMPLER2D(index) sampler2D_array[index]\n"
"#define SAMPLER3D(index) sampler3D_array[index]\n"
"#define SAMPLERCUBE(index) samplerCube_array[index]\n\n";
}
builder <<
"layout(std430, binding=" << m_fragment_instruction_start << ") readonly restrict buffer FragmentInstructionBlock\n"
"{\n"
" uint shader_control;\n"
" uint texture_control;\n"
" uint reserved1;\n"
" uint reserved2;\n"
" uvec4 fp_instructions[];\n"
"};\n\n";
builder << program_common::interpreter::get_fragment_interpreter();
const std::string s = builder.str();
auto fs = new glsl::shader();
fs->create(::glsl::program_domain::glsl_fragment_program, s);
fs->compile();
// Prepare input table
vk::glsl::program_input in;
in.location = binding_table.fragment_constant_buffers_bind_slot;
in.domain = ::glsl::glsl_fragment_program;
in.name = "FragmentConstantsBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
m_fs_inputs.push_back(in);
in.location = binding_table.fragment_state_bind_slot;
in.name = "FragmentStateBuffer";
m_fs_inputs.push_back(in);
in.location = binding_table.fragment_texture_params_bind_slot;
in.name = "TextureParametersBuffer";
m_fs_inputs.push_back(in);
for (int i = 0, location = m_fragment_textures_start; i < 4; ++i, ++location)
{
in.location = location;
in.name = std::string(type_names[i]) + "_array[16]";
m_fs_inputs.push_back(in);
}
m_fs_cache[compiler_options].reset(fs);
return fs;
}
std::pair<VkDescriptorSetLayout, VkPipelineLayout> shader_interpreter::create_layout(VkDevice dev)
{
const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
u32 idx = 0;
// Vertex streams: one for cacheable data, one for transient data; the third contains vertex layout info
for (int i = 0; i < 3; i++)
{
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.vertex_buffers_first_bind_slot + i;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
}
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_constant_buffers_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_state_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.fragment_texture_params_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.vertex_constant_buffers_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
bindings[idx].binding = binding_table.vertex_params_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.conditional_render_predicate_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.rasterizer_env_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 16;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot;
bindings[idx].pImmutableSamplers = nullptr;
m_fragment_textures_start = bindings[idx].binding;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 16;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 1;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 16;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 2;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 16;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 3;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[idx].descriptorCount = 4;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 4;
bindings[idx].pImmutableSamplers = nullptr;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 5;
bindings[idx].pImmutableSamplers = nullptr;
m_vertex_instruction_start = bindings[idx].binding;
idx++;
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
bindings[idx].descriptorCount = 1;
bindings[idx].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[idx].binding = binding_table.textures_first_bind_slot + 6;
bindings[idx].pImmutableSamplers = nullptr;
m_fragment_instruction_start = bindings[idx].binding;
idx++;
bindings.resize(idx);
std::array<VkPushConstantRange, 1> push_constants;
push_constants[0].offset = 0;
push_constants[0].size = 16;
push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
if (vk::emulate_conditional_rendering())
{
// Conditional render toggle
push_constants[0].size = 20;
}
const auto set_layout = vk::descriptors::create_layout(bindings);
VkPipelineLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layout_info.setLayoutCount = 1;
layout_info.pSetLayouts = &set_layout;
layout_info.pushConstantRangeCount = 1;
layout_info.pPushConstantRanges = push_constants.data();
VkPipelineLayout result;
CHECK_RESULT(vkCreatePipelineLayout(dev, &layout_info, nullptr, &result));
return { set_layout, result };
}
void shader_interpreter::create_descriptor_pools(const vk::render_device& dev)
{
const auto max_draw_calls = dev.get_descriptor_max_draw_calls();
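// Per-set descriptor counts, mirroring create_layout() above:
// 6 uniform buffers (vertex/fragment constants, fragment state, texture params,
// vertex params, rasterizer env), 3 texel buffers (vertex streams),
// 68 combined image samplers (4 fragment sampler types x 16 + 4 vertex textures)
// and 3 storage buffers (cond. render predicate + VS/FS instruction blocks).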
rsx::simple_array<VkDescriptorPoolSize> sizes =
{
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 },
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , 68 },
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 }
};
m_descriptor_pool.create(dev, sizes, max_draw_calls);
}
void shader_interpreter::init(const vk::render_device& dev)
{
m_device = dev;
std::tie(m_shared_descriptor_layout, m_shared_pipeline_layout) = create_layout(dev);
create_descriptor_pools(dev);
build_vs();
// TODO: Seed the cache
}
void shader_interpreter::destroy()
{
m_program_cache.clear();
m_descriptor_pool.destroy();
for (auto &fs : m_fs_cache)
{
fs.second->destroy();
}
m_vs.destroy();
m_fs_cache.clear();
if (m_shared_pipeline_layout)
{
vkDestroyPipelineLayout(m_device, m_shared_pipeline_layout, nullptr);
m_shared_pipeline_layout = VK_NULL_HANDLE;
}
if (m_shared_descriptor_layout)
{
vkDestroyDescriptorSetLayout(m_device, m_shared_descriptor_layout, nullptr);
m_shared_descriptor_layout = VK_NULL_HANDLE;
}
}
glsl::program* shader_interpreter::link(const vk::pipeline_props& properties, u64 compiler_opt)
{
glsl::shader* fs;
if (auto found = m_fs_cache.find(compiler_opt); found != m_fs_cache.end())
{
fs = found->second.get();
}
else
{
fs = build_fs(compiler_opt);
}
VkPipelineShaderStageCreateInfo shader_stages[2] = {};
shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shader_stages[0].module = m_vs.get_handle();
shader_stages[0].pName = "main";
shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shader_stages[1].module = fs->get_handle();
shader_stages[1].pName = "main";
std::vector<VkDynamicState> dynamic_state_descriptors;
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_VIEWPORT);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_SCISSOR);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_LINE_WIDTH);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
if (vk::get_current_renderer()->get_depth_bounds_support())
{
dynamic_state_descriptors.push_back(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
}
VkPipelineDynamicStateCreateInfo dynamic_state_info = {};
dynamic_state_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state_info.pDynamicStates = dynamic_state_descriptors.data();
dynamic_state_info.dynamicStateCount = ::size32(dynamic_state_descriptors);
VkPipelineVertexInputStateCreateInfo vi = { VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO };
VkPipelineViewportStateCreateInfo vp = {};
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.viewportCount = 1;
vp.scissorCount = 1;
VkPipelineMultisampleStateCreateInfo ms = properties.state.ms;
ensure(ms.rasterizationSamples == VkSampleCountFlagBits((properties.renderpass_key >> 16) & 0xF)); // "Multisample state mismatch!"
if (ms.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT)
{
// Update the sample mask pointer
ms.pSampleMask = &properties.state.temp_storage.msaa_sample_mask;
}
// Rebase pointers from pipeline structure in case it is moved/copied
VkPipelineColorBlendStateCreateInfo cs = properties.state.cs;
cs.pAttachments = properties.state.att_state;
VkGraphicsPipelineCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
info.pVertexInputState = &vi;
info.pInputAssemblyState = &properties.state.ia;
info.pRasterizationState = &properties.state.rs;
info.pColorBlendState = &cs;
info.pMultisampleState = &ms;
info.pViewportState = &vp;
info.pDepthStencilState = &properties.state.ds;
info.stageCount = 2;
info.pStages = shader_stages;
info.pDynamicState = &dynamic_state_info;
info.layout = m_shared_pipeline_layout;
info.basePipelineIndex = -1;
info.basePipelineHandle = VK_NULL_HANDLE;
info.renderPass = vk::get_renderpass(m_device, properties.renderpass_key);
auto compiler = vk::get_pipe_compiler();
auto program = compiler->compile(info, m_shared_pipeline_layout, vk::pipe_compiler::COMPILE_INLINE, {}, m_vs_inputs, m_fs_inputs);
return program.release();
}
void shader_interpreter::update_fragment_textures(const std::array<VkDescriptorImageInfo, 68>& sampled_images, vk::descriptor_set &set)
{
const VkDescriptorImageInfo* texture_ptr = sampled_images.data();
for (u32 i = 0, binding = m_fragment_textures_start; i < 4; ++i, ++binding, texture_ptr += 16)
{
set.push(texture_ptr, 16, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, binding);
}
}
VkDescriptorSet shader_interpreter::allocate_descriptor_set()
{
return m_descriptor_pool.allocate(m_shared_descriptor_layout);
}
glsl::program* shader_interpreter::get(const vk::pipeline_props& properties, const program_hash_util::fragment_program_utils::fragment_program_metadata& metadata)
{
pipeline_key key;
key.compiler_opt = 0;
key.properties = properties;
if (rsx::method_registers.alpha_test_enabled()) [[unlikely]]
{
switch (rsx::method_registers.alpha_func())
{
case rsx::comparison_function::always:
break;
case rsx::comparison_function::never:
return nullptr;
case rsx::comparison_function::greater_or_equal:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_GE;
break;
case rsx::comparison_function::greater:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_G;
break;
case rsx::comparison_function::less_or_equal:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_LE;
break;
case rsx::comparison_function::less:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_L;
break;
case rsx::comparison_function::equal:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_EQ;
break;
case rsx::comparison_function::not_equal:
key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_NE;
break;
}
}
if (rsx::method_registers.shader_control() & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_DEPTH_EXPORT;
if (rsx::method_registers.shader_control() & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_F32_EXPORT;
if (rsx::method_registers.shader_control() & RSX_SHADER_CONTROL_USES_KIL) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_KIL;
if (metadata.referenced_textures_mask) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES;
if (metadata.has_branch_instructions) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_FLOW_CTRL;
if (metadata.has_pack_instructions) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_PACKING;
if (rsx::method_registers.polygon_stipple_enabled()) key.compiler_opt |= program_common::interpreter::COMPILER_OPT_ENABLE_STIPPLING;
if (m_current_key == key) [[likely]]
{
return m_current_interpreter;
}
else
{
m_current_key = key;
}
auto found = m_program_cache.find(key);
if (found != m_program_cache.end()) [[likely]]
{
m_current_interpreter = found->second.get();
return m_current_interpreter;
}
m_current_interpreter = link(properties, key.compiler_opt);
m_program_cache[key].reset(m_current_interpreter);
return m_current_interpreter;
}
bool shader_interpreter::is_interpreter(const glsl::program* prog) const
{
return prog == m_current_interpreter;
}
u32 shader_interpreter::get_vertex_instruction_location() const
{
return m_vertex_instruction_start;
}
u32 shader_interpreter::get_fragment_instruction_location() const
{
return m_fragment_instruction_start;
}
}
// ==== File: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKTextureCache.cpp ====
#include "stdafx.h"
#include "VKGSRender.h"
#include "VKTextureCache.h"
#include "VKCompute.h"
#include "util/asm.hpp"
namespace vk
{
u64 hash_image_properties(VkFormat format, u16 w, u16 h, u16 d, u16 mipmaps, VkImageType type, VkImageCreateFlags create_flags, VkSharingMode sharing_mode)
{
/**
* Key layout:
* 00-08: Format (Max 255)
* 08-24: Width (Max 64K)
* 24-40: Height (Max 64K)
* 40-48: Depth (Max 255)
* 48-54: Mipmaps (Max 63) <- We have some room here; it is not possible to have more than 12 mip levels on PS3, and 16 on PC is pushing it.
* 54-56: Type (Max 3)
* 56-57: Sharing (Max 1) <- Boolean. Exclusive = 0, shared = 1
* 57-64: Flags (Max 127) <- We have some room here, we only care about a small subset of create flags.
*/
ensure(static_cast<u32>(format) < 0xFF);
return (static_cast<u64>(format) & 0xFF) |
(static_cast<u64>(w) << 8) |
(static_cast<u64>(h) << 24) |
(static_cast<u64>(d) << 40) |
(static_cast<u64>(mipmaps) << 48) |
(static_cast<u64>(type) << 54) |
(static_cast<u64>(sharing_mode) << 56) |
(static_cast<u64>(create_flags) << 57);
}
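// Worked example (illustrative): a 512x512 VK_FORMAT_B8G8R8A8_UNORM (44) 2D
// texture with 10 mip levels, no create flags, exclusive sharing:
//
//   hash_image_properties(VK_FORMAT_B8G8R8A8_UNORM, 512, 512, 1, 10, VK_IMAGE_TYPE_2D, 0, VK_SHARING_MODE_EXCLUSIVE);
//   // == 44ull | (512ull << 8) | (512ull << 24) | (1ull << 40) | (10ull << 48) | (1ull << 54)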
texture_cache::cached_image_reference_t::cached_image_reference_t(texture_cache* parent, std::unique_ptr<vk::viewable_image>& previous)
{
ensure(previous);
this->parent = parent;
this->data = std::move(previous);
}
texture_cache::cached_image_reference_t::~cached_image_reference_t()
{
// Erase layout information to force TOP_OF_PIPE transition next time.
data->current_layout = VK_IMAGE_LAYOUT_UNDEFINED;
data->current_queue_family = VK_QUEUE_FAMILY_IGNORED;
// Move this object to the cached image pool
const auto key = hash_image_properties(data->format(), data->width(), data->height(), data->depth(), data->mipmaps(), data->info.imageType, data->info.flags, data->info.sharingMode);
std::lock_guard lock(parent->m_cached_pool_lock);
if (!parent->m_cache_is_exiting)
{
parent->m_cached_memory_size += data->memory->size();
parent->m_cached_images.emplace_front(key, data);
}
else
{
// Destroy if the cache is closed. The GPU is done with this resource anyway.
data.reset();
}
}
void cached_texture_section::dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch)
{
ensure(src->samples() == 1);
if (!m_device)
{
m_device = &cmd.get_command_pool().get_owner();
}
if (dma_fence)
{
// NOTE: This can be reached if the section was previously synchronized, or if a special code path was taken.
// If a hard flush occurred while this surface was flush_always the cache would have reset its protection afterwards.
// DMA resource would still be present but already used to flush previously.
vk::get_resource_manager()->dispose(dma_fence);
}
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
const auto internal_bpp = vk::get_format_texel_width(src->format());
const auto transfer_width = static_cast<u32>(src_area.width());
const auto transfer_height = static_cast<u32>(src_area.height());
real_pitch = internal_bpp * transfer_width;
rsx_pitch = pitch;
const bool require_format_conversion = !!(src->aspect() & VK_IMAGE_ASPECT_STENCIL_BIT) || src->format() == VK_FORMAT_D32_SFLOAT;
const auto tiled_region = rsx::get_current_renderer()->get_tiled_memory_region(valid_range);
const bool require_tiling = !!tiled_region;
const bool require_gpu_transform = require_format_conversion || pack_unpack_swap_bytes || require_tiling;
auto dma_sync_region = valid_range;
dma_mapping_handle dma_mapping = { 0, nullptr };
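// dma_sync maps the DMA range on first use; passing load=true also populates
// the mapping with current guest memory so partial writes can merge with
// existing data (see the tiling path below).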
auto dma_sync = [&dma_sync_region, &dma_mapping](bool load, bool force = false)
{
if (dma_mapping.second && !force)
{
return;
}
dma_mapping = vk::map_dma(dma_sync_region.start, dma_sync_region.length());
if (load)
{
vk::load_dma(dma_sync_region.start, dma_sync_region.length());
}
};
if (require_gpu_transform)
{
const auto transfer_pitch = real_pitch;
const auto task_length = transfer_pitch * src_area.height();
auto working_buffer_length = calculate_working_buffer_size(task_length, src->aspect());
#if !DEBUG_DMA_TILING
if (require_tiling)
{
// Safety padding
working_buffer_length += tiled_region.tile->size;
// Calculate actual working section for the memory op
dma_sync_region = tiled_region.tile_align(dma_sync_region);
}
#endif
auto working_buffer = vk::get_scratch_buffer(cmd, working_buffer_length);
u32 result_offset = 0;
VkBufferImageCopy region = {};
region.imageSubresource = { src->aspect(), 0, 0, 1 };
region.imageOffset = { src_area.x1, src_area.y1, 0 };
region.imageExtent = { transfer_width, transfer_height, 1 };
bool require_rw_barrier = true;
image_readback_options_t xfer_options{};
xfer_options.swap_bytes = require_format_conversion && pack_unpack_swap_bytes;
vk::copy_image_to_buffer(cmd, src, working_buffer, region, xfer_options);
// NOTE: For depth/stencil formats, copying to buffer and byteswap are combined into one step above
if (pack_unpack_swap_bytes && !require_format_conversion)
{
const auto texel_layout = vk::get_format_element_size(src->format());
const auto elem_size = texel_layout.first;
vk::cs_shuffle_base* shuffle_kernel;
if (elem_size == 2)
{
shuffle_kernel = vk::get_compute_task<vk::cs_shuffle_16>();
}
else if (elem_size == 4)
{
shuffle_kernel = vk::get_compute_task<vk::cs_shuffle_32>();
}
else
{
ensure(get_context() == rsx::texture_upload_context::dma);
shuffle_kernel = nullptr;
}
if (shuffle_kernel)
{
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, 0, task_length,
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
shuffle_kernel->run(cmd, working_buffer, task_length);
if (!require_tiling)
{
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, 0, task_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
require_rw_barrier = false;
}
}
}
if (require_tiling)
{
#if !DEBUG_DMA_TILING
// Compute -> Compute barrier
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, 0, task_length,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// We don't need to calibrate write if two conditions are met:
// 1. The start offset of our 2D region is a multiple of 64 lines
// 2. We use the whole pitch.
// If these conditions are not met, we need to upload the entire tile (or at least the affected tiles wholly)
// FIXME: There is a 3rd condition - a write onto an already-persisted range. e.g. One transfer copies half the image, then the other half is copied later.
// We don't need to load again for the second copy in that scenario.
if (valid_range.start != dma_sync_region.start || real_pitch != tiled_region.tile->pitch)
{
// Tile indices run to the end of the row (full pitch).
// Tiles address outside their 64x64 area too, so we need to actually load the whole thing and "fill in" missing blocks.
// Visualizing "hot" pixels when doing a partial copy is very revealing, there's lots of data from the padding areas to be filled in.
dma_sync(true);
ensure(dma_mapping.second);
// Upload memory to the working buffer
const auto dst_offset = task_length; // Append to the end of the input
VkBufferCopy mem_load{};
mem_load.srcOffset = dma_mapping.first;
mem_load.dstOffset = dst_offset;
mem_load.size = dma_sync_region.length();
vkCmdCopyBuffer(cmd, dma_mapping.second->value, working_buffer->value, 1, &mem_load);
// Transfer -> Compute barrier
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, dst_offset, dma_sync_region.length(),
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT);
}
// Prepare payload
const RSX_detiler_config config =
{
.tile_base_address = tiled_region.base_address,
.tile_base_offset = valid_range.start - tiled_region.base_address,
.tile_rw_offset = dma_sync_region.start - tiled_region.base_address,
.tile_size = tiled_region.tile->size,
.tile_pitch = tiled_region.tile->pitch,
.bank = tiled_region.tile->bank,
.dst = working_buffer,
.dst_offset = task_length,
.src = working_buffer,
.src_offset = 0,
// TODO: Check interaction with anti-aliasing
.image_width = static_cast<u16>(transfer_width),
.image_height = static_cast<u16>(transfer_height),
.image_pitch = real_pitch,
.image_bpp = context == rsx::texture_upload_context::dma ? internal_bpp : rsx::get_format_block_size_in_bytes(gcm_format)
};
// Execute
const auto job = vk::get_compute_task<vk::cs_tile_memcpy<RSX_detiler_op::encode>>();
job->run(cmd, config);
// Update internal variables
result_offset = task_length;
real_pitch = tiled_region.tile->pitch; // We're always copying the full image. In case of partials we're "filling in" blocks, not doing partial 2D copies.
require_rw_barrier = true;
#if VISUALIZE_GPU_TILING
if (g_cfg.video.renderdoc_compatiblity)
{
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, result_offset, working_buffer_length,
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// Debug write
auto scratch_img = vk::get_typeless_helper(VK_FORMAT_B8G8R8A8_UNORM, RSX_FORMAT_CLASS_COLOR, tiled_region.tile->pitch / 4, 768);
scratch_img->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkBufferImageCopy dbg_copy{};
dbg_copy.bufferOffset = config.dst_offset;
dbg_copy.imageExtent.width = width;
dbg_copy.imageExtent.height = height;
dbg_copy.imageExtent.depth = 1;
dbg_copy.bufferRowLength = tiled_region.tile->pitch / 4;
dbg_copy.imageSubresource = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1 };
vk::copy_buffer_to_image(cmd, working_buffer, scratch_img, dbg_copy);
scratch_img->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
}
#endif
#endif
}
if (require_rw_barrier)
{
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, result_offset, dma_sync_region.length(),
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
}
if (rsx_pitch == real_pitch) [[likely]]
{
dma_sync(false);
VkBufferCopy copy = {};
copy.srcOffset = result_offset;
copy.dstOffset = dma_mapping.first;
copy.size = dma_sync_region.length();
vkCmdCopyBuffer(cmd, working_buffer->value, dma_mapping.second->value, 1, ©);
}
else
{
dma_sync(true);
std::vector<VkBufferCopy> copy;
copy.reserve(transfer_height);
u32 dst_offset = dma_mapping.first;
u32 src_offset = result_offset;
for (unsigned row = 0; row < transfer_height; ++row)
{
copy.push_back({ src_offset, dst_offset, transfer_pitch });
src_offset += real_pitch;
dst_offset += rsx_pitch;
}
vkCmdCopyBuffer(cmd, working_buffer->value, dma_mapping.second->value, transfer_height, copy.data());
}
}
else
{
dma_sync(false);
VkBufferImageCopy region = {};
region.bufferRowLength = (rsx_pitch / internal_bpp);
region.imageSubresource = { src->aspect(), 0, 0, 1 };
region.imageOffset = { src_area.x1, src_area.y1, 0 };
region.imageExtent = { transfer_width, transfer_height, 1 };
region.bufferOffset = dma_mapping.first;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dma_mapping.second->value, 1, ®ion);
}
src->pop_layout(cmd);
VkBufferMemoryBarrier2KHR mem_barrier =
{
.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR, // Finish all transfer...
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR,
.dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, // ...before proceeding with any command
.dstAccessMask = 0,
.buffer = dma_mapping.second->value,
.offset = dma_mapping.first,
.size = valid_range.length()
};
// Create event object for this transfer and queue signal op
dma_fence = std::make_unique<vk::event>(*m_device, sync_domain::host);
dma_fence->signal(cmd,
{
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &mem_barrier
});
// Set cb flag for queued dma operations
cmd.set_flag(vk::command_buffer::cb_has_dma_transfer);
if (get_context() == rsx::texture_upload_context::dma)
{
// Save readback hint in case transformation is required later
switch (internal_bpp)
{
case 2:
gcm_format = CELL_GCM_TEXTURE_R5G6B5;
break;
case 4:
default:
gcm_format = CELL_GCM_TEXTURE_A8R8G8B8;
break;
}
}
synchronized = true;
sync_timestamp = rsx::get_shared_tag();
}
void texture_cache::on_section_destroyed(cached_texture_section& tex)
{
if (tex.is_managed() && tex.exists())
{
auto disposable = vk::disposable_t::make(new cached_image_reference_t(this, tex.get_texture()));
vk::get_resource_manager()->dispose(disposable);
}
}
void texture_cache::clear()
{
{
std::lock_guard lock(m_cached_pool_lock);
m_cache_is_exiting = true;
}
baseclass::clear();
m_cached_images.clear();
m_cached_memory_size = 0;
}
void texture_cache::copy_transfer_regions_impl(vk::command_buffer& cmd, vk::image* dst, const std::vector<copy_region_descriptor>& sections_to_transfer) const
{
const auto dst_aspect = dst->aspect();
const auto dst_bpp = vk::get_format_texel_width(dst->format());
for (const auto& section : sections_to_transfer)
{
if (!section.src)
{
continue;
}
// Generates a region to write data to the final destination
const auto get_output_region = [&](s32 in_x, s32 in_y, u32 w, u32 h, vk::image* data_src)
{
VkImageCopy copy_rgn = {
.srcSubresource = { data_src->aspect(), 0, 0, 1},
.srcOffset = { in_x, in_y, 0 },
.dstSubresource = { dst_aspect, section.level, 0, 1 },
.dstOffset = { section.dst_x, section.dst_y, 0 },
.extent = { w, h, 1 }
};
if (dst->info.imageType == VK_IMAGE_TYPE_3D)
{
copy_rgn.dstOffset.z = section.dst_z;
}
else
{
copy_rgn.dstSubresource.baseArrayLayer = section.dst_z;
}
return copy_rgn;
};
const bool typeless = section.src->aspect() != dst_aspect ||
!formats_are_bitcast_compatible(dst, section.src);
// Avoid inserting unnecessary barrier GENERAL->TRANSFER_SRC->GENERAL in active render targets
const auto preferred_layout = (section.src->current_layout != VK_IMAGE_LAYOUT_GENERAL) ?
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL;
section.src->push_layout(cmd, preferred_layout);
auto src_image = section.src;
auto src_x = section.src_x;
auto src_y = section.src_y;
auto src_w = section.src_w;
auto src_h = section.src_h;
rsx::flags32_t transform = section.xform;
if (section.xform == rsx::surface_transform::coordinate_transform)
{
// Dimensions were given in 'dst' space. Work out the real source coordinates
const auto src_bpp = vk::get_format_texel_width(section.src->format());
src_x = (src_x * dst_bpp) / src_bpp;
src_w = utils::aligned_div<u16>(src_w * dst_bpp, src_bpp);
transform &= ~(rsx::surface_transform::coordinate_transform);
}
if (auto surface = dynamic_cast<vk::render_target*>(section.src))
{
surface->transform_samples_to_pixels(src_x, src_w, src_y, src_h);
}
if (typeless) [[unlikely]]
{
const auto src_bpp = vk::get_format_texel_width(section.src->format());
const u16 convert_w = u16(src_w * src_bpp) / dst_bpp;
const u16 convert_x = u16(src_x * src_bpp) / dst_bpp;
if (convert_w == section.dst_w && src_h == section.dst_h &&
transform == rsx::surface_transform::identity &&
section.level == 0 && section.dst_z == 0)
{
// Optimization to avoid double transfer
// TODO: Handle level and layer offsets
const areai src_rect = coordi{{ src_x, src_y }, { src_w, src_h }};
const areai dst_rect = coordi{{ section.dst_x, section.dst_y }, { section.dst_w, section.dst_h }};
vk::copy_image_typeless(cmd, section.src, dst, src_rect, dst_rect, 1);
section.src->pop_layout(cmd);
continue;
}
src_image = vk::get_typeless_helper(dst->format(), dst->format_class(), convert_x + convert_w, src_y + src_h);
src_image->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const areai src_rect = coordi{{ src_x, src_y }, { src_w, src_h }};
const areai dst_rect = coordi{{ 0, 0 }, { convert_w, src_h }};
vk::copy_image_typeless(cmd, section.src, src_image, src_rect, dst_rect, 1);
src_image->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
src_x = 0;
src_y = 0;
src_w = convert_w;
}
ensure(src_image->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src_image->current_layout == VK_IMAGE_LAYOUT_GENERAL);
ensure(transform == rsx::surface_transform::identity);
if (src_w == section.dst_w && src_h == section.dst_h) [[likely]]
{
const auto copy_rgn = get_output_region(src_x, src_y, src_w, src_h, src_image);
vkCmdCopyImage(cmd, src_image->value, src_image->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
else
{
u16 dst_x = section.dst_x, dst_y = section.dst_y;
vk::image* _dst = dst;
if (src_image->info.format != dst->info.format || section.level != 0 || section.dst_z != 0) [[unlikely]]
{
// Either a bitcast is required or a scale+copy to mipmap level / layer
const u32 requested_width = dst->width();
const u32 requested_height = src_y + src_h + section.dst_h; // Accounts for possible typeless ref on the same helper on src
_dst = vk::get_typeless_helper(src_image->format(), src_image->format_class(), requested_width, requested_height);
_dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
if (_dst != dst)
{
// We place the output after the source to account for the initial typeless-xfer if applicable
// If src_image == _dst then this is just a write-to-self. Either way, use best-fit placement.
dst_x = 0;
dst_y = src_y + src_h;
}
vk::copy_scaled_image(cmd, src_image, _dst,
coordi{ { src_x, src_y }, { src_w, src_h } },
coordi{ { dst_x, dst_y }, { section.dst_w, section.dst_h } },
1, src_image->format() == _dst->format(),
VK_FILTER_NEAREST);
if (_dst != dst) [[unlikely]]
{
// Casting comes after the scaling!
const auto copy_rgn = get_output_region(dst_x, dst_y, section.dst_w, section.dst_h, _dst);
_dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImage(cmd, _dst->value, _dst->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
}
section.src->pop_layout(cmd);
}
}
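// Note: apply_component_mapping_flags below resolves the view swizzle. For depth formats the
// R channel is broadcast to all components, since that is all the hardware provides. For
// colour formats, component_order::swapped_native (as in the switch below) remaps as follows
// (output channel <- source channel):
//
//   VkComponentMapping m = { VK_COMPONENT_SWIZZLE_A,   // out.r <- in.a
//                            VK_COMPONENT_SWIZZLE_R,   // out.g <- in.r
//                            VK_COMPONENT_SWIZZLE_G,   // out.b <- in.g
//                            VK_COMPONENT_SWIZZLE_B }; // out.a <- in.b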
VkComponentMapping texture_cache::apply_component_mapping_flags(u32 gcm_format, rsx::component_order flags, const rsx::texture_channel_remap_t& remap_vector) const
{
switch (gcm_format)
{
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
// Don't bother letting this propagate
return{ VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R };
default:
break;
}
VkComponentMapping mapping = {};
switch (flags)
{
case rsx::component_order::default_:
{
mapping = vk::apply_swizzle_remap(vk::get_component_mapping(gcm_format), remap_vector);
break;
}
case rsx::component_order::native:
{
mapping = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
break;
}
case rsx::component_order::swapped_native:
{
mapping = { VK_COMPONENT_SWIZZLE_A, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B };
break;
}
default:
break;
}
return mapping;
}
vk::image* texture_cache::get_template_from_collection_impl(const std::vector<copy_region_descriptor>& sections_to_transfer) const
{
if (sections_to_transfer.size() == 1) [[likely]]
{
return sections_to_transfer.front().src;
}
vk::image* result = nullptr;
for (const auto& section : sections_to_transfer)
{
if (!section.src)
continue;
if (!result)
{
result = section.src;
}
else
{
if (section.src->native_component_map.a != result->native_component_map.a ||
section.src->native_component_map.r != result->native_component_map.r ||
section.src->native_component_map.g != result->native_component_map.g ||
section.src->native_component_map.b != result->native_component_map.b)
{
// TODO
// This requires a far more complex setup as it's not always possible to mix and match without compute assistance
return nullptr;
}
}
}
return result;
}
std::unique_ptr<vk::viewable_image> texture_cache::find_cached_image(VkFormat format, u16 w, u16 h, u16 d, u16 mipmaps, VkImageType type, VkImageCreateFlags create_flags, VkImageUsageFlags usage, VkSharingMode sharing)
{
reader_lock lock(m_cached_pool_lock);
if (!m_cached_images.empty())
{
const u64 desired_key = hash_image_properties(format, w, h, d, mipmaps, type, create_flags, sharing);
lock.upgrade();
for (auto it = m_cached_images.begin(); it != m_cached_images.end(); ++it)
{
if (it->key == desired_key && (it->data->info.usage & usage) == usage)
{
auto ret = std::move(it->data);
m_cached_images.erase(it);
m_cached_memory_size -= ret->memory->size();
return ret;
}
}
}
return {};
}
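// Note: find_cached_image above services a simple recycling pool. Entries are keyed by a hash
// of the immutable creation properties, and usage flags are matched as a superset, so a
// TRANSFER_DST|SAMPLED image can satisfy a SAMPLED-only request. A hypothetical usage sketch
// (reuse/allocate_fresh are illustrative names, not part of this file):
//
//   if (auto img = find_cached_image(VK_FORMAT_B8G8R8A8_UNORM, 256, 256, 1, 1,
//           VK_IMAGE_TYPE_2D, 0, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE))
//       reuse(std::move(img));   // pooled image, memory already allocated
//   else
//       allocate_fresh();        // fall through to a new vk::viewable_image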
std::unique_ptr<vk::viewable_image> texture_cache::create_temporary_subresource_storage(
rsx::format_class format_class, VkFormat format,
u16 width, u16 height, u16 depth, u16 layers, u8 mips,
VkImageType image_type, VkFlags image_flags, VkFlags usage_flags)
{
auto image = find_cached_image(format, width, height, depth, mips, image_type, image_flags, usage_flags, VK_SHARING_MODE_EXCLUSIVE);
if (!image)
{
image = std::make_unique<vk::viewable_image>(*vk::get_current_renderer(), m_memory_types.device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
image_type,
format,
width, height, depth, mips, layers, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, image_flags | VK_IMAGE_CREATE_ALLOW_NULL_RPCS3,
VMM_ALLOCATION_POOL_TEXTURE_CACHE, format_class);
if (!image->value)
{
// OOM, bail
return nullptr;
}
}
return image;
}
void texture_cache::dispose_reusable_image(std::unique_ptr<vk::viewable_image>& image)
{
auto disposable = vk::disposable_t::make(new cached_image_reference_t(this, image));
vk::get_resource_manager()->dispose(disposable);
}
vk::image_view* texture_cache::create_temporary_subresource_view_impl(vk::command_buffer& cmd, vk::image* source, VkImageType image_type, VkImageViewType view_type,
u32 gcm_format, u16 x, u16 y, u16 w, u16 h, u16 d, u8 mips, const rsx::texture_channel_remap_t& remap_vector, bool copy)
{
const VkImageCreateFlags image_flags = (view_type == VK_IMAGE_VIEW_TYPE_CUBE) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
const VkImageUsageFlags usage_flags = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
const VkFormat dst_format = vk::get_compatible_sampler_format(m_formats_support, gcm_format);
const u16 layers = (view_type == VK_IMAGE_VIEW_TYPE_CUBE) ? 6 : 1;
// Provision
auto image = create_temporary_subresource_storage(rsx::classify_format(gcm_format), dst_format, w, h, d, layers, mips, image_type, image_flags, usage_flags);
// OOM?
if (!image)
{
return nullptr;
}
// This method is almost exclusively used to work on framebuffer resources
// Keep the original swizzle layout unless there is data format conversion
VkComponentMapping view_swizzle;
if (!source || dst_format != source->info.format)
{
// This is a data cast operation
// Use native mapping for the new type
// TODO: Also simulate the readback+reupload step (very tricky)
const auto remap = get_component_mapping(gcm_format);
view_swizzle = { remap[1], remap[2], remap[3], remap[0] };
}
else
{
view_swizzle = source->native_component_map;
}
image->set_debug_name("Temp view");
image->set_native_component_layout(view_swizzle);
auto view = image->get_view(remap_vector);
if (copy)
{
std::vector<copy_region_descriptor> region =
{ {
.src = source,
.xform = rsx::surface_transform::coordinate_transform,
.src_x = x,
.src_y = y,
.src_w = w,
.src_h = h,
.dst_w = w,
.dst_h = h
} };
vk::change_image_layout(cmd, image.get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
copy_transfer_regions_impl(cmd, image.get(), region);
vk::change_image_layout(cmd, image.get(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
// TODO: Floating reference. We can do better with some restructuring.
image.release();
return view;
}
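// Note on ownership: create_temporary_subresource_view_impl above is intentionally loose here.
// image.release() drops the unique_ptr on purpose: the returned view keeps a raw pointer to
// the image, and release_temporary_subresource() later reconstructs a
// unique_ptr<vk::viewable_image> from view->image() (via dynamic_cast) to recycle or dispose
// it. The TODO acknowledges this "floating reference" would be cleaner with shared ownership.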
vk::image_view* texture_cache::create_temporary_subresource_view(vk::command_buffer& cmd, vk::image* source, u32 gcm_format,
u16 x, u16 y, u16 w, u16 h, const rsx::texture_channel_remap_t& remap_vector)
{
return create_temporary_subresource_view_impl(cmd, source, source->info.imageType, VK_IMAGE_VIEW_TYPE_2D,
gcm_format, x, y, w, h, 1, 1, remap_vector, true);
}
vk::image_view* texture_cache::create_temporary_subresource_view(vk::command_buffer& cmd, vk::image** source, u32 gcm_format,
u16 x, u16 y, u16 w, u16 h, const rsx::texture_channel_remap_t& remap_vector)
{
return create_temporary_subresource_view(cmd, *source, gcm_format, x, y, w, h, remap_vector);
}
vk::image_view* texture_cache::generate_cubemap_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 size,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
auto _template = get_template_from_collection_impl(sections_to_copy);
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_2D,
VK_IMAGE_VIEW_TYPE_CUBE, gcm_format, 0, 0, size, size, 1, 1, remap_vector, false);
if (!result)
{
// Failed to create temporary object, bail
return nullptr;
}
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, 1, 0, 6 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
return result;
}
vk::image_view* texture_cache::generate_3d_from_2d_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height, u16 depth,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
auto _template = get_template_from_collection_impl(sections_to_copy);
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_3D,
VK_IMAGE_VIEW_TYPE_3D, gcm_format, 0, 0, width, height, depth, 1, remap_vector, false);
if (!result)
{
// Failed to create temporary object, bail
return nullptr;
}
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, 1, 0, 1 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
return result;
}
vk::image_view* texture_cache::generate_atlas_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
auto _template = get_template_from_collection_impl(sections_to_copy);
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_2D,
VK_IMAGE_VIEW_TYPE_2D, gcm_format, 0, 0, width, height, 1, 1, remap_vector, false);
if (!result)
{
// Failed to create temporary object, bail
return nullptr;
}
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, 1, 0, 1 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (sections_to_copy[0].dst_w != width || sections_to_copy[0].dst_h != height)
{
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
return result;
}
vk::image_view* texture_cache::generate_2d_mipmaps_from_images(vk::command_buffer& cmd, u32 gcm_format, u16 width, u16 height,
const std::vector<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
const auto mipmaps = ::narrow<u8>(sections_to_copy.size());
auto _template = get_template_from_collection_impl(sections_to_copy);
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_2D,
VK_IMAGE_VIEW_TYPE_2D, gcm_format, 0, 0, width, height, 1, mipmaps, remap_vector, false);
if (!result)
{
// Failed to create temporary object, bail
return nullptr;
}
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, mipmaps, 0, 1 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
return result;
}
void texture_cache::release_temporary_subresource(vk::image_view* view)
{
auto resource = dynamic_cast<vk::viewable_image*>(view->image());
ensure(resource);
auto image = std::unique_ptr<vk::viewable_image>(resource);
auto disposable = vk::disposable_t::make(new cached_image_reference_t(this, image));
vk::get_resource_manager()->dispose(disposable);
}
void texture_cache::update_image_contents(vk::command_buffer& cmd, vk::image_view* dst_view, vk::image* src, u16 width, u16 height)
{
std::vector<copy_region_descriptor> region =
{ {
.src = src,
.xform = rsx::surface_transform::identity,
.src_w = width,
.src_h = height,
.dst_w = width,
.dst_h = height
} };
auto dst = dst_view->image();
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
copy_transfer_regions_impl(cmd, dst, region);
dst->pop_layout(cmd);
}
cached_texture_section* texture_cache::create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags)
{
const auto section_depth = depth;
// Define desirable attributes based on type
VkImageType image_type;
VkImageUsageFlags usage_flags = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
u8 layer = 0;
switch (type)
{
case rsx::texture_dimension_extended::texture_dimension_1d:
image_type = VK_IMAGE_TYPE_1D;
height = 1;
depth = 1;
layer = 1;
break;
case rsx::texture_dimension_extended::texture_dimension_2d:
image_type = VK_IMAGE_TYPE_2D;
depth = 1;
layer = 1;
break;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
image_type = VK_IMAGE_TYPE_2D;
depth = 1;
layer = 6;
break;
case rsx::texture_dimension_extended::texture_dimension_3d:
image_type = VK_IMAGE_TYPE_3D;
layer = 1;
break;
default:
fmt::throw_exception("Unreachable");
}
// Check what actually exists at that address
const rsx::image_section_attributes_t search_desc = { .gcm_format = gcm_format, .width = width, .height = height, .depth = section_depth, .mipmaps = mipmaps };
const bool allow_dirty = (context != rsx::texture_upload_context::framebuffer_storage);
cached_texture_section& region = *find_cached_texture(rsx_range, search_desc, true, true, allow_dirty);
ensure(!region.is_locked());
vk::viewable_image* image = nullptr;
if (region.exists())
{
image = dynamic_cast<vk::viewable_image*>(region.get_raw_texture());
bool reusable = true;
if (flags & texture_create_flags::do_not_reuse)
{
reusable = false;
}
else if (flags & texture_create_flags::shareable)
{
reusable = (image && image->sharing_mode() == VK_SHARING_MODE_CONCURRENT);
}
if (!reusable || !image || region.get_image_type() != type || image->depth() != depth) // TODO
{
// Incompatible view/type
region.destroy();
image = nullptr;
}
else
{
ensure(region.is_managed());
// Reuse
region.set_rsx_pitch(pitch);
if (flags & texture_create_flags::initialize_image_contents)
{
// Wipe memory
image->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkImageSubresourceRange range{ image->aspect(), 0, image->mipmaps(), 0, image->layers() };
if (image->aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color = { {0.f, 0.f, 0.f, 1.f} };
vkCmdClearColorImage(cmd, image->value, image->current_layout, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &range);
}
}
}
}
if (!image)
{
const bool is_cubemap = type == rsx::texture_dimension_extended::texture_dimension_cubemap;
const VkFormat vk_format = get_compatible_sampler_format(m_formats_support, gcm_format);
VkImageCreateFlags create_flags = is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
VkSharingMode sharing_mode = (flags & texture_create_flags::shareable) ? VK_SHARING_MODE_CONCURRENT : VK_SHARING_MODE_EXCLUSIVE;
if (auto found = find_cached_image(vk_format, width, height, depth, mipmaps, image_type, create_flags, usage_flags, sharing_mode))
{
image = found.release();
}
else
{
if (sharing_mode == VK_SHARING_MODE_CONCURRENT)
{
create_flags |= VK_IMAGE_CREATE_SHAREABLE_RPCS3;
}
image = new vk::viewable_image(*m_device,
m_memory_types.device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
image_type, vk_format,
width, height, depth, mipmaps, layer, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, usage_flags, create_flags,
VMM_ALLOCATION_POOL_TEXTURE_CACHE, rsx::classify_format(gcm_format));
}
// New section, we must prepare it
region.reset(rsx_range);
region.set_gcm_format(gcm_format);
region.set_image_type(type);
region.create(width, height, section_depth, mipmaps, image, pitch, true, gcm_format);
}
region.set_view_flags(swizzle_flags);
region.set_context(context);
region.set_swizzled(swizzled);
region.set_dirty(false);
image->native_component_map = apply_component_mapping_flags(gcm_format, swizzle_flags, rsx::default_remap_vector);
// It's not necessary to lock blit dst textures as they are just reused as necessary
switch (context)
{
case rsx::texture_upload_context::shader_read:
case rsx::texture_upload_context::blit_engine_src:
region.protect(utils::protection::ro);
read_only_range = region.get_min_max(read_only_range, rsx::section_bounds::locked_range);
break;
case rsx::texture_upload_context::blit_engine_dst:
region.set_unpack_swap_bytes(true);
no_access_range = region.get_min_max(no_access_range, rsx::section_bounds::locked_range);
break;
case rsx::texture_upload_context::dma:
case rsx::texture_upload_context::framebuffer_storage:
// Should not be initialized with this method
default:
fmt::throw_exception("Unexpected upload context 0x%x", u32(context));
}
update_cache_tag();
return &region;
}
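// Note: a "nul" section (below) appears to track a memory range for DMA coherency without
// backing it with a VkImage. It is used for destinations that only ever go through the DMA
// path; the cache still needs the range registered so CPU writes are detected and flushed,
// hence the create_dma_only() call, set_unpack_swap_bytes(true) and the no_access_range update.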
cached_texture_section* texture_cache::create_nul_section(
vk::command_buffer& /*cmd*/,
const utils::address_range& rsx_range,
const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile,
bool memory_load)
{
auto& region = *find_cached_texture(rsx_range, { .gcm_format = RSX_GCM_FORMAT_IGNORED }, true, false, false);
ensure(!region.is_locked());
// Prepare section
region.reset(rsx_range);
region.create_dma_only(attrs.width, attrs.height, attrs.pitch);
region.set_dirty(false);
region.set_unpack_swap_bytes(true);
if (memory_load && !tile) // Memory load on DMA tiles will always happen during the actual copy command
{
vk::map_dma(rsx_range.start, rsx_range.length());
vk::load_dma(rsx_range.start, rsx_range.length());
}
no_access_range = region.get_min_max(no_access_range, rsx::section_bounds::locked_range);
update_cache_tag();
return &region;
}
cached_texture_section* texture_cache::upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled)
{
if (context != rsx::texture_upload_context::shader_read)
{
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
}
const bool upload_async = rsx::get_current_renderer()->get_backend_config().supports_asynchronous_compute;
rsx::flags32_t create_flags = 0;
if (upload_async && g_fxo->get<AsyncTaskScheduler>().is_host_mode())
{
create_flags |= texture_create_flags::do_not_reuse;
if (m_device->get_graphics_queue() != m_device->get_transfer_queue())
{
create_flags |= texture_create_flags::shareable;
}
}
auto section = create_new_texture(cmd, rsx_range, width, height, depth, mipmaps, pitch, gcm_format, context, type, swizzled,
rsx::component_order::default_, create_flags);
auto image = section->get_raw_texture();
image->set_debug_name(fmt::format("Raw Texture @0x%x", rsx_range.start));
vk::enter_uninterruptible();
bool input_swizzled = swizzled;
if (context == rsx::texture_upload_context::blit_engine_src)
{
// Swizzling is ignored for blit engine copy and emulated using remapping
input_swizzled = false;
}
rsx::flags32_t upload_command_flags = initialize_image_layout | upload_contents_inline;
if (context == rsx::texture_upload_context::shader_read && upload_async)
{
upload_command_flags |= upload_contents_async;
}
std::vector<rsx::subresource_layout> tmp;
auto p_subresource_layout = &subresource_layout;
u32 heap_align = upload_heap_align_default;
if (auto tiled_region = rsx::get_current_renderer()->get_tiled_memory_region(rsx_range);
context == rsx::texture_upload_context::blit_engine_src && tiled_region)
{
if (mipmaps > 1)
{
// This really shouldn't happen on framebuffer tiled memory
rsx_log.error("Tiled decode of mipmapped textures is not supported.");
}
else
{
const auto bpp = rsx::get_format_block_size_in_bytes(gcm_format);
const auto [scratch_buf, linear_data_scratch_offset] = vk::detile_memory_block(cmd, tiled_region, rsx_range, width, height, bpp);
auto subres = subresource_layout.front();
// FIXME: !!EVIL!!
subres.data = { scratch_buf, linear_data_scratch_offset };
subres.pitch_in_block = width;
upload_command_flags |= source_is_gpu_resident;
heap_align = width * bpp;
tmp.push_back(subres);
p_subresource_layout = &tmp;
}
}
const u16 layer_count = (type == rsx::texture_dimension_extended::texture_dimension_cubemap) ? 6 : 1;
vk::upload_image(cmd, image, *p_subresource_layout, gcm_format, input_swizzled, layer_count, image->aspect(),
*m_texture_upload_heap, heap_align, upload_command_flags);
vk::leave_uninterruptible();
if (context != rsx::texture_upload_context::shader_read)
{
// Insert appropriate barrier depending on use. Shader read resources should be lazy-initialized before consuming.
// TODO: All texture resources should be initialized on use, this is wasteful
VkImageLayout preferred_layout;
switch (context)
{
default:
preferred_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
break;
case rsx::texture_upload_context::blit_engine_dst:
preferred_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
break;
case rsx::texture_upload_context::blit_engine_src:
preferred_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
break;
}
if (preferred_layout != image->current_layout)
{
image->change_layout(cmd, preferred_layout);
}
else
{
// Insert ordering barrier
ensure(preferred_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
insert_image_memory_barrier(cmd, image->value, image->current_layout, preferred_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
{ image->aspect(), 0, image->mipmaps(), 0, image->layers() });
}
}
section->last_write_tag = rsx::get_shared_tag();
return section;
}
void texture_cache::set_component_order(cached_texture_section& section, u32 gcm_format, rsx::component_order expected_flags)
{
if (expected_flags == section.get_view_flags())
return;
const VkComponentMapping mapping = apply_component_mapping_flags(gcm_format, expected_flags, rsx::default_remap_vector);
auto image = static_cast<vk::viewable_image*>(section.get_raw_texture());
ensure(image);
image->set_native_component_layout(mapping);
section.set_view_flags(expected_flags);
}
void texture_cache::insert_texture_barrier(vk::command_buffer& cmd, vk::image* tex, bool strong_ordering)
{
if (!strong_ordering && tex->current_layout == VK_IMAGE_LAYOUT_GENERAL)
{
// A previous barrier already exists, do nothing
return;
}
vk::as_rtt(tex)->texture_barrier(cmd);
}
bool texture_cache::render_target_format_is_compatible(vk::image* tex, u32 gcm_format)
{
auto vk_format = tex->info.format;
switch (gcm_format)
{
default:
//TODO
err_once("Format incompatibility detected, reporting failure to force data copy (VK_FORMAT=0x%X, GCM_FORMAT=0x%X)", static_cast<u32>(vk_format), gcm_format);
return false;
#ifndef __APPLE__
case CELL_GCM_TEXTURE_R5G6B5:
return (vk_format == VK_FORMAT_R5G6B5_UNORM_PACK16);
#else
// R5G6B5 is not supported by Metal
case CELL_GCM_TEXTURE_R5G6B5:
return (vk_format == VK_FORMAT_B8G8R8A8_UNORM);
#endif
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
return (vk_format == VK_FORMAT_R16G16B16A16_SFLOAT);
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
return (vk_format == VK_FORMAT_R32G32B32A32_SFLOAT);
case CELL_GCM_TEXTURE_X32_FLOAT:
return (vk_format == VK_FORMAT_R32_SFLOAT);
case CELL_GCM_TEXTURE_A8R8G8B8:
case CELL_GCM_TEXTURE_D8R8G8B8:
return (vk_format == VK_FORMAT_B8G8R8A8_UNORM || vk_format == VK_FORMAT_D24_UNORM_S8_UINT || vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT);
case CELL_GCM_TEXTURE_B8:
return (vk_format == VK_FORMAT_R8_UNORM);
case CELL_GCM_TEXTURE_G8B8:
return (vk_format == VK_FORMAT_R8G8_UNORM);
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
return (vk_format == VK_FORMAT_D24_UNORM_S8_UINT || vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT);
case CELL_GCM_TEXTURE_X16:
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
return (vk_format == VK_FORMAT_D16_UNORM || vk_format == VK_FORMAT_D32_SFLOAT);
}
}
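// Note: the two helpers below bracket raw DMA work on the shared command buffer. A hedged
// usage sketch:
//
//   prepare_for_dma_transfers(cmd);     // ensures cmd is in the recording state
//   ... record buffer/image copies for the readback ...
//   cleanup_after_dma_transfers(cmd);   // submits, waits if required, restarts recording
//
// cleanup also has to close an open occlusion query (cb_has_open_query) before ending the
// command buffer, then re-arms it afterwards via the cb_load_occluson_task flag.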
void texture_cache::prepare_for_dma_transfers(vk::command_buffer& cmd)
{
if (!cmd.is_recording())
{
cmd.begin();
}
}
void texture_cache::cleanup_after_dma_transfers(vk::command_buffer& cmd)
{
bool occlusion_query_active = !!(cmd.flags & vk::command_buffer::cb_has_open_query);
if (occlusion_query_active)
{
// We really stepped in it
vk::do_query_cleanup(cmd);
}
// End recording
cmd.end();
if (cmd.access_hint != vk::command_buffer::access_type_hint::all)
{
// Flush any pending async jobs in case of blockers
// TODO: Context-level manager should handle this logic
auto async_scheduler = g_fxo->try_get<AsyncTaskScheduler>();
vk::semaphore* async_sema = nullptr;
if (async_scheduler && async_scheduler->is_recording())
{
if (async_scheduler->is_host_mode())
{
async_sema = async_scheduler->get_sema();
}
else
{
vk::queue_submit_t submit_info{};
async_scheduler->flush(submit_info, VK_TRUE);
}
}
// Primary access command queue, must restart it after
vk::fence submit_fence(*m_device);
vk::queue_submit_t submit_info{ m_submit_queue, &submit_fence };
if (async_sema)
{
vk::queue_submit_t submit_info2{};
submit_info2.queue_signal(*async_sema);
async_scheduler->flush(submit_info2, VK_TRUE);
submit_info.wait_on(*async_sema, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
}
cmd.submit(submit_info, VK_TRUE);
vk::wait_for_fence(&submit_fence, GENERAL_WAIT_TIMEOUT);
CHECK_RESULT(vkResetCommandBuffer(cmd, 0));
cmd.begin();
}
else
{
// Auxiliary command queue with auto-restart capability
vk::queue_submit_t submit_info{ m_submit_queue, nullptr };
cmd.submit(submit_info, VK_TRUE);
}
ensure(cmd.flags == 0);
if (occlusion_query_active)
{
ensure(cmd.is_recording());
cmd.flags |= vk::command_buffer::cb_load_occluson_task;
}
}
void texture_cache::initialize(vk::render_device& device, VkQueue submit_queue, vk::data_heap& upload_heap)
{
m_device = &device;
m_memory_types = device.get_memory_mapping();
m_formats_support = device.get_formats_support();
m_submit_queue = submit_queue;
m_texture_upload_heap = &upload_heap;
}
void texture_cache::destroy()
{
clear();
}
bool texture_cache::is_depth_texture(u32 rsx_address, u32 rsx_size)
{
reader_lock lock(m_cache_mutex);
auto& block = m_storage.block_for(rsx_address);
if (block.get_locked_count() == 0)
return false;
for (auto& tex : block)
{
if (tex.is_dirty())
continue;
if (!tex.overlaps(rsx_address, rsx::section_bounds::full_range))
continue;
if ((rsx_address + rsx_size - tex.get_section_base()) <= tex.get_section_size())
{
switch (tex.get_format())
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
return true;
default:
return false;
}
}
}
//Unreachable; silence compiler warning anyway
return false;
}
bool texture_cache::handle_memory_pressure(rsx::problem_severity severity)
{
auto any_released = baseclass::handle_memory_pressure(severity);
// TODO: This can cause invalidation of in-flight resources
if (severity <= rsx::problem_severity::low || !m_cached_memory_size)
{
// Nothing left to do
return any_released;
}
constexpr u64 _1M = 0x100000;
if (severity <= rsx::problem_severity::moderate && m_cached_memory_size < (64 * _1M))
{
// Some memory is consumed by the temporary resources, but no need to panic just yet
return any_released;
}
std::unique_lock lock(m_cache_mutex, std::defer_lock);
if (!lock.try_lock())
{
rsx_log.warning("Unable to remove temporary resources because we're already in the texture cache!");
return any_released;
}
// Nuke temporary resources. They will still be visible to the GPU.
auto gc = vk::get_resource_manager();
any_released |= !m_cached_images.empty();
for (auto& img : m_cached_images)
{
gc->dispose(img.data);
}
m_cached_images.clear();
m_cached_memory_size = 0;
any_released |= !m_temporary_subresource_cache.empty();
for (auto& e : m_temporary_subresource_cache)
{
ensure(e.second.second);
release_temporary_subresource(e.second.second);
}
m_temporary_subresource_cache.clear();
return any_released;
}
void texture_cache::on_frame_end()
{
trim_sections();
if (m_storage.m_unreleased_texture_objects >= m_max_zombie_objects)
{
purge_unreleased_sections();
}
if (m_cached_images.size() > max_cached_image_pool_size ||
m_cached_memory_size > 256 * 0x100000)
{
std::lock_guard lock(m_cached_pool_lock);
const auto new_size = m_cached_images.size() / 2;
for (usz i = new_size; i < m_cached_images.size(); ++i)
{
m_cached_memory_size -= m_cached_images[i].data->memory->size();
}
m_cached_images.resize(new_size);
}
baseclass::on_frame_end();
reset_frame_statistics();
}
vk::viewable_image* texture_cache::upload_image_simple(vk::command_buffer& cmd, VkFormat format, u32 address, u32 width, u32 height, u32 pitch)
{
bool linear_format_supported = false;
switch (format)
{
case VK_FORMAT_B8G8R8A8_UNORM:
linear_format_supported = m_formats_support.bgra8_linear;
break;
case VK_FORMAT_R8G8B8A8_UNORM:
linear_format_supported = m_formats_support.argb8_linear;
break;
default:
rsx_log.error("Unsupported VkFormat 0x%x", static_cast<u32>(format));
return nullptr;
}
if (!linear_format_supported)
{
return nullptr;
}
// Uploads a linear memory range as a BGRA8 texture
auto image = std::make_unique<vk::viewable_image>(*m_device, m_memory_types.host_visible_coherent,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_IMAGE_TYPE_2D,
format,
width, height, 1, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, 0,
VMM_ALLOCATION_POOL_SWAPCHAIN);
VkImageSubresource subresource{};
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkSubresourceLayout layout{};
vkGetImageSubresourceLayout(*m_device, image->value, &subresource, &layout);
void* mem = image->memory->map(0, layout.rowPitch * height);
auto src = vm::_ptr<const char>(address);
auto dst = static_cast<char*>(mem);
// TODO: SSE optimization
for (u32 row = 0; row < height; ++row)
{
auto casted_src = reinterpret_cast<const be_t<u32>*>(src);
auto casted_dst = reinterpret_cast<u32*>(dst);
for (u32 col = 0; col < width; ++col)
casted_dst[col] = casted_src[col];
src += pitch;
dst += layout.rowPitch;
}
image->memory->unmap();
vk::change_image_layout(cmd, image.get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
// Fully dispose immediately. These images aren't really reusable right now.
auto result = image.get();
vk::get_resource_manager()->dispose(image);
return result;
}
bool texture_cache::blit(const rsx::blit_src_info& src, const rsx::blit_dst_info& dst, bool interpolate, vk::surface_cache& m_rtts, vk::command_buffer& cmd)
{
blitter helper;
auto reply = upload_scaled_image(src, dst, interpolate, cmd, m_rtts, helper);
if (reply.succeeded)
{
if (reply.real_dst_size)
{
flush_if_cache_miss_likely(cmd, reply.to_address_range());
}
return true;
}
return false;
}
u32 texture_cache::get_unreleased_textures_count() const
{
return baseclass::get_unreleased_textures_count() + ::size32(m_cached_images);
}
u64 texture_cache::get_temporary_memory_in_use() const
{
// TODO: Technically incorrect, we should have separate metrics for cached evictable resources (this value) and temporary active resources.
return m_cached_memory_size;
}
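// Note: worked examples of the quota maths in is_overallocated() below, using integer
// arithmetic in MiB exactly as the code does:
//   8192 MiB card: min(3072, 8192 * 40 / 100 = 3276) -> 3072 MiB quota
//   1024 MiB card: max(204,  1024 * 30 / 100 =  307) ->  307 MiB quota
//    768 MiB card: fixed 192 MiB quota
//    256 MiB card: min(128, 256 / 2 = 128)           ->  128 MiB quota
// The quota is then scaled by 0x100000 back to bytes before the comparison.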
bool texture_cache::is_overallocated() const
{
const auto total_device_memory = m_device->get_memory_mapping().device_local_total_bytes / 0x100000;
u64 quota = 0;
if (total_device_memory >= 2048)
{
quota = std::min<u64>(3072, (total_device_memory * 40) / 100);
}
else if (total_device_memory >= 1024)
{
quota = std::max<u64>(204, (total_device_memory * 30) / 100);
}
else if (total_device_memory >= 768)
{
quota = 192;
}
else
{
quota = std::min<u64>(128, total_device_memory / 2);
}
quota *= 0x100000;
if (const u64 texture_cache_pool_usage = vmm_get_application_pool_usage(VMM_ALLOCATION_POOL_TEXTURE_CACHE);
texture_cache_pool_usage > quota)
{
rsx_log.warning("Texture cache is using %lluM of memory which exceeds the allocation quota of %lluM",
texture_cache_pool_usage, quota);
return true;
}
return false;
}
}
| 54,900 | C++ | .cpp | 1,362 | 36.340675 | 219 | 0.698225 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,456 | VKVertexProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKVertexProgram.cpp |
#include "stdafx.h"
#include "VKVertexProgram.h"
#include "VKCommonDecompiler.h"
#include "VKHelpers.h"
#include "vkutils/device.h"
#include "../Program/GLSLCommon.h"
std::string VKVertexDecompilerThread::getFloatTypeName(usz elementCount)
{
return glsl::getFloatTypeNameImpl(elementCount);
}
std::string VKVertexDecompilerThread::getIntTypeName(usz /*elementCount*/)
{
return "ivec4";
}
std::string VKVertexDecompilerThread::getFunction(FUNCTION f)
{
return glsl::getFunctionImpl(f);
}
std::string VKVertexDecompilerThread::compareFunction(COMPARE f, const std::string &Op0, const std::string &Op1, bool scalar)
{
return glsl::compareFunctionImpl(f, Op0, Op1, scalar);
}
void VKVertexDecompilerThread::insertHeader(std::stringstream &OS)
{
OS << "#version 450\n\n";
OS << "#extension GL_ARB_separate_shader_objects : enable\n\n";
OS << "layout(std140, set = 0, binding = 0) uniform VertexContextBuffer\n";
OS << "{\n";
OS << " mat4 scale_offset_mat;\n";
OS << " ivec4 user_clip_enabled[2];\n";
OS << " vec4 user_clip_factor[2];\n";
OS << " uint transform_branch_bits;\n";
OS << " float point_size;\n";
OS << " float z_near;\n";
OS << " float z_far;\n";
OS << "};\n\n";
if (m_device_props.emulate_conditional_rendering)
{
OS << "layout(std430, set = 0, binding = 8) readonly buffer EXT_Conditional_Rendering\n";
OS << "{\n";
OS << " uint conditional_rendering_predicate;\n";
OS << "};\n\n";
}
OS << "layout(push_constant) uniform VertexLayoutBuffer\n";
OS << "{\n";
OS << " uint vertex_base_index;\n";
OS << " uint vertex_index_offset;\n";
OS << " uint draw_id;\n";
OS << " uint layout_ptr_offset;\n";
if (m_device_props.emulate_conditional_rendering)
{
OS << " uint conditional_rendering_enabled;\n";
}
OS << "};\n\n";
vk::glsl::program_input in;
in.location = m_binding_table.vertex_params_bind_slot;
in.domain = glsl::glsl_vertex_program;
in.name = "VertexContextBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
inputs.push_back(in);
}
void VKVertexDecompilerThread::insertInputs(std::stringstream& OS, const std::vector<ParamType>& /*inputs*/)
{
OS << "layout(set=0, binding=5) uniform usamplerBuffer persistent_input_stream;\n"; // Data stream with persistent vertex data (cacheable)
OS << "layout(set=0, binding=6) uniform usamplerBuffer volatile_input_stream;\n"; // Data stream with per-draw data (registers and immediate draw data)
OS << "layout(set=0, binding=7) uniform usamplerBuffer vertex_layout_stream;\n"; // Data stream defining vertex data layout
vk::glsl::program_input in;
in.location = m_binding_table.vertex_buffers_first_bind_slot;
in.domain = glsl::glsl_vertex_program;
in.name = "persistent_input_stream";
in.type = vk::glsl::input_type_texel_buffer;
this->inputs.push_back(in);
in.location = m_binding_table.vertex_buffers_first_bind_slot + 1;
in.domain = glsl::glsl_vertex_program;
in.name = "volatile_input_stream";
in.type = vk::glsl::input_type_texel_buffer;
this->inputs.push_back(in);
in.location = m_binding_table.vertex_buffers_first_bind_slot + 2;
in.domain = glsl::glsl_vertex_program;
in.name = "vertex_layout_stream";
in.type = vk::glsl::input_type_texel_buffer;
this->inputs.push_back(in);
}
void VKVertexDecompilerThread::insertConstants(std::stringstream & OS, const std::vector<ParamType> & constants)
{
vk::glsl::program_input in;
u32 location = m_binding_table.vertex_textures_first_bind_slot;
for (const ParamType &PT : constants)
{
for (const ParamItem &PI : PT.items)
{
if (PI.name.starts_with("vc["))
{
OS << "layout(std140, set=0, binding = " << static_cast<int>(m_binding_table.vertex_constant_buffers_bind_slot) << ") uniform VertexConstantsBuffer\n";
OS << "{\n";
OS << " vec4 " << PI.name << ";\n";
OS << "};\n\n";
in.location = m_binding_table.vertex_constant_buffers_bind_slot;
in.domain = glsl::glsl_vertex_program;
in.name = "VertexConstantsBuffer";
in.type = vk::glsl::input_type_uniform_buffer;
inputs.push_back(in);
continue;
}
if (PT.type == "sampler2D" ||
PT.type == "samplerCube" ||
PT.type == "sampler1D" ||
PT.type == "sampler3D")
{
in.location = location;
in.name = PI.name;
in.type = vk::glsl::input_type_texture;
inputs.push_back(in);
auto samplerType = PT.type;
if (m_prog.texture_state.multisampled_textures) [[unlikely]]
{
ensure(PI.name.length() > 3);
int index = atoi(&PI.name[3]);
if (m_prog.texture_state.multisampled_textures & (1 << index))
{
if (samplerType != "sampler1D" && samplerType != "sampler2D")
{
rsx_log.error("Unexpected multisampled sampler type '%s'", samplerType);
}
samplerType = "sampler2DMS";
}
}
OS << "layout(set = 0, binding=" << location++ << ") uniform " << samplerType << " " << PI.name << ";\n";
}
}
}
}
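// Note: the initializer order for vertex_reg_info, inferred from the member accesses in this
// file (the unnamed middle fields are assumptions, not confirmed by this translation unit):
//   { name, need_declare, src_reg, src_reg_mask, <cast?>, cond, default_val, <alias?>,
//     check_mask, check_mask_value }
// e.g. fog_c copies dst_reg5.xxxx whenever OUTPUT_MASK_FOG is set, while the clip distances
// are guarded by user_clip_enabled and fall back to the "0.5" default when disabled.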
static const vertex_reg_info reg_table[] =
{
{ "gl_Position", false, "dst_reg0", "", false },
//Technically these two are for both back and front
{ "diff_color", true, "dst_reg1", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTDIFFUSE | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKDIFFUSE },
{ "spec_color", true, "dst_reg2", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTSPECULAR | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKSPECULAR },
{ "diff_color1", true, "dst_reg3", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTDIFFUSE | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKDIFFUSE },
{ "spec_color1", true, "dst_reg4", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTSPECULAR | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKSPECULAR },
{ "fog_c", true, "dst_reg5", ".xxxx", true, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FOG },
//Warning: With SPIR-V, if you declare a clip distance variable you must assign it a value even when it's disabled! The runtime does not assign a default value
{ "gl_ClipDistance[0]", false, "dst_reg5", ".y * user_clip_factor[0].x", false, "user_clip_enabled[0].x > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC0 },
{ "gl_ClipDistance[1]", false, "dst_reg5", ".z * user_clip_factor[0].y", false, "user_clip_enabled[0].y > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC1 },
{ "gl_ClipDistance[2]", false, "dst_reg5", ".w * user_clip_factor[0].z", false, "user_clip_enabled[0].z > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC2 },
{ "gl_PointSize", false, "dst_reg6", ".x", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_POINTSIZE },
{ "gl_ClipDistance[3]", false, "dst_reg6", ".y * user_clip_factor[0].w", false, "user_clip_enabled[0].w > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC3 },
{ "gl_ClipDistance[4]", false, "dst_reg6", ".z * user_clip_factor[1].x", false, "user_clip_enabled[1].x > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC4 },
{ "gl_ClipDistance[5]", false, "dst_reg6", ".w * user_clip_factor[1].y", false, "user_clip_enabled[1].y > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC5 },
{ "tc0", true, "dst_reg7", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX0 },
{ "tc1", true, "dst_reg8", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX1 },
{ "tc2", true, "dst_reg9", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX2 },
{ "tc3", true, "dst_reg10", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX3 },
{ "tc4", true, "dst_reg11", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX4 },
{ "tc5", true, "dst_reg12", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX5 },
{ "tc6", true, "dst_reg13", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX6 },
{ "tc7", true, "dst_reg14", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX7 },
{ "tc8", true, "dst_reg15", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX8 },
{ "tc9", true, "dst_reg6", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX9 } // In this line, dst_reg6 is correct since dst_reg goes from 0 to 15.
};
void VKVertexDecompilerThread::insertOutputs(std::stringstream& OS, const std::vector<ParamType>& /*outputs*/)
{
for (auto &i : reg_table)
{
if (i.need_declare)
{
// All outputs must be declared always to allow setting default values
OS << "layout(location=" << vk::get_varying_register_location(i.name) << ") out vec4 " << i.name << ";\n";
}
}
}
void VKVertexDecompilerThread::insertMainStart(std::stringstream & OS)
{
glsl::shader_properties properties2{};
properties2.domain = glsl::glsl_vertex_program;
properties2.require_lit_emulation = properties.has_lit_op;
properties2.emulate_zclip_transform = true;
properties2.emulate_depth_clip_only = vk::g_render_device->get_shader_types_support().allow_float64;
properties2.low_precision_tests = vk::is_NVIDIA(vk::get_driver_vendor());
properties2.require_explicit_invariance = (vk::is_NVIDIA(vk::get_driver_vendor()) && g_cfg.video.shader_precision != gpu_preset_level::low);
glsl::insert_glsl_legacy_function(OS, properties2);
glsl::insert_vertex_input_fetch(OS, glsl::glsl_rules_vulkan);
// Declare global registers with optional initialization
std::string registers;
if (ParamType *vec4Types = m_parr.SearchParam(PF_PARAM_OUT, "vec4"))
{
for (auto &PI : vec4Types->items)
{
if (registers.length())
registers += ", ";
else
registers = "vec4 ";
registers += PI.name;
if (!PI.value.empty())
{
// Simplify default initialization
if (PI.value == "vec4(0.0, 0.0, 0.0, 0.0)")
registers += " = vec4(0.)";
else
registers += " = " + PI.value;
}
}
}
if (!registers.empty())
{
OS << registers << ";\n";
}
OS << "void vs_main()\n";
OS << "{\n";
//Declare temporary registers, ignoring those mapped to outputs
for (const ParamType &PT : m_parr.params[PF_PARAM_NONE])
{
for (const ParamItem &PI : PT.items)
{
if (PI.name.starts_with("dst_reg"))
continue;
OS << " " << PT.type << " " << PI.name;
if (!PI.value.empty())
OS << " = " << PI.value;
OS << ";\n";
}
}
for (const ParamType &PT : m_parr.params[PF_PARAM_IN])
{
for (const ParamItem &PI : PT.items)
{
OS << " vec4 " << PI.name << " = read_location(" << std::to_string(PI.location) << ");\n";
}
}
}
void VKVertexDecompilerThread::insertMainEnd(std::stringstream & OS)
{
OS << "}\n\n";
OS << "void main ()\n";
OS << "{\n\n";
if (m_device_props.emulate_conditional_rendering)
{
OS << " if (conditional_rendering_enabled != 0 && conditional_rendering_predicate == 0)\n";
OS << " {\n";
OS << " gl_Position = vec4(0., 0., 0., -1.);\n";
OS << " return;\n";
OS << " }\n\n";
}
OS << " vs_main();\n\n";
for (auto &i : reg_table)
{
if (!i.check_mask || i.test(rsx_vertex_program.output_mask))
{
if (m_parr.HasParam(PF_PARAM_OUT, "vec4", i.src_reg))
{
std::string condition = (!i.cond.empty()) ? "(" + i.cond + ") " : "";
if (condition.empty() || i.default_val.empty())
{
if (!condition.empty()) condition = "if " + condition;
OS << " " << condition << i.name << " = " << i.src_reg << i.src_reg_mask << ";\n";
}
else
{
//Insert if-else condition
OS << " " << i.name << " = " << condition << "? " << i.src_reg << i.src_reg_mask << ": " << i.default_val << ";\n";
}
// Register was marked for output and a properly initialized source register exists
// Nothing more to do
continue;
}
}
if (i.need_declare)
{
OS << " " << i.name << " = vec4(0., 0., 0., 1.);\n";
}
else if (i.check_mask_value == CELL_GCM_ATTRIB_OUTPUT_MASK_POINTSIZE)
{
// Default point size if none was generated by the program
OS << " gl_PointSize = point_size;\n";
}
}
OS << " gl_Position = gl_Position * scale_offset_mat;\n";
OS << " gl_Position = apply_zclip_xform(gl_Position, z_near, z_far);\n";
OS << "}\n";
}
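// Note: the decompiled program is emitted in two stages. vs_main() (from insertMainStart)
// computes the dst_reg* values; the wrapper main() generated above then routes them to
// varyings, applies guards and defaults, and finishes the position transform. The emitted
// GLSL has roughly this shape (illustrative, not verbatim output):
//
//   void main()
//   {
//       vs_main();
//       tc0 = dst_reg7;                                    // mask-enabled outputs
//       gl_ClipDistance[0] = (user_clip_enabled[0].x > 0)
//           ? dst_reg5.y * user_clip_factor[0].x : 0.5;    // guarded output with default
//       gl_Position = gl_Position * scale_offset_mat;
//       gl_Position = apply_zclip_xform(gl_Position, z_near, z_far);
//   }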
void VKVertexDecompilerThread::Task()
{
m_device_props.emulate_conditional_rendering = vk::emulate_conditional_rendering();
m_binding_table = vk::g_render_device->get_pipeline_binding_table();
m_shader = Decompile();
vk_prog->SetInputs(inputs);
}
VKVertexProgram::VKVertexProgram() = default;
VKVertexProgram::~VKVertexProgram()
{
Delete();
}
void VKVertexProgram::Decompile(const RSXVertexProgram& prog)
{
std::string source;
VKVertexDecompilerThread decompiler(prog, source, parr, *this);
decompiler.Task();
has_indexed_constants = decompiler.properties.has_indexed_constants;
constant_ids = std::vector<u16>(decompiler.m_constant_ids.begin(), decompiler.m_constant_ids.end());
shader.create(::glsl::program_domain::glsl_vertex_program, source);
}
void VKVertexProgram::Compile()
{
if (g_cfg.video.log_programs)
fs::write_file(fs::get_cache_dir() + "shaderlog/VertexProgram" + std::to_string(id) + ".spirv", fs::rewrite, shader.get_source());
handle = shader.compile();
}
void VKVertexProgram::Delete()
{
shader.destroy();
}
void VKVertexProgram::SetInputs(std::vector<vk::glsl::program_input>& inputs)
{
for (auto &it : inputs)
{
uniforms.push_back(it);
}
}
| 13,017 | C++ | .cpp | 318 | 38.062893 | 162 | 0.654297 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,457 | VKPresent.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKPresent.cpp |
#include "stdafx.h"
#include "VKGSRender.h"
#include "vkutils/buffer_object.h"
#include "Emu/RSX/Overlays/overlay_manager.h"
#include "Emu/RSX/Overlays/overlays.h"
#include "Emu/RSX/Overlays/overlay_debug_overlay.h"
#include "Emu/Cell/Modules/cellVideoOut.h"
#include "upscalers/bilinear_pass.hpp"
#include "upscalers/fsr_pass.h"
#include "upscalers/nearest_pass.hpp"
#include "util/asm.hpp"
#include "util/video_provider.h"
extern atomic_t<bool> g_user_asked_for_screenshot;
extern atomic_t<recording_mode> g_recording_mode;
namespace
{
VkFormat RSX_display_format_to_vk_format(u8 format)
{
switch (format)
{
default:
rsx_log.error("Unhandled video output format 0x%x", static_cast<s32>(format));
[[fallthrough]];
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8R8G8B8:
return VK_FORMAT_B8G8R8A8_UNORM;
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8B8G8R8:
return VK_FORMAT_R8G8B8A8_UNORM;
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_R16G16B16X16_FLOAT:
return VK_FORMAT_R16G16B16A16_SFLOAT;
}
}
}
void VKGSRender::reinitialize_swapchain()
{
m_swapchain_dims.width = m_frame->client_width();
m_swapchain_dims.height = m_frame->client_height();
// Reject requests to acquire new swapchain if the window is minimized
// The NVIDIA driver will spam VK_ERROR_OUT_OF_DATE_KHR if you try to acquire an image from the swapchain and the window is minimized
// However, any attempt to actually renew the swapchain will crash the driver with VK_ERROR_DEVICE_LOST while the window is in this state
if (m_swapchain_dims.width == 0 || m_swapchain_dims.height == 0)
{
swapchain_unavailable = true;
return;
}
// NOTE: This operation will create a hard sync point
close_and_submit_command_buffer();
m_current_command_buffer->reset();
m_current_command_buffer->begin();
for (auto &ctx : frame_context_storage)
{
if (ctx.present_image == umax)
continue;
// Release present image by presenting it
frame_context_cleanup(&ctx);
}
// Discard the current upscaling pipeline if any
m_upscaler.reset();
// Drain all the queues
vkDeviceWaitIdle(*m_device);
// Rebuild swapchain. Old swapchain destruction is handled by the init_swapchain call
if (!m_swapchain->init(m_swapchain_dims.width, m_swapchain_dims.height))
{
rsx_log.warning("Swapchain initialization failed. Request ignored [%dx%d]", m_swapchain_dims.width, m_swapchain_dims.height);
swapchain_unavailable = true;
return;
}
// Prepare new swapchain images for use
for (u32 i = 0; i < m_swapchain->get_swap_image_count(); ++i)
{
const auto target_layout = m_swapchain->get_optimal_present_layout();
const auto target_image = m_swapchain->get_image(i);
VkClearColorValue clear_color{};
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, target_layout, range);
}
// Will have to block until rendering is completed
vk::fence resize_fence(*m_device);
// Flush the command buffer
close_and_submit_command_buffer(&resize_fence);
vk::wait_for_fence(&resize_fence);
m_current_command_buffer->reset();
m_current_command_buffer->begin();
swapchain_unavailable = false;
should_reinitialize_swapchain = false;
}
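// Note: present() below maps swapchain results onto two recovery flags. VK_SUBOPTIMAL_KHR
// still presented successfully, so the swapchain is only rebuilt lazily
// (should_reinitialize_swapchain); VK_ERROR_OUT_OF_DATE_KHR did not present, so the swapchain
// is marked unusable (swapchain_unavailable) until reinitialize_swapchain() succeeds.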
void VKGSRender::present(vk::frame_context_t *ctx)
{
ensure(ctx->present_image != umax);
// Partial CS flush
ctx->swap_command_buffer->flush();
if (!swapchain_unavailable)
{
switch (VkResult error = m_swapchain->present(ctx->present_wait_semaphore, ctx->present_image))
{
case VK_SUCCESS:
break;
case VK_SUBOPTIMAL_KHR:
should_reinitialize_swapchain = true;
break;
case VK_ERROR_OUT_OF_DATE_KHR:
swapchain_unavailable = true;
break;
default:
// Other errors are not from rpcs3. They can be caused by 3rd-party injectors with bad code, over which we have no control.
// Let the application attempt to recover instead of crashing outright.
rsx_log.error("VkPresent returned unexpected error code %lld. Will attempt to recreate the swapchain. Please disable 3rd party injector tools.", static_cast<s64>(error));
swapchain_unavailable = true;
break;
}
}
// Presentation image released; reset value
ctx->present_image = -1;
}
void VKGSRender::advance_queued_frames()
{
// Check all other frames for completion and clear resources
check_present_status();
// Run video memory balancer
m_device->rebalance_memory_type_usage();
vk::vmm_check_memory_usage();
// m_rtts storage is double buffered and should be safe to tag on frame boundary
m_rtts.trim(*m_current_command_buffer, vk::vmm_determine_memory_load_severity());
// Texture cache is also double buffered to prevent use-after-free
m_texture_cache.on_frame_end();
m_samplers_dirty.store(true);
vk::remove_unused_framebuffers();
m_vertex_cache->purge();
m_current_frame->tag_frame_end(m_attrib_ring_info.get_current_put_pos_minus_one(),
m_vertex_env_ring_info.get_current_put_pos_minus_one(),
m_fragment_env_ring_info.get_current_put_pos_minus_one(),
m_vertex_layout_ring_info.get_current_put_pos_minus_one(),
m_fragment_texture_params_ring_info.get_current_put_pos_minus_one(),
m_fragment_constants_ring_info.get_current_put_pos_minus_one(),
m_transform_constants_ring_info.get_current_put_pos_minus_one(),
m_index_buffer_ring_info.get_current_put_pos_minus_one(),
m_texture_upload_buffer_ring_info.get_current_put_pos_minus_one(),
m_raster_env_ring_info.get_current_put_pos_minus_one());
m_queued_frames.push_back(m_current_frame);
ensure(m_queued_frames.size() <= VK_MAX_ASYNC_FRAMES);
m_current_queue_index = (m_current_queue_index + 1) % VK_MAX_ASYNC_FRAMES;
m_current_frame = &frame_context_storage[m_current_queue_index];
m_current_frame->flags |= frame_context_state::dirty;
vk::advance_frame_counter();
}
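// Note: the windowed path in queue_swap_request() below appears to chain three sync points:
//   vkAcquireNextImageKHR signals acquire_signal_semaphore;
//   the final submit waits on it and signals present_wait_semaphore;
//   vkQueuePresentKHR (in present()) waits on present_wait_semaphore before scanning out.
// The wait stage mask (COLOR_ATTACHMENT_OUTPUT | TRANSFER) presumably covers both the
// composited-render and raw-blit presentation paths.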
void VKGSRender::queue_swap_request()
{
ensure(!m_current_frame->swap_command_buffer);
m_current_frame->swap_command_buffer = m_current_command_buffer;
if (m_swapchain->is_headless())
{
m_swapchain->end_frame(*m_current_command_buffer, m_current_frame->present_image);
close_and_submit_command_buffer();
}
else
{
close_and_submit_command_buffer(nullptr,
m_current_frame->acquire_signal_semaphore,
m_current_frame->present_wait_semaphore,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT);
}
// Set up a present request for this frame as well
present(m_current_frame);
// Grab next cb in line and make it usable
m_current_command_buffer = m_primary_cb_list.next();
m_current_command_buffer->reset();
m_current_command_buffer->begin();
// Set up new pointers for the next frame
advance_queued_frames();
}
void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx)
{
ensure(ctx->swap_command_buffer);
// Perform hard swap here
if (ctx->swap_command_buffer->wait(FRAME_PRESENT_TIMEOUT) != VK_SUCCESS)
{
// Lost surface/device, release swapchain
swapchain_unavailable = true;
}
// Resource cleanup.
// TODO: This is some outdated crap.
{
if (m_overlay_manager && m_overlay_manager->has_dirty())
{
auto ui_renderer = vk::get_overlay_pass<vk::ui_overlay_renderer>();
m_overlay_manager->lock_shared();
std::vector<u32> uids_to_dispose;
uids_to_dispose.reserve(m_overlay_manager->get_dirty().size());
for (const auto& view : m_overlay_manager->get_dirty())
{
ui_renderer->remove_temp_resources(view->uid);
uids_to_dispose.push_back(view->uid);
}
m_overlay_manager->unlock_shared();
m_overlay_manager->dispose(uids_to_dispose);
}
vk::get_resource_manager()->trim();
vk::reset_global_resources();
ctx->buffer_views_to_clean.clear();
const auto shadermode = g_cfg.video.shadermode.get();
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
{
// TODO: This is jank AF
m_vertex_instructions_buffer.reset_allocation_stats();
m_fragment_instructions_buffer.reset_allocation_stats();
}
if (ctx->last_frame_sync_time > m_last_heap_sync_time)
{
m_last_heap_sync_time = ctx->last_frame_sync_time;
// Heap cleanup; deallocates memory consumed by the frame if it is still held
m_attrib_ring_info.m_get_pos = ctx->attrib_heap_ptr;
m_vertex_env_ring_info.m_get_pos = ctx->vtx_env_heap_ptr;
m_fragment_env_ring_info.m_get_pos = ctx->frag_env_heap_ptr;
m_fragment_constants_ring_info.m_get_pos = ctx->frag_const_heap_ptr;
m_transform_constants_ring_info.m_get_pos = ctx->vtx_const_heap_ptr;
m_vertex_layout_ring_info.m_get_pos = ctx->vtx_layout_heap_ptr;
m_fragment_texture_params_ring_info.m_get_pos = ctx->frag_texparam_heap_ptr;
m_index_buffer_ring_info.m_get_pos = ctx->index_heap_ptr;
m_texture_upload_buffer_ring_info.m_get_pos = ctx->texture_upload_heap_ptr;
m_attrib_ring_info.notify();
m_vertex_env_ring_info.notify();
m_fragment_env_ring_info.notify();
m_fragment_constants_ring_info.notify();
m_transform_constants_ring_info.notify();
m_vertex_layout_ring_info.notify();
m_fragment_texture_params_ring_info.notify();
m_index_buffer_ring_info.notify();
m_texture_upload_buffer_ring_info.notify();
}
}
ctx->swap_command_buffer = nullptr;
// Remove from queued list
while (!m_queued_frames.empty())
{
auto frame = m_queued_frames.front();
m_queued_frames.pop_front();
if (frame == ctx)
{
break;
}
}
vk::advance_completed_frame_counter();
}
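// Minimal sketch of the ring-heap lifetime model used by tag_frame_end()/frame_context_cleanup()
// (member names mirror the code above; illustrative only):
//   // at frame submission:
//   frame->attrib_heap_ptr = m_attrib_ring_info.get_current_put_pos_minus_one();
//   // once the GPU has finished the frame:
//   m_attrib_ring_info.m_get_pos = frame->attrib_heap_ptr; // reclaim the frame's span
//   m_attrib_ring_info.notify();                           // wake producers waiting on space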
vk::viewable_image* VKGSRender::get_present_source(/* inout */ vk::present_surface_info* info, const rsx::avconf& avconfig)
{
vk::viewable_image* image_to_flip = nullptr;
// @FIXME: This entire function needs to be rewritten to go through the texture cache's "upload_texture" routine.
// That method is not a 1:1 replacement due to handling of insets that is done differently here.
// Check the surface store first
const auto format_bpp = rsx::get_format_block_size_in_bytes(info->format);
const auto overlap_info = m_rtts.get_merged_texture_memory_region(*m_current_command_buffer,
info->address, info->width, info->height, info->pitch, format_bpp, rsx::surface_access::transfer_read);
if (!overlap_info.empty())
{
const auto& section = overlap_info.back();
auto surface = vk::as_rtt(section.surface);
bool viable = false;
if (section.base_address >= info->address)
{
const auto surface_width = surface->get_surface_width<rsx::surface_metrics::samples>();
const auto surface_height = surface->get_surface_height<rsx::surface_metrics::samples>();
if (section.base_address == info->address)
{
// Check for fit or crop
viable = (surface_width >= info->width && surface_height >= info->height);
}
else
{
// Check for borders and letterboxing
const u32 inset_offset = section.base_address - info->address;
const u32 inset_y = inset_offset / info->pitch;
const u32 inset_x = (inset_offset % info->pitch) / format_bpp;
const u32 full_width = surface_width + inset_x + inset_x;
const u32 full_height = surface_height + inset_y + inset_y;
viable = (full_width == info->width && full_height == info->height);
}
if (viable)
{
image_to_flip = section.surface->get_surface(rsx::surface_access::transfer_read);
std::tie(info->width, info->height) = rsx::apply_resolution_scale<true>(
std::min(surface_width, info->width),
std::min(surface_height, info->height));
}
}
}
else if (auto surface = m_texture_cache.find_texture_from_dimensions<true>(info->address, info->format);
surface && surface->get_width() >= info->width && surface->get_height() >= info->height)
{
// Hack - this should be the first location to check for output
// The render might have been done offscreen or in software and a blit used to display
image_to_flip = dynamic_cast<vk::viewable_image*>(surface->get_raw_texture());
}
// The correct output format is determined by the AV configuration set in CellVideoOutConfigure by the game.
// 99.9% of the time, this will match the backbuffer fbo format used in rendering/compositing the output.
// But in some cases, let's just say some devs are creative.
const auto expected_format = RSX_display_format_to_vk_format(avconfig.format);
if (!image_to_flip) [[unlikely]]
{
// Read from cell
const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::framebuffer_storage;
const auto overlap = m_texture_cache.find_texture_from_range<true>(range, 0, lookup_mask);
for (const auto & section : overlap)
{
if (!section->is_synchronized())
{
section->copy_texture(*m_current_command_buffer, true);
}
}
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_dma_transfer)
{
// Submit for processing to lower hard fault penalty
flush_command_queue();
}
m_texture_cache.invalidate_range(*m_current_command_buffer, range, rsx::invalidation_cause::read);
image_to_flip = m_texture_cache.upload_image_simple(*m_current_command_buffer, expected_format, info->address, info->width, info->height, info->pitch);
}
else if (image_to_flip->format() != expected_format)
{
// Devs are being creative. Force-cast this to the proper pixel layout.
auto dst_img = m_texture_cache.create_temporary_subresource_storage(
RSX_FORMAT_CLASS_COLOR, expected_format, info->width, info->height, 1, 1, 1,
VK_IMAGE_TYPE_2D, 0, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
if (dst_img)
{
const areai src_rect = { 0, 0, static_cast<int>(info->width), static_cast<int>(info->height) };
const areai dst_rect = src_rect;
dst_img->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
if (vk::formats_are_bitcast_compatible(dst_img.get(), image_to_flip))
{
vk::copy_image(*m_current_command_buffer, image_to_flip, dst_img.get(), src_rect, dst_rect, 1);
}
else
{
vk::copy_image_typeless(*m_current_command_buffer, image_to_flip, dst_img.get(), src_rect, dst_rect, 1);
}
image_to_flip = dst_img.get();
m_texture_cache.dispose_reusable_image(dst_img);
}
}
return image_to_flip;
}
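// Worked example for the inset/letterbox test above (illustrative numbers):
// a 1280x720 display buffer with a 5120-byte pitch (4 bpp), overlapped by a
// 1280x648 surface that starts 184320 bytes into the buffer, gives
//   inset_y = 184320 / 5120 = 36, inset_x = (184320 % 5120) / 4 = 0
//   full_height = 648 + 36 + 36 = 720 -> accepted as a vertically centered inset.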
void VKGSRender::flip(const rsx::display_flip_info_t& info)
{
// Check swapchain condition/status
if (!m_swapchain->supports_automatic_wm_reports())
{
if (m_swapchain_dims.width != m_frame->client_width() + 0u ||
m_swapchain_dims.height != m_frame->client_height() + 0u)
{
swapchain_unavailable = true;
}
}
if (swapchain_unavailable || should_reinitialize_swapchain)
{
reinitialize_swapchain();
}
m_profiler.start();
if (m_current_frame == &m_aux_frame_context)
{
m_current_frame = &frame_context_storage[m_current_queue_index];
if (m_current_frame->swap_command_buffer)
{
// It's possible this flip request was triggered by overlays while the flip queue is in an undefined state
frame_context_cleanup(m_current_frame);
}
// Swap aux storage and current frame; aux storage should always be ready for use at all times
m_current_frame->swap_storage(m_aux_frame_context);
m_current_frame->grab_resources(m_aux_frame_context);
}
else if (m_current_frame->swap_command_buffer)
{
if (info.stats.draw_calls > 0)
{
// This can be 'legal' if the window was being resized and no polling happened because of the swapchain_unavailable flag
rsx_log.error("Possible data corruption on frame context storage detected");
}
// There were no draws and back-to-back flips happened
frame_context_cleanup(m_current_frame);
}
if (info.skip_frame || swapchain_unavailable)
{
if (!info.skip_frame)
{
ensure(swapchain_unavailable);
// Perform a mini-flip here without invoking present code
m_current_frame->swap_command_buffer = m_current_command_buffer;
flush_command_queue(true);
vk::advance_frame_counter();
frame_context_cleanup(m_current_frame);
}
m_frame->flip(m_context);
rsx::thread::flip(info);
return;
}
u32 buffer_width = display_buffers[info.buffer].width;
u32 buffer_height = display_buffers[info.buffer].height;
u32 buffer_pitch = display_buffers[info.buffer].pitch;
u32 av_format;
const auto& avconfig = g_fxo->get<rsx::avconf>();
if (!buffer_width)
{
buffer_width = avconfig.resolution_x;
buffer_height = avconfig.resolution_y;
}
if (avconfig.state)
{
av_format = avconfig.get_compatible_gcm_format();
if (!buffer_pitch)
buffer_pitch = buffer_width * avconfig.get_bpp();
const u32 video_frame_height = (avconfig.stereo_mode == stereo_render_mode_options::disabled ? avconfig.resolution_y : ((avconfig.resolution_y - 30) / 2));
buffer_width = std::min(buffer_width, avconfig.resolution_x);
buffer_height = std::min(buffer_height, video_frame_height);
}
else
{
av_format = CELL_GCM_TEXTURE_A8R8G8B8;
if (!buffer_pitch)
buffer_pitch = buffer_width * 4;
}
// Scan memory for required data. This is done early to optimize waiting for the driver image acquire below.
vk::viewable_image *image_to_flip = nullptr, *image_to_flip2 = nullptr;
if (info.buffer < display_buffers_count && buffer_width && buffer_height)
{
vk::present_surface_info present_info
{
.address = rsx::get_address(display_buffers[info.buffer].offset, CELL_GCM_LOCATION_LOCAL),
.format = av_format,
.width = buffer_width,
.height = buffer_height,
.pitch = buffer_pitch,
.eye = 0
};
image_to_flip = get_present_source(&present_info, avconfig);
if (avconfig.stereo_mode != stereo_render_mode_options::disabled) [[unlikely]]
{
const auto [unused, min_expected_height] = rsx::apply_resolution_scale<true>(RSX_SURFACE_DIMENSION_IGNORED, buffer_height + 30);
if (image_to_flip->height() < min_expected_height)
{
// Get image for second eye
const u32 image_offset = (buffer_height + 30) * buffer_pitch + display_buffers[info.buffer].offset;
present_info.width = buffer_width;
present_info.height = buffer_height;
present_info.address = rsx::get_address(image_offset, CELL_GCM_LOCATION_LOCAL);
present_info.eye = 1;
image_to_flip2 = get_present_source(&present_info, avconfig);
}
else
{
// Account for possible insets
const auto [unused2, scaled_buffer_height] = rsx::apply_resolution_scale<true>(RSX_SURFACE_DIMENSION_IGNORED, buffer_height);
buffer_height = std::min<u32>(image_to_flip->height() - min_expected_height, scaled_buffer_height);
}
}
buffer_width = present_info.width;
buffer_height = present_info.height;
}
if (info.emu_flip)
{
evaluate_cpu_usage_reduction_limits();
}
// Prepare surface for new frame. Set no timeout here so that we wait for the next image if need be
ensure(m_current_frame->present_image == umax);
ensure(m_current_frame->swap_command_buffer == nullptr);
u64 timeout = m_swapchain->get_swap_image_count() <= VK_MAX_ASYNC_FRAMES ? 0ull : 100000000ull;
while (VkResult status = m_swapchain->acquire_next_swapchain_image(m_current_frame->acquire_signal_semaphore, timeout, &m_current_frame->present_image))
{
switch (status)
{
case VK_TIMEOUT:
case VK_NOT_READY:
{
// In some cases, after a fullscreen switch, the driver only allows N-1 images to be acquirable, where N = number of available swap images.
// This means that any acquired images have to be released before acquireNextImage can return successfully,
// despite the driver reporting 2 swapchain images as available.
// This makes fullscreen performance slower than windowed performance, as throughput is lowered by losing one presentable image.
// Found on AMD Crimson 17.7.2
// Whatever status was returned, this is now a spin-wait
timeout = 0ull;
check_present_status();
continue;
}
case VK_SUBOPTIMAL_KHR:
should_reinitialize_swapchain = true;
break;
case VK_ERROR_OUT_OF_DATE_KHR:
rsx_log.warning("vkAcquireNextImageKHR failed with VK_ERROR_OUT_OF_DATE_KHR. Flip request ignored until surface is recreated.");
swapchain_unavailable = true;
reinitialize_swapchain();
continue;
default:
vk::die_with_error(status);
}
if (should_reinitialize_swapchain)
{
// Image is valid, new swapchain will be generated later
break;
}
}
// Confirm that the driver did not silently fail
ensure(m_current_frame->present_image != umax);
// Calculate output dimensions. Done after swapchain acquisition in case it was recreated.
areai aspect_ratio;
if (!g_cfg.video.stretch_to_display_area)
{
const auto converted = avconfig.aspect_convert_region({ buffer_width, buffer_height }, m_swapchain_dims);
aspect_ratio = static_cast<areai>(converted);
}
else
{
aspect_ratio = { 0, 0, s32(m_swapchain_dims.width), s32(m_swapchain_dims.height) };
}
// Blit contents to screen..
VkImage target_image = m_swapchain->get_image(m_current_frame->present_image);
const auto present_layout = m_swapchain->get_optimal_present_layout();
const VkImageSubresourceRange subresource_range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
VkImageLayout target_layout = present_layout;
VkRenderPass single_target_pass = VK_NULL_HANDLE;
vk::framebuffer_holder* direct_fbo = nullptr;
rsx::simple_array<vk::viewable_image*> calibration_src;
if (!image_to_flip || aspect_ratio.x1 || aspect_ratio.y1)
{
// Clear the window background to black
VkClearColorValue clear_black {};
vk::change_image_layout(*m_current_command_buffer, target_image, present_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresource_range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_black, 1, &subresource_range);
target_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
}
const output_scaling_mode output_scaling = g_cfg.video.output_scaling.get();
if (!m_upscaler || m_output_scaling != output_scaling)
{
m_output_scaling = output_scaling;
if (m_output_scaling == output_scaling_mode::nearest)
{
m_upscaler = std::make_unique<vk::nearest_upscale_pass>();
}
else if (m_output_scaling == output_scaling_mode::fsr)
{
m_upscaler = std::make_unique<vk::fsr_upscale_pass>();
}
else
{
m_upscaler = std::make_unique<vk::bilinear_upscale_pass>();
}
}
if (image_to_flip)
{
const bool use_full_rgb_range_output = g_cfg.video.full_rgb_range_output.get();
if (!use_full_rgb_range_output || !rsx::fcmp(avconfig.gamma, 1.f) || avconfig.stereo_mode != stereo_render_mode_options::disabled) [[unlikely]]
{
if (image_to_flip) calibration_src.push_back(image_to_flip);
if (image_to_flip2) calibration_src.push_back(image_to_flip2);
if (m_output_scaling == output_scaling_mode::fsr && avconfig.stereo_mode == stereo_render_mode_options::disabled) // 3D will be implemented later
{
// Run upscaling pass before the rest of the output effects pipeline
// This can be done with all upscalers, but we already get bilinear upscaling for free if we just run the output filters directly
VkImageBlit request = {};
request.srcSubresource = { image_to_flip->aspect(), 0, 0, 1 };
request.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
request.srcOffsets[0] = { 0, 0, 0 };
request.srcOffsets[1] = { s32(buffer_width), s32(buffer_height), 1 };
request.dstOffsets[0] = { 0, 0, 0 };
request.dstOffsets[1] = { aspect_ratio.width(), aspect_ratio.height(), 1 };
for (unsigned i = 0; i < calibration_src.size(); ++i)
{
const rsx::flags32_t mode = (i == 0) ? UPSCALE_LEFT_VIEW : UPSCALE_RIGHT_VIEW;
calibration_src[i] = m_upscaler->scale_output(*m_current_command_buffer, calibration_src[i], VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED, request, mode);
}
}
vk::change_image_layout(*m_current_command_buffer, target_image, target_layout, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, subresource_range);
target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
const auto key = vk::get_renderpass_key(m_swapchain->get_surface_format());
single_target_pass = vk::get_renderpass(*m_device, key);
ensure(single_target_pass != VK_NULL_HANDLE);
direct_fbo = vk::get_framebuffer(*m_device, m_swapchain_dims.width, m_swapchain_dims.height, VK_FALSE, single_target_pass, m_swapchain->get_surface_format(), target_image);
direct_fbo->add_ref();
vk::get_overlay_pass<vk::video_out_calibration_pass>()->run(
*m_current_command_buffer, areau(aspect_ratio), direct_fbo, calibration_src,
avconfig.gamma, !use_full_rgb_range_output, avconfig.stereo_mode, single_target_pass);
direct_fbo->release();
}
else
{
// Do raw transfer here as there is no image object associated with textures owned by the driver (TODO)
VkImageBlit rgn = {};
rgn.srcSubresource = { image_to_flip->aspect(), 0, 0, 1 };
rgn.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
rgn.srcOffsets[0] = { 0, 0, 0 };
rgn.srcOffsets[1] = { s32(buffer_width), s32(buffer_height), 1 };
rgn.dstOffsets[0] = { aspect_ratio.x1, aspect_ratio.y1, 0 };
rgn.dstOffsets[1] = { aspect_ratio.x2, aspect_ratio.y2, 1 };
if (target_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
{
vk::change_image_layout(*m_current_command_buffer, target_image, target_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresource_range);
target_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
}
m_upscaler->scale_output(*m_current_command_buffer, image_to_flip, target_image, target_layout, rgn, UPSCALE_AND_COMMIT | UPSCALE_DEFAULT_VIEW);
}
if (g_user_asked_for_screenshot || (g_recording_mode != recording_mode::stopped && m_frame->can_consume_frame()))
{
const usz sshot_size = buffer_height * buffer_width * 4;
vk::buffer sshot_vkbuf(*m_device, utils::align(sshot_size, 0x100000), m_device->get_memory_mapping().host_visible_coherent,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_TRANSFER_DST_BIT, 0, VMM_ALLOCATION_POOL_UNDEFINED);
VkBufferImageCopy copy_info;
copy_info.bufferOffset = 0;
copy_info.bufferRowLength = 0;
copy_info.bufferImageHeight = 0;
copy_info.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_info.imageSubresource.baseArrayLayer = 0;
copy_info.imageSubresource.layerCount = 1;
copy_info.imageSubresource.mipLevel = 0;
copy_info.imageOffset.x = 0;
copy_info.imageOffset.y = 0;
copy_info.imageOffset.z = 0;
copy_info.imageExtent.width = buffer_width;
copy_info.imageExtent.height = buffer_height;
copy_info.imageExtent.depth = 1;
image_to_flip->push_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vk::copy_image_to_buffer(*m_current_command_buffer, image_to_flip, &sshot_vkbuf, copy_info);
image_to_flip->pop_layout(*m_current_command_buffer);
flush_command_queue(true);
auto src = sshot_vkbuf.map(0, sshot_size);
std::vector<u8> sshot_frame(sshot_size);
memcpy(sshot_frame.data(), src, sshot_size);
sshot_vkbuf.unmap();
const bool is_bgra = image_to_flip->format() == VK_FORMAT_B8G8R8A8_UNORM;
if (g_user_asked_for_screenshot.exchange(false))
{
m_frame->take_screenshot(std::move(sshot_frame), buffer_width, buffer_height, is_bgra);
}
else
{
m_frame->present_frame(sshot_frame, buffer_width * 4, buffer_width, buffer_height, is_bgra);
}
}
}
const bool has_overlay = (m_overlay_manager && m_overlay_manager->has_visible());
if (g_cfg.video.overlay || has_overlay)
{
if (target_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
{
// Change the image layout whilst setting up a dependency on waiting for the blit op to finish before we start writing
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.oldLayout = target_layout;
barrier.image = target_image;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange = subresource_range;
vkCmdPipelineBarrier(*m_current_command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &barrier);
target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
if (!direct_fbo)
{
const auto key = vk::get_renderpass_key(m_swapchain->get_surface_format());
single_target_pass = vk::get_renderpass(*m_device, key);
ensure(single_target_pass != VK_NULL_HANDLE);
direct_fbo = vk::get_framebuffer(*m_device, m_swapchain_dims.width, m_swapchain_dims.height, VK_FALSE, single_target_pass, m_swapchain->get_surface_format(), target_image);
}
direct_fbo->add_ref();
if (has_overlay)
{
// Lock to avoid modification during run-update chain
auto ui_renderer = vk::get_overlay_pass<vk::ui_overlay_renderer>();
std::lock_guard lock(*m_overlay_manager);
for (const auto& view : m_overlay_manager->get_views())
{
ui_renderer->run(*m_current_command_buffer, areau(aspect_ratio), direct_fbo, single_target_pass, m_texture_upload_buffer_ring_info, *view.get());
}
}
if (g_cfg.video.overlay)
{
const auto num_dirty_textures = m_texture_cache.get_unreleased_textures_count();
const auto texture_memory_size = m_texture_cache.get_texture_memory_in_use() / (1024 * 1024);
const auto tmp_texture_memory_size = m_texture_cache.get_temporary_memory_in_use() / (1024 * 1024);
const auto num_flushes = m_texture_cache.get_num_flush_requests();
const auto num_mispredict = m_texture_cache.get_num_cache_mispredictions();
const auto num_speculate = m_texture_cache.get_num_cache_speculative_writes();
const auto num_misses = m_texture_cache.get_num_cache_misses();
const auto num_unavoidable = m_texture_cache.get_num_unavoidable_hard_faults();
const auto cache_miss_ratio = static_cast<u32>(ceil(m_texture_cache.get_cache_miss_ratio() * 100));
const auto num_texture_upload = m_texture_cache.get_texture_upload_calls_this_frame();
const auto num_texture_upload_miss = m_texture_cache.get_texture_upload_misses_this_frame();
const auto texture_upload_miss_ratio = m_texture_cache.get_texture_upload_miss_percentage();
const auto texture_copies_ellided = m_texture_cache.get_texture_copies_ellided_this_frame();
const auto vertex_cache_hit_count = (info.stats.vertex_cache_request_count - info.stats.vertex_cache_miss_count);
const auto vertex_cache_hit_ratio = info.stats.vertex_cache_request_count
? (vertex_cache_hit_count * 100) / info.stats.vertex_cache_request_count
: 0;
rsx::overlays::set_debug_overlay_text(fmt::format(
"RSX Load: %3d%%\n"
"draw calls: %17d\n"
"submits: %20d\n"
"draw call setup: %12dus\n"
"vertex upload time: %9dus\n"
"texture upload time: %8dus\n"
"draw call execution: %8dus\n"
"submit and flip: %12dus\n"
"Unreleased textures: %8d\n"
"Texture cache memory: %7dM\n"
"Temporary texture memory: %3dM\n"
"Flush requests: %13d = %2d (%3d%%) hard faults, %2d unavoidable, %2d misprediction(s), %2d speculation(s)\n"
"Texture uploads: %12u (%u from CPU - %02u%%, %u copies avoided)\n"
"Vertex cache hits: %10u/%u (%u%%)",
get_load(), info.stats.draw_calls, info.stats.submit_count, info.stats.setup_time, info.stats.vertex_upload_time,
info.stats.textures_upload_time, info.stats.draw_exec_time, info.stats.flip_time,
num_dirty_textures, texture_memory_size, tmp_texture_memory_size,
num_flushes, num_misses, cache_miss_ratio, num_unavoidable, num_mispredict, num_speculate,
num_texture_upload, num_texture_upload_miss, texture_upload_miss_ratio, texture_copies_ellided,
vertex_cache_hit_count, info.stats.vertex_cache_request_count, vertex_cache_hit_ratio)
);
}
direct_fbo->release();
}
if (target_layout != present_layout)
{
vk::change_image_layout(*m_current_command_buffer, target_image, target_layout, present_layout, subresource_range);
}
queue_swap_request();
m_frame_stats.flip_time = m_profiler.duration();
m_frame->flip(m_context);
rsx::thread::flip(info);
}
| size: 32,668 | language: C++ | extension: .cpp | total_lines: 727 | avg_line_length: 41.61348 | max_line_length: 196 | alphanum_fraction: 0.723005 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false |
| id: 5,458 | file_name: VKRenderTargets.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/VKRenderTargets.cpp | content: |
#include "VKCompute.h"
#include "VKDMA.h"
#include "VKRenderTargets.h"
#include "VKResourceManager.h"
#include "Emu/RSX/rsx_methods.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/RSX/Common/tiled_dma_copy.hpp"
namespace vk
{
namespace surface_cache_utils
{
void dispose(vk::buffer* buf)
{
auto obj = vk::disposable_t::make(buf);
vk::get_resource_manager()->dispose(obj);
}
}
void surface_cache::destroy()
{
invalidate_all();
invalidated_resources.clear();
}
u64 surface_cache::get_surface_cache_memory_quota(u64 total_device_memory)
{
total_device_memory /= 0x100000;
u64 quota = 0;
if (total_device_memory >= 2048)
{
quota = std::min<u64>(6144, (total_device_memory * 40) / 100);
}
else if (total_device_memory >= 1024)
{
quota = std::max<u64>(512, (total_device_memory * 30) / 100);
}
else if (total_device_memory >= 768)
{
quota = 256;
}
else
{
// Tiny VRAM pools: cap the quota at 128MB, and never budget more than half of the available VRAM
quota = std::min<u64>(128, total_device_memory / 2);
}
return quota * 0x100000;
}
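// Worked examples of the quota curve above (illustrative VRAM sizes):
//   8192 MB -> min(6144, 8192 * 40 / 100) = 3276 MB
//   1024 MB -> max(512, 1024 * 30 / 100)  =  512 MB
//    512 MB -> min(128, 512 / 2)          =  128 MB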
bool surface_cache::can_collapse_surface(const std::unique_ptr<vk::render_target>& surface, rsx::problem_severity severity)
{
if (severity < rsx::problem_severity::fatal &&
vk::vmm_determine_memory_load_severity() < rsx::problem_severity::fatal)
{
// We may be able to allocate what we need.
return true;
}
// Check if we need to do any allocations. Do not collapse in such a situation otherwise
if (surface->samples() > 1 && !surface->resolve_surface)
{
return false;
}
// Resolve target does exist. Scan through the entire collapse chain
for (auto& region : surface->old_contents)
{
// FIXME: This is just lazy
auto proxy = std::unique_ptr<vk::render_target>(vk::as_rtt(region.source));
const bool collapsible = can_collapse_surface(proxy, severity);
proxy.release();
if (!collapsible)
{
return false;
}
}
return true;
}
bool surface_cache::handle_memory_pressure(vk::command_buffer& cmd, rsx::problem_severity severity)
{
bool any_released = rsx::surface_store<surface_cache_traits>::handle_memory_pressure(cmd, severity);
if (severity >= rsx::problem_severity::fatal)
{
std::vector<std::unique_ptr<vk::viewable_image>> resolve_target_cache;
std::vector<vk::render_target*> deferred_spills;
auto gc = vk::get_resource_manager();
// Drop MSAA resolve/unresolve caches. Only trigger when a hard sync is guaranteed to follow, else it will cause even more problems!
// 2-pass to ensure resources are available where they are most needed
auto relieve_memory_pressure = [&](auto& list, const utils::address_range& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{
auto& rtt = it->second;
if (!rtt->spill_request_tag || rtt->spill_request_tag < rtt->last_rw_access_tag)
{
// We're not going to be spilling into system RAM. If a MSAA resolve target exists, remove it to save memory.
if (rtt->resolve_surface)
{
resolve_target_cache.emplace_back(std::move(rtt->resolve_surface));
rtt->msaa_flags |= rsx::surface_state_flags::require_resolve;
any_released |= true;
}
rtt->spill_request_tag = 0;
continue;
}
if (rtt->resolve_surface || rtt->samples() == 1)
{
// Can spill immediately. Do it.
ensure(rtt->spill(cmd, resolve_target_cache));
any_released |= true;
continue;
}
deferred_spills.push_back(rtt.get());
}
};
// 1. Spill and strip any 'invalidated resources'. At this point it doesn't matter, and donating to the resolve cache is a plus.
for (auto& surface : invalidated_resources)
{
if (!surface->value && !surface->resolve_surface)
{
// Unspilled resources can have no value but have a resolve surface used for read
continue;
}
// Only spill surfaces that still have references. Other surfaces already marked for removal will inevitably be deleted when it is time to free_invalidated
if (surface->has_refs() && (surface->resolve_surface || surface->samples() == 1))
{
ensure(surface->spill(cmd, resolve_target_cache));
any_released |= true;
}
else if (surface->resolve_surface)
{
ensure(!surface->has_refs());
resolve_target_cache.emplace_back(std::move(surface->resolve_surface));
surface->msaa_flags |= rsx::surface_state_flags::require_resolve;
any_released |= true;
}
else if (surface->has_refs())
{
deferred_spills.push_back(surface.get());
}
}
// 2. Scan the list and spill resources that can be spilled immediately if requested. Also gather resources from those that don't need it.
relieve_memory_pressure(m_render_targets_storage, m_render_targets_memory_range);
relieve_memory_pressure(m_depth_stencil_storage, m_depth_stencil_memory_range);
// 3. Write to system heap everything marked to spill
for (auto& surface : deferred_spills)
{
any_released |= surface->spill(cmd, resolve_target_cache);
}
// 4. Cleanup; removes all the resources used up here that are no longer needed for the moment
for (auto& data : resolve_target_cache)
{
gc->dispose(data);
}
}
return any_released;
}
void surface_cache::trim(vk::command_buffer& cmd, rsx::problem_severity memory_pressure)
{
run_cleanup_internal(cmd, rsx::problem_severity::moderate, 300, [](vk::command_buffer& cmd)
{
if (!cmd.is_recording())
{
cmd.begin();
}
});
const u64 last_finished_frame = vk::get_last_completed_frame_id();
invalidated_resources.remove_if([&](std::unique_ptr<vk::render_target>& rtt)
{
ensure(rtt->frame_tag != 0);
if (rtt->has_refs())
{
// Actively in use, likely for a reading pass.
// Call handle_memory_pressure before calling this method.
return false;
}
if (rtt->frame_tag >= last_finished_frame)
{
// RTT itself still in use by the frame.
return false;
}
if (!rtt->old_contents.empty())
{
rtt->clear_rw_barrier();
}
if (rtt->resolve_surface && memory_pressure >= rsx::problem_severity::moderate)
{
// We do not need to keep resolve targets around.
// TODO: We should surrender this to an image cache immediately for reuse.
vk::get_resource_manager()->dispose(rtt->resolve_surface);
}
switch (memory_pressure)
{
case rsx::problem_severity::low:
return (rtt->unused_check_count() >= 2);
case rsx::problem_severity::moderate:
return (rtt->unused_check_count() >= 1);
case rsx::problem_severity::severe:
case rsx::problem_severity::fatal:
// We're almost dead anyway. Remove forcefully.
vk::get_resource_manager()->dispose(rtt);
return true;
default:
fmt::throw_exception("Unreachable");
}
});
}
bool surface_cache::is_overallocated()
{
const auto surface_cache_vram_load = vmm_get_application_pool_usage(VMM_ALLOCATION_POOL_SURFACE_CACHE);
const auto surface_cache_allocation_quota = get_surface_cache_memory_quota(get_current_renderer()->get_memory_mapping().device_local_total_bytes);
return (surface_cache_vram_load > surface_cache_allocation_quota);
}
bool surface_cache::spill_unused_memory()
{
// Determine how much memory we need to save to system RAM if any
const u64 current_surface_cache_memory = vk::vmm_get_application_pool_usage(VMM_ALLOCATION_POOL_SURFACE_CACHE);
const u64 total_device_memory = vk::get_current_renderer()->get_memory_mapping().device_local_total_bytes;
const u64 target_memory = get_surface_cache_memory_quota(total_device_memory);
rsx_log.warning("Surface cache memory usage is %lluM", current_surface_cache_memory / 0x100000);
if (current_surface_cache_memory < target_memory)
{
rsx_log.warning("Surface cache memory usage is very low. Will not spill contents to RAM");
return false;
}
// Very slow, but should only be called when the situation is dire
std::vector<render_target*> sorted_list;
sorted_list.reserve(1024);
auto process_list_function = [&](auto& list, const utils::address_range& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{
// NOTE: Check if memory is available instead of value in case we ran out of memory during unspill
auto& surface = it->second;
if (surface->memory && !surface->is_bound)
{
sorted_list.push_back(surface.get());
}
}
};
process_list_function(m_render_targets_storage, m_render_targets_memory_range);
process_list_function(m_depth_stencil_storage, m_depth_stencil_memory_range);
std::sort(sorted_list.begin(), sorted_list.end(), FN(x->last_rw_access_tag < y->last_rw_access_tag));
// Spill surfaces until VRAM usage drops back down to target_memory bytes
u64 bytes_spilled = 0;
const u64 bytes_to_remove = current_surface_cache_memory - target_memory;
const u64 spill_time = rsx::get_shared_tag();
for (auto& surface : sorted_list)
{
bytes_spilled += surface->memory->size();
surface->spill_request_tag = spill_time;
if (bytes_spilled >= bytes_to_remove)
{
break;
}
}
rsx_log.warning("Surface cache will attempt to spill %llu bytes.", bytes_spilled);
return (bytes_spilled > 0);
}
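// Selection sketch for the loop above: surfaces are sorted by last_rw_access_tag
// (least recently used first) and tagged for spilling until the running total covers
// the overshoot, e.g. 3.2 GB in use against a 2.5 GB quota tags roughly 0.7 GB of
// the coldest surfaces (illustrative numbers).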
// Get the linear resolve target bound to this surface. Initialize if none exists
vk::viewable_image* render_target::get_resolve_target_safe(vk::command_buffer& cmd)
{
if (!resolve_surface)
{
// Create a resolve surface
const auto resolve_w = width() * samples_x;
const auto resolve_h = height() * samples_y;
VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
usage |= (this->info.usage & (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT));
resolve_surface.reset(new vk::viewable_image(
*g_render_device,
g_render_device->get_memory_mapping().device_local,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D,
format(),
resolve_w, resolve_h, 1, 1, 1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
usage,
0,
VMM_ALLOCATION_POOL_SURFACE_CACHE,
format_class()));
resolve_surface->native_component_map = native_component_map;
resolve_surface->change_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
return resolve_surface.get();
}
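// e.g. a 4-sample (2x2) 640x360 MSAA attachment gets a 1280x720 single-sample
// resolve target, so every sample owns exactly one texel of linear storage.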
// Resolve the planar MSAA data into a linear block
void render_target::resolve(vk::command_buffer& cmd)
{
VkImageSubresourceRange range = { aspect(), 0, 1, 0, 1 };
// NOTE: This surface can only be in the ATTACHMENT_OPTIMAL layout
// The resolve surface can be in any type of access, but we have to assume it is likely in read-only mode like shader read-only
if (!is_depth_surface()) [[likely]]
{
// This is the source; finish writing before reading
vk::insert_image_memory_barrier(
cmd, this->value,
this->current_layout, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT,
range);
// This is the target; finish reading before writing
vk::insert_image_memory_barrier(
cmd, resolve_surface->value,
resolve_surface->current_layout, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
range);
this->current_layout = VK_IMAGE_LAYOUT_GENERAL;
resolve_surface->current_layout = VK_IMAGE_LAYOUT_GENERAL;
}
else
{
this->push_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
resolve_surface->change_layout(cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
}
vk::resolve_image(cmd, resolve_surface.get(), this);
if (!is_depth_surface()) [[likely]]
{
vk::insert_image_memory_barrier(
cmd, this->value,
this->current_layout, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
range);
vk::insert_image_memory_barrier(
cmd, resolve_surface->value,
VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
range);
this->current_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
resolve_surface->current_layout = VK_IMAGE_LAYOUT_GENERAL;
}
else
{
this->pop_layout(cmd);
resolve_surface->change_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
msaa_flags &= ~(rsx::surface_state_flags::require_resolve);
}
// Unresolve the linear data into planar MSAA data
void render_target::unresolve(vk::command_buffer& cmd)
{
ensure(!(msaa_flags & rsx::surface_state_flags::require_resolve));
VkImageSubresourceRange range = { aspect(), 0, 1, 0, 1 };
if (!is_depth_surface()) [[likely]]
{
ensure(current_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
// This is the dest; finish reading before writing
vk::insert_image_memory_barrier(
cmd, this->value,
this->current_layout, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
range);
// This is the source; finish writing before reading
vk::insert_image_memory_barrier(
cmd, resolve_surface->value,
resolve_surface->current_layout, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT,
range);
this->current_layout = VK_IMAGE_LAYOUT_GENERAL;
resolve_surface->current_layout = VK_IMAGE_LAYOUT_GENERAL;
}
else
{
this->push_layout(cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
resolve_surface->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
vk::unresolve_image(cmd, this, resolve_surface.get());
if (!is_depth_surface()) [[likely]]
{
vk::insert_image_memory_barrier(
cmd, this->value,
this->current_layout, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
range);
vk::insert_image_memory_barrier(
cmd, resolve_surface->value,
VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT,
range);
this->current_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
resolve_surface->current_layout = VK_IMAGE_LAYOUT_GENERAL;
}
else
{
this->pop_layout(cmd);
resolve_surface->change_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
msaa_flags &= ~(rsx::surface_state_flags::require_unresolve);
}
// Default-initialize memory without loading
void render_target::clear_memory(vk::command_buffer& cmd, vk::image* surface)
{
const auto optimal_layout = (surface->current_layout == VK_IMAGE_LAYOUT_GENERAL) ?
VK_IMAGE_LAYOUT_GENERAL :
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
surface->push_layout(cmd, optimal_layout);
VkImageSubresourceRange range{ surface->aspect(), 0, 1, 0, 1 };
if (surface->aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color = { {0.f, 0.f, 0.f, 1.f} };
vkCmdClearColorImage(cmd, surface->value, surface->current_layout, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, surface->value, surface->current_layout, &clear, 1, &range);
}
surface->pop_layout(cmd);
if (surface == this)
{
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
}
std::vector<VkBufferImageCopy> render_target::build_spill_transfer_descriptors(vk::image* target)
{
std::vector<VkBufferImageCopy> result;
result.reserve(2);
result.push_back({});
auto& rgn = result.back();
rgn.imageExtent.width = target->width();
rgn.imageExtent.height = target->height();
rgn.imageExtent.depth = 1;
rgn.imageSubresource.aspectMask = target->aspect();
rgn.imageSubresource.layerCount = 1;
if (aspect() == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))
{
result.push_back(rgn);
rgn.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
result.back().imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
result.back().bufferOffset = target->width() * target->height() * 4;
}
return result;
}
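// Resulting spill-buffer layout for combined depth-stencil formats (two tightly
// packed planes), which is why spill() below budgets 5 bytes per texel for
// D24S8/D32S8:
//   [ depth plane: width * height * 4 bytes ][ stencil plane: width * height bytes ]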
bool render_target::spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache)
{
u64 element_size;
switch (const auto fmt = format())
{
case VK_FORMAT_D32_SFLOAT:
element_size = 4;
break;
case VK_FORMAT_D32_SFLOAT_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
element_size = 5;
break;
default:
element_size = get_format_texel_width(fmt);
break;
}
vk::viewable_image* src = nullptr;
if (samples() == 1) [[likely]]
{
ensure(value);
src = this;
}
else if (resolve_surface)
{
src = resolve_surface.get();
}
else
{
const auto transfer_w = width() * samples_x;
const auto transfer_h = height() * samples_y;
for (auto& surface : resolve_cache)
{
if (surface->format() == format() &&
surface->width() == transfer_w &&
surface->height() == transfer_h)
{
src = surface.get();
break;
}
}
if (!src)
{
if (vmm_determine_memory_load_severity() <= rsx::problem_severity::moderate)
{
// We have some freedom to allocate something. Add to the shared cache
src = get_resolve_target_safe(cmd);
}
else
{
// TODO: Spill to DMA buf
// For now, just skip this one if we don't have the capacity for it
rsx_log.warning("Could not spill memory due to resolve failure. Will ignore spilling for the moment.");
return false;
}
}
msaa_flags |= rsx::surface_state_flags::require_resolve;
}
// If a resolve is requested, move data to the target
if (msaa_flags & rsx::surface_state_flags::require_resolve)
{
ensure(samples() > 1);
const bool borrowed = [&]()
{
if (src != resolve_surface.get())
{
ensure(!resolve_surface);
resolve_surface.reset(src);
return true;
}
return false;
}();
resolve(cmd);
if (borrowed)
{
resolve_surface.release();
}
}
const auto pdev = vk::get_current_renderer();
const auto alloc_size = element_size * src->width() * src->height();
m_spilled_mem = std::make_unique<vk::buffer>(*pdev, alloc_size, pdev->get_memory_mapping().host_visible_coherent,
0, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, VMM_ALLOCATION_POOL_UNDEFINED);
const auto regions = build_spill_transfer_descriptors(src);
src->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, m_spilled_mem->value, ::size32(regions), regions.data());
// Destroy this object through a cloned object
auto obj = std::unique_ptr<viewable_image>(clone());
vk::get_resource_manager()->dispose(obj);
if (resolve_surface)
{
// Just add to the resolve cache and move on
resolve_cache.emplace_back(std::move(resolve_surface));
}
ensure(!memory && !value && views.empty() && !resolve_surface);
spill_request_tag = 0ull;
return true;
}
void render_target::unspill(vk::command_buffer& cmd)
{
// Recreate the image
const auto pdev = vk::get_current_renderer();
create_impl(*pdev, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, pdev->get_memory_mapping().device_local, VMM_ALLOCATION_POOL_SURFACE_CACHE);
change_layout(cmd, is_depth_surface() ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
// Load image from host-visible buffer
ensure(m_spilled_mem);
// Data transfer can be skipped if an erase command is being served
if (!(state_flags & rsx::surface_state_flags::erase_bkgnd))
{
// Warn. Ideally this should never happen if you have enough resources
rsx_log.warning("[PERFORMANCE WARNING] Loading spilled memory back to the GPU. You may want to lower your resolution scaling.");
vk::image* dst = (samples() > 1) ? get_resolve_target_safe(cmd) : this;
const auto regions = build_spill_transfer_descriptors(dst);
dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdCopyBufferToImage(cmd, m_spilled_mem->value, dst->value, dst->current_layout, ::size32(regions), regions.data());
if (samples() > 1)
{
msaa_flags &= ~rsx::surface_state_flags::require_resolve;
msaa_flags |= rsx::surface_state_flags::require_unresolve;
}
}
// Delete host-visible buffer
vk::get_resource_manager()->dispose(m_spilled_mem);
}
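// Spill/unspill lifecycle sketch (condensed from the two methods above, illustrative):
//   spill():   [MSAA? resolve() into a linear target] -> vkCmdCopyImageToBuffer
//              -> dispose the GPU image, keep m_spilled_mem in host RAM
//   unspill(): recreate the image -> vkCmdCopyBufferToImage (skipped when an erase
//              is pending) -> [MSAA? flag require_unresolve] -> dispose m_spilled_mem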
// Load memory from cell and use to initialize the surface
void render_target::load_memory(vk::command_buffer& cmd)
{
auto& upload_heap = *vk::get_upload_heap();
const bool is_swizzled = (raster_type == rsx::surface_raster_type::swizzle);
rsx::subresource_layout subres{};
subres.width_in_block = subres.width_in_texel = surface_width * samples_x;
subres.height_in_block = subres.height_in_texel = surface_height * samples_y;
subres.pitch_in_block = rsx_pitch / get_bpp();
subres.depth = 1;
subres.data = { vm::get_super_ptr<const std::byte>(base_addr), static_cast<std::span<const std::byte>::size_type>(rsx_pitch * surface_height * samples_y) };
const auto range = get_memory_range();
rsx::flags32_t upload_flags = upload_contents_inline;
u32 heap_align = rsx_pitch;
#if DEBUG_DMA_TILING
std::vector<u8> ext_data;
#endif
if (auto tiled_region = rsx::get_current_renderer()->get_tiled_memory_region(range))
{
#if DEBUG_DMA_TILING
auto real_data = vm::get_super_ptr<u8>(range.start);
ext_data.resize(tiled_region.tile->size);
auto detile_func = get_bpp() == 4
? rsx::detile_texel_data32
: rsx::detile_texel_data16;
detile_func(
ext_data.data(),
real_data,
tiled_region.base_address,
range.start - tiled_region.base_address,
tiled_region.tile->size,
tiled_region.tile->bank,
tiled_region.tile->pitch,
subres.width_in_block,
subres.height_in_block
);
subres.data = std::span(ext_data);
#else
const auto [scratch_buf, linear_data_scratch_offset] = vk::detile_memory_block(cmd, tiled_region, range, subres.width_in_block, subres.height_in_block, get_bpp());
// FIXME: !!EVIL!!
subres.data = { scratch_buf, linear_data_scratch_offset };
subres.pitch_in_block = subres.width_in_block;
upload_flags |= source_is_gpu_resident;
heap_align = subres.width_in_block * get_bpp();
#endif
}
if (g_cfg.video.resolution_scale_percent == 100 && spp == 1) [[likely]]
{
push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vk::upload_image(cmd, this, { subres }, get_gcm_format(), is_swizzled, 1, aspect(), upload_heap, heap_align, upload_flags);
pop_layout(cmd);
}
else
{
vk::image* content = nullptr;
vk::image* final_dst = (samples() > 1) ? get_resolve_target_safe(cmd) : this;
// Prepare dst image
final_dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
if (final_dst->width() == subres.width_in_block && final_dst->height() == subres.height_in_block)
{
// Possible if MSAA is enabled with 100% resolution scale or
// surface dimensions are less than resolution scale threshold and no MSAA.
// Writethrough.
content = final_dst;
}
else
{
content = vk::get_typeless_helper(format(), format_class(), subres.width_in_block, subres.height_in_block);
if (content->current_layout == VK_IMAGE_LAYOUT_UNDEFINED)
{
content->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
}
content->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
// Load Cell data into temp buffer
vk::upload_image(cmd, content, { subres }, get_gcm_format(), is_swizzled, 1, aspect(), upload_heap, heap_align, upload_flags);
// Write into final image
if (content != final_dst)
{
// Avoid layout push/pop on scratch memory by setting explicit layout here
content->pop_layout(cmd);
content->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vk::copy_scaled_image(cmd, content, final_dst,
{ 0, 0, subres.width_in_block, subres.height_in_block },
{ 0, 0, static_cast<s32>(final_dst->width()), static_cast<s32>(final_dst->height()) },
1, true, aspect() == VK_IMAGE_ASPECT_COLOR_BIT ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
content->pop_layout(cmd);
}
final_dst->pop_layout(cmd);
if (samples() > 1)
{
// Trigger unresolve
msaa_flags = rsx::surface_state_flags::require_unresolve;
}
}
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
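// Upload path summary (as implemented above): native scale without MSAA writes the
// decoded scanout directly; otherwise the data lands in a typeless helper first,
// is scale-blitted into the (resolve) target, and MSAA surfaces are flagged with
// require_unresolve so the per-sample data is regenerated before rendering.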
void render_target::initialize_memory(vk::command_buffer& cmd, rsx::surface_access access)
{
const bool is_depth = is_depth_surface();
const bool should_read_buffers = is_depth ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (!should_read_buffers)
{
clear_memory(cmd, this);
if (samples() > 1 && access.is_transfer_or_read())
{
// Only clear the resolve surface if reading from it, otherwise it's a waste
clear_memory(cmd, get_resolve_target_safe(cmd));
}
msaa_flags = rsx::surface_state_flags::ready;
}
else
{
load_memory(cmd);
}
}
vk::viewable_image* render_target::get_surface(rsx::surface_access access_type)
{
last_rw_access_tag = rsx::get_shared_tag();
if (samples() == 1 || !access_type.is_transfer())
{
return this;
}
// A read barrier should have been called before this!
ensure(resolve_surface); // "Read access without explicit barrier"
ensure(!(msaa_flags & rsx::surface_state_flags::require_resolve));
return resolve_surface.get();
}
bool render_target::is_depth_surface() const
{
return !!(aspect() & VK_IMAGE_ASPECT_DEPTH_BIT);
}
bool render_target::matches_dimensions(u16 _width, u16 _height) const
{
// Use forward scaling to account for rounding and clamping errors
const auto [scaled_w, scaled_h] = rsx::apply_resolution_scale<true>(_width, _height);
return (scaled_w == width()) && (scaled_h == height());
}
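// e.g. at 150% resolution scale a game-requested 1280x720 surface is stored as
// 1920x1080; forward-scaling the request and comparing avoids the rounding drift
// of scaling the stored size back down.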
void render_target::texture_barrier(vk::command_buffer& cmd)
{
const auto is_framebuffer_read_only = is_depth_surface() && !rsx::method_registers.depth_write_enabled();
const auto supports_fbo_loops = cmd.get_command_pool().get_owner().get_framebuffer_loops_support();
const auto optimal_layout = supports_fbo_loops ? VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT
: VK_IMAGE_LAYOUT_GENERAL;
if (m_cyclic_ref_tracker.can_skip() && current_layout == optimal_layout && is_framebuffer_read_only)
{
// If we have back-to-back depth-read barriers, skip subsequent ones
// If an actual write is happening, this flag will be automatically reset
return;
}
vk::insert_texture_barrier(cmd, this, optimal_layout);
m_cyclic_ref_tracker.on_insert_texture_barrier();
if (is_framebuffer_read_only)
{
m_cyclic_ref_tracker.allow_skip();
}
}
void render_target::post_texture_barrier(vk::command_buffer& cmd)
{
// This is a fall-out barrier after a cyclic ref when the same surface is still bound.
// In this case, we're just checking that the previous read completes before the next write.
const bool is_framebuffer_read_only = is_depth_surface() && !rsx::method_registers.depth_write_enabled();
if (m_cyclic_ref_tracker.can_skip() && is_framebuffer_read_only)
{
// Barrier elided if triggered by a chain of cyclic references with no actual writes
m_cyclic_ref_tracker.reset();
return;
}
VkPipelineStageFlags src_stage, dst_stage;
VkAccessFlags src_access, dst_access;
if (!is_depth_surface()) [[likely]]
{
src_stage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dst_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
src_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dst_access = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
else
{
src_stage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
dst_stage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
src_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dst_access = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
}
vk::insert_image_memory_barrier(cmd, value, current_layout, current_layout,
src_stage, dst_stage, src_access, dst_access, { aspect(), 0, 1, 0, 1 });
m_cyclic_ref_tracker.reset();
}
void render_target::reset_surface_counters()
{
frame_tag = 0;
m_cyclic_ref_tracker.reset();
}
image_view* render_target::get_view(const rsx::texture_channel_remap_t& remap, VkImageAspectFlags mask)
{
if (remap.encoded == VK_REMAP_VIEW_MULTISAMPLED)
{
// Special remap flag, intercept here
return vk::viewable_image::get_view(remap.with_encoding(VK_REMAP_IDENTITY), mask);
}
return vk::viewable_image::get_view(remap, mask);
}
void render_target::memory_barrier(vk::command_buffer& cmd, rsx::surface_access access)
{
if (access == rsx::surface_access::gpu_reference)
{
// This barrier only requires that an object is made available for GPU usage.
if (!value)
{
unspill(cmd);
}
spill_request_tag = 0;
return;
}
const bool is_depth = is_depth_surface();
const bool should_read_buffers = is_depth ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (should_read_buffers)
{
// TODO: Decide what to do when memory loads are disabled but the underlying memory has changed
// NOTE: Assume test() is expensive when in a pinch
if (last_use_tag && state_flags == rsx::surface_state_flags::ready && !test())
{
// TODO: Figure out why merely returning and failing the test does not work when reading (TLoU)
// The result should have been the same either way
state_flags |= rsx::surface_state_flags::erase_bkgnd;
}
}
// Unspill here, because erase flag may have been set above.
if (!value)
{
unspill(cmd);
}
if (access == rsx::surface_access::shader_write && m_cyclic_ref_tracker.is_enabled())
{
if (current_layout == VK_IMAGE_LAYOUT_GENERAL || current_layout == VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT)
{
// Flag draw barrier observed
m_cyclic_ref_tracker.on_insert_draw_barrier();
// Check if we've had more draws than barriers so far (fall-out condition)
if (m_cyclic_ref_tracker.requires_post_loop_barrier())
{
post_texture_barrier(cmd);
}
}
else
{
// Layouts changed elsewhere. Reset.
m_cyclic_ref_tracker.reset();
}
}
if (old_contents.empty()) [[likely]]
{
if (state_flags & rsx::surface_state_flags::erase_bkgnd)
{
// NOTE: This step CAN introduce MSAA flags!
initialize_memory(cmd, access);
ensure(state_flags == rsx::surface_state_flags::ready);
on_write(rsx::get_shared_tag(), static_cast<rsx::surface_state_flags>(msaa_flags));
}
if (msaa_flags & rsx::surface_state_flags::require_resolve)
{
if (access.is_transfer())
{
// Only do this step when read access is required
get_resolve_target_safe(cmd);
resolve(cmd);
}
}
else if (msaa_flags & rsx::surface_state_flags::require_unresolve)
{
if (access == rsx::surface_access::shader_write)
{
// Only do this step when it is needed to start rendering
ensure(resolve_surface);
unresolve(cmd);
}
}
return;
}
// Memory transfers
vk::image* target_image = (samples() > 1) ? get_resolve_target_safe(cmd) : this;
vk::blitter hw_blitter;
const auto dst_bpp = get_bpp();
unsigned first = prepare_rw_barrier_for_transfer(this);
const bool accept_all = (last_use_tag && test());
bool optimize_copy = true;
u64 newest_tag = 0;
for (auto i = first; i < old_contents.size(); ++i)
{
auto& section = old_contents[i];
auto src_texture = static_cast<vk::render_target*>(section.source);
src_texture->memory_barrier(cmd, rsx::surface_access::transfer_read);
if (!accept_all && !src_texture->test()) [[likely]]
{
// If this surface is intact, accept all incoming data as it is guaranteed to be safe
// If this surface has not been initialized or is dirty, do not add more dirty data to it
continue;
}
const auto src_bpp = src_texture->get_bpp();
rsx::typeless_xfer typeless_info{};
if (src_texture->aspect() != aspect() ||
!formats_are_bitcast_compatible(this, src_texture))
{
typeless_info.src_is_typeless = true;
typeless_info.src_context = rsx::texture_upload_context::framebuffer_storage;
typeless_info.src_native_format_override = static_cast<u32>(info.format);
typeless_info.src_gcm_format = src_texture->get_gcm_format();
typeless_info.src_scaling_hint = f32(src_bpp) / dst_bpp;
}
section.init_transfer(this);
auto src_area = section.src_rect();
auto dst_area = section.dst_rect();
if (g_cfg.video.antialiasing_level != msaa_level::none)
{
src_texture->transform_pixels_to_samples(src_area);
this->transform_pixels_to_samples(dst_area);
}
bool memory_load = true;
if (dst_area.x1 == 0 && dst_area.y1 == 0 &&
unsigned(dst_area.x2) == target_image->width() && unsigned(dst_area.y2) == target_image->height())
{
// Skip a bunch of useless work
state_flags &= ~(rsx::surface_state_flags::erase_bkgnd);
msaa_flags = rsx::surface_state_flags::ready;
memory_load = false;
stencil_init_flags = src_texture->stencil_init_flags;
}
else if (state_flags & rsx::surface_state_flags::erase_bkgnd)
{
// Might introduce MSAA flags
initialize_memory(cmd, rsx::surface_access::memory_write);
ensure(state_flags == rsx::surface_state_flags::ready);
}
if (msaa_flags & rsx::surface_state_flags::require_resolve)
{
// Need to forward resolve this
resolve(cmd);
}
if (samples() > 1)
{
// Ensure a writable surface exists for this surface
get_resolve_target_safe(cmd);
}
if (src_texture->samples() > 1)
{
// Ensure a readable surface exists for the source
src_texture->get_resolve_target_safe(cmd);
}
hw_blitter.scale_image(
cmd,
src_texture->get_surface(rsx::surface_access::transfer_read),
this->get_surface(rsx::surface_access::transfer_write),
src_area,
dst_area,
/*linear?*/false, typeless_info);
optimize_copy = optimize_copy && !memory_load;
newest_tag = src_texture->last_use_tag;
}
if (!newest_tag) [[unlikely]]
{
// Underlying memory has been modified and we could not find valid data to fill it
clear_rw_barrier();
state_flags |= rsx::surface_state_flags::erase_bkgnd;
initialize_memory(cmd, access);
ensure(state_flags == rsx::surface_state_flags::ready);
}
// NOTE: Optimize flag relates to stencil resolve/unresolve for NVIDIA.
on_write_copy(newest_tag, optimize_copy);
if (access == rsx::surface_access::shader_write && samples() > 1)
{
// Write barrier, must initialize
unresolve(cmd);
}
}
}
| size: 35,844 | language: C++ | extension: .cpp | total_lines: 949 | avg_line_length: 33.771338 | max_line_length: 166 | alphanum_fraction: 0.697725 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false |
| id: 5,459 | file_name: fsr_pass.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/upscalers/fsr1/fsr_pass.cpp | content: |
#include "../../vkutils/barriers.h"
#include "../../VKHelpers.h"
#include "../../VKResourceManager.h"
#include "../fsr_pass.h"
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-qualifiers"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wunused-function"
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wignored-qualifiers"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#define A_CPU 1
#include "3rdparty/GPUOpen/include/ffx_a.h"
#include "3rdparty/GPUOpen/include/ffx_fsr1.h"
#undef A_CPU
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(__clang__)
#pragma clang diagnostic pop
#endif
namespace vk
{
namespace FidelityFX
{
fsr_pass::fsr_pass(const std::string& config_definitions, u32 push_constants_size_)
{
// Just use AMD-provided source with minimal modification
const char* shader_core =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ubershader.glsl"
;
// Replacements
const char* ffx_a_contents =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ffx_a_flattened.inc"
;
const char* ffx_fsr_contents =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ffx_fsr1_flattened.inc"
;
const std::pair<std::string_view, std::string> replacement_table[] =
{
{ "%FFX_DEFINITIONS%", config_definitions },
{ "%FFX_A_IMPORT%", ffx_a_contents },
{ "%FFX_FSR_IMPORT%", ffx_fsr_contents },
{ "%push_block%", "push_constant" }
};
m_src = shader_core;
m_src = fmt::replace_all(m_src, replacement_table);
// Fill with 0 to avoid sending incomplete/unused variables to the GPU
memset(m_constants_buf, 0, sizeof(m_constants_buf));
// Enable push constants
use_push_constants = true;
push_constants_size = push_constants_size_;
create();
}
std::vector<std::pair<VkDescriptorType, u8>> fsr_pass::get_descriptor_layout()
{
return
{
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1 },
{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1 }
};
}
void fsr_pass::declare_inputs()
{
std::vector<vk::glsl::program_input> inputs =
{
{
::glsl::program_domain::glsl_compute_program,
vk::glsl::program_input_type::input_type_texture,
{}, {},
0,
"InputTexture"
},
{
::glsl::program_domain::glsl_compute_program,
vk::glsl::program_input_type::input_type_texture,
{}, {},
1,
"OutputTexture"
}
};
m_program->load_uniforms(inputs);
}
void fsr_pass::bind_resources()
{
// Bind relevant stuff
if (!m_sampler)
{
const auto pdev = vk::get_current_renderer();
m_sampler = std::make_unique<vk::sampler>(*pdev,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
VK_FALSE, 0.f, 1.f, 0.f, 0.f, VK_FILTER_LINEAR, VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
}
m_program->bind_uniform({ m_sampler->value, m_input_image->value, m_input_image->image()->current_layout }, "InputTexture", VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_descriptor_set);
m_program->bind_uniform({ VK_NULL_HANDLE, m_output_image->value, m_output_image->image()->current_layout }, "OutputTexture", VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_descriptor_set);
}
void fsr_pass::run(const vk::command_buffer& cmd, vk::viewable_image* src, vk::viewable_image* dst, const size2u& input_size, const size2u& output_size)
{
m_input_image = src->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));
m_output_image = dst->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));
m_input_size = input_size;
m_output_size = output_size;
configure(cmd);
constexpr auto wg_size = 16;
const auto invocations_x = utils::aligned_div(output_size.width, wg_size);
const auto invocations_y = utils::aligned_div(output_size.height, wg_size);
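// e.g. a 1920x1080 output dispatches ceil(1920/16) x ceil(1080/16) = 120x68 workgroups.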
ensure(invocations_x == (output_size.width + (wg_size - 1)) / wg_size);
ensure(invocations_y == (output_size.height + (wg_size - 1)) / wg_size);
compute_task::run(cmd, invocations_x, invocations_y, 1);
}
easu_pass::easu_pass()
: fsr_pass(
"#define SAMPLE_EASU 1\n"
"#define SAMPLE_RCAS 0\n"
"#define SAMPLE_BILINEAR 0\n"
"#define SAMPLE_SLOW_FALLBACK 1",
80 // 5*VEC4
)
{}
void easu_pass::configure(const vk::command_buffer& cmd)
{
auto src_image = m_input_image->image();
// NOTE: Configuration vector 4 is unused as we do not support HDR natively
auto con0 = &m_constants_buf[0];
auto con1 = &m_constants_buf[4];
auto con2 = &m_constants_buf[8];
auto con3 = &m_constants_buf[12];
FsrEasuCon(con0, con1, con2, con3,
static_cast<f32>(m_input_size.width), static_cast<f32>(m_input_size.height), // Incoming viewport size to upscale (actual size)
static_cast<f32>(src_image->width()), static_cast<f32>(src_image->height()), // Size of the raw image to upscale (in case viewport does not cover it all)
static_cast<f32>(m_output_size.width), static_cast<f32>(m_output_size.height)); // Size of output viewport (target size)
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
}
rcas_pass::rcas_pass()
: fsr_pass(
"#define SAMPLE_RCAS 1\n"
"#define SAMPLE_EASU 0\n"
"#define SAMPLE_BILINEAR 0\n"
"#define SAMPLE_SLOW_FALLBACK 1",
32 // 2*VEC4
)
{}
void rcas_pass::configure(const vk::command_buffer& cmd)
{
// 0 is actually the sharpest with 2 being the chosen limit. Each progressive unit 'halves' the sharpening intensity.
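// e.g. intensity 100 -> attenuation 0.0 (sharpest), 50 -> 1.0, 0 -> 2.0 (the softest allowed).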
auto cas_attenuation = 2.f - (g_cfg.video.vk.rcas_sharpening_intensity / 50.f);
FsrRcasCon(&m_constants_buf[0], cas_attenuation);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
}
} // Namespace FidelityFX
void fsr_upscale_pass::dispose_images()
{
auto safe_delete = [](auto& data)
{
if (data && data->value)
{
vk::get_resource_manager()->dispose(data);
}
else if (data)
{
data.reset();
}
};
safe_delete(m_output_left);
safe_delete(m_output_right);
safe_delete(m_intermediate_data);
}
void fsr_upscale_pass::initialize_image(u32 output_w, u32 output_h, rsx::flags32_t mode)
{
dispose_images();
const auto pdev = vk::get_current_renderer();
auto initialize_image_impl = [pdev, output_w, output_h](VkImageUsageFlags usage, VkFormat format)
{
return std::make_unique<vk::viewable_image>(
*pdev, // Owner
pdev->get_memory_mapping().device_local, // Must be in device optimal memory
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D,
format,
output_w, output_h, 1, 1, 1, VK_SAMPLE_COUNT_1_BIT, // Dimensions (w, h, d, mips, layers, samples)
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
usage,
VK_IMAGE_CREATE_ALLOW_NULL_RPCS3, // Allow creation to fail if there is no memory
VMM_ALLOCATION_POOL_SWAPCHAIN,
RSX_FORMAT_CLASS_COLOR);
};
const VkFlags usage_mask_output = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
const VkFlags usage_mask_intermediate = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
bool failed = true;
VkFormat data_format = VK_FORMAT_UNDEFINED;
// Check if it is possible to actually write to the format we want.
// Fallback to RGBA8 is supported as well
std::array<VkFormat, 2> supported_formats = { VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM };
for (const auto& format : supported_formats)
{
const VkFlags all_required_bits = VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT;
if ((pdev->get_format_properties(format).optimalTilingFeatures & all_required_bits) == all_required_bits)
{
data_format = format;
failed = false;
break;
}
}
if ((mode & UPSCALE_LEFT_VIEW) && !failed)
{
m_output_left = initialize_image_impl(usage_mask_output, data_format);
failed |= (m_output_left->value == VK_NULL_HANDLE);
}
if ((mode & UPSCALE_RIGHT_VIEW) && !failed)
{
m_output_right = initialize_image_impl(usage_mask_output, data_format);
failed |= (m_output_right->value == VK_NULL_HANDLE);
}
if (!failed)
{
m_intermediate_data = initialize_image_impl(usage_mask_intermediate, data_format);
failed |= (m_intermediate_data->value == VK_NULL_HANDLE);
}
if (failed)
{
if (data_format != VK_FORMAT_UNDEFINED)
{
dispose_images();
rsx_log.warning("FSR is enabled, but the system is out of memory. Will fall back to bilinear upscaling.");
}
else
{
ensure(!m_output_left && !m_output_right && !m_intermediate_data);
rsx_log.error("FSR is not supported by this driver and hardware combination.");
}
}
}
vk::viewable_image* fsr_upscale_pass::scale_output(
const vk::command_buffer& cmd,
vk::viewable_image* src,
VkImage present_surface,
VkImageLayout present_surface_layout,
const VkImageBlit& request,
rsx::flags32_t mode)
{
size2u input_size, output_size;
input_size.width = std::abs(request.srcOffsets[1].x - request.srcOffsets[0].x);
input_size.height = std::abs(request.srcOffsets[1].y - request.srcOffsets[0].y);
output_size.width = std::abs(request.dstOffsets[1].x - request.dstOffsets[0].x);
output_size.height = std::abs(request.dstOffsets[1].y - request.dstOffsets[0].y);
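// Offsets may arrive swapped to request a mirrored blit, so extents are measured with abs().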
auto src_image = src;
auto target_image = present_surface;
auto target_image_layout = present_surface_layout;
auto output_request = request;
if (input_size.width < output_size.width && input_size.height < output_size.height)
{
// Cannot upscale both LEFT and RIGHT images at the same time.
// Default maps to LEFT for simplicity
ensure((mode & (UPSCALE_LEFT_VIEW | UPSCALE_RIGHT_VIEW)) != (UPSCALE_LEFT_VIEW | UPSCALE_RIGHT_VIEW));
auto& m_output_data = (mode & UPSCALE_LEFT_VIEW) ? m_output_left : m_output_right;
if (!m_output_data || m_output_data->width() != output_size.width || m_output_data->height() != output_size.height)
{
initialize_image(output_size.width, output_size.height, mode);
}
if (m_output_data)
{
// Execute the pass here
auto cs_easu_task = vk::get_compute_task<vk::FidelityFX::easu_pass>();
auto cs_rcas_task = vk::get_compute_task<vk::FidelityFX::rcas_pass>();
// Prepare for EASU pass
src->push_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
if (m_intermediate_data->current_layout != VK_IMAGE_LAYOUT_GENERAL)
{
m_intermediate_data->change_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
}
else
{
// R/W CS-CS barrier in case of back-to-back upscales
vk::insert_image_memory_barrier(cmd,
m_intermediate_data->value,
m_intermediate_data->current_layout, m_intermediate_data->current_layout,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
}
// EASU
cs_easu_task->run(cmd, src, m_intermediate_data.get(), input_size, output_size);
// Prepare for RCAS pass
m_output_data->change_layout(cmd, VK_IMAGE_LAYOUT_GENERAL);
// R/W CS-CS barrier before RCAS
vk::insert_image_memory_barrier(cmd,
m_intermediate_data->value,
m_intermediate_data->current_layout, m_intermediate_data->current_layout,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT,
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
// RCAS
cs_rcas_task->run(cmd, m_intermediate_data.get(), m_output_data.get(), input_size, output_size);
// Cleanup
src->pop_layout(cmd);
// Swap input for FSR target
src_image = m_output_data.get();
// Update output parameters to match expected output
if (mode & UPSCALE_AND_COMMIT)
{
// Explicit CS-Transfer barrier
vk::insert_image_memory_barrier(cmd,
m_output_data->value,
m_output_data->current_layout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_TRANSFER_READ_BIT,
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });
m_output_data->current_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
output_request.srcOffsets[0].x = 0;
output_request.srcOffsets[1].x = output_size.width;
output_request.srcOffsets[0].y = 0;
output_request.srcOffsets[1].y = output_size.height;
// Preserve mirroring/flipping
if (request.srcOffsets[0].x > request.srcOffsets[1].x)
{
std::swap(output_request.srcOffsets[0].x, output_request.srcOffsets[1].x);
}
if (request.srcOffsets[0].y > request.srcOffsets[1].y)
{
std::swap(output_request.srcOffsets[0].y, output_request.srcOffsets[1].y);
}
}
}
}
if (mode & UPSCALE_AND_COMMIT)
{
src_image->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdBlitImage(cmd, src_image->value, src_image->current_layout, target_image, target_image_layout, 1, &output_request, VK_FILTER_LINEAR);
src_image->pop_layout(cmd);
return nullptr;
}
return src_image;
}
}
| 13,999 | C++ | .cpp | 344 | 35.305233 | 189 | 0.670287 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,460 | sync.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/sync.cpp |
#include "barriers.h"
#include "buffer_object.h"
#include "commands.h"
#include "device.h"
#include "garbage_collector.h"
#include "sync.h"
#include "shared.h"
#include "Emu/Cell/timers.hpp"
#include "util/sysinfo.hpp"
#include "util/asm.hpp"
namespace vk
{
namespace globals
{
static std::unique_ptr<gpu_debug_marker_pool> g_gpu_debug_marker_pool;
static std::unique_ptr<gpu_label_pool> g_gpu_label_pool;
gpu_debug_marker_pool& get_shared_marker_pool(const vk::render_device& dev)
{
if (!g_gpu_debug_marker_pool)
{
g_gpu_debug_marker_pool = std::make_unique<gpu_debug_marker_pool>(dev, 65536);
vk::get_gc()->add_exit_callback([]()
{
g_gpu_debug_marker_pool.reset();
});
}
return *g_gpu_debug_marker_pool;
}
gpu_label_pool& get_shared_label_pool(const vk::render_device& dev)
{
if (!g_gpu_label_pool)
{
g_gpu_label_pool = std::make_unique<gpu_label_pool>(dev, 65536);
vk::get_gc()->add_exit_callback([]()
{
g_gpu_label_pool.reset();
});
}
return *g_gpu_label_pool;
}
}
// Util
namespace v1_utils
{
VkPipelineStageFlags gather_src_stages(const VkDependencyInfoKHR& dependency)
{
VkPipelineStageFlags stages = VK_PIPELINE_STAGE_NONE;
for (u32 i = 0; i < dependency.bufferMemoryBarrierCount; ++i)
{
stages |= dependency.pBufferMemoryBarriers[i].srcStageMask;
}
for (u32 i = 0; i < dependency.imageMemoryBarrierCount; ++i)
{
stages |= dependency.pImageMemoryBarriers[i].srcStageMask;
}
for (u32 i = 0; i < dependency.memoryBarrierCount; ++i)
{
stages |= dependency.pMemoryBarriers[i].srcStageMask;
}
return stages;
}
VkPipelineStageFlags gather_dst_stages(const VkDependencyInfoKHR& dependency)
{
VkPipelineStageFlags stages = VK_PIPELINE_STAGE_NONE;
for (u32 i = 0; i < dependency.bufferMemoryBarrierCount; ++i)
{
stages |= dependency.pBufferMemoryBarriers[i].dstStageMask;
}
for (u32 i = 0; i < dependency.imageMemoryBarrierCount; ++i)
{
stages |= dependency.pImageMemoryBarriers[i].dstStageMask;
}
for (u32 i = 0; i < dependency.memoryBarrierCount; ++i)
{
stages |= dependency.pMemoryBarriers[i].dstStageMask;
}
return stages;
}
auto get_memory_barriers(const VkDependencyInfoKHR& dependency)
{
std::vector<VkMemoryBarrier> result;
for (u32 i = 0; i < dependency.memoryBarrierCount; ++i)
{
result.push_back
({
VK_STRUCTURE_TYPE_MEMORY_BARRIER,
nullptr,
static_cast<VkAccessFlags>(dependency.pMemoryBarriers[i].srcAccessMask),
static_cast<VkAccessFlags>(dependency.pMemoryBarriers[i].dstAccessMask)
});
}
return result;
}
auto get_image_memory_barriers(const VkDependencyInfoKHR& dependency)
{
std::vector<VkImageMemoryBarrier> result;
for (u32 i = 0; i < dependency.imageMemoryBarrierCount; ++i)
{
result.push_back
({
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
nullptr,
static_cast<VkAccessFlags>(dependency.pImageMemoryBarriers[i].srcAccessMask),
static_cast<VkAccessFlags>(dependency.pImageMemoryBarriers[i].dstAccessMask),
dependency.pImageMemoryBarriers[i].oldLayout,
dependency.pImageMemoryBarriers[i].newLayout,
dependency.pImageMemoryBarriers[i].srcQueueFamilyIndex,
dependency.pImageMemoryBarriers[i].dstQueueFamilyIndex,
dependency.pImageMemoryBarriers[i].image,
dependency.pImageMemoryBarriers[i].subresourceRange
});
}
return result;
}
auto get_buffer_memory_barriers(const VkDependencyInfoKHR& dependency)
{
std::vector<VkBufferMemoryBarrier> result;
for (u32 i = 0; i < dependency.bufferMemoryBarrierCount; ++i)
{
result.push_back
({
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
nullptr,
static_cast<VkAccessFlags>(dependency.pBufferMemoryBarriers[i].srcAccessMask),
static_cast<VkAccessFlags>(dependency.pBufferMemoryBarriers[i].dstAccessMask),
dependency.pBufferMemoryBarriers[i].srcQueueFamilyIndex,
dependency.pBufferMemoryBarriers[i].dstQueueFamilyIndex,
dependency.pBufferMemoryBarriers[i].buffer,
dependency.pBufferMemoryBarriers[i].offset,
dependency.pBufferMemoryBarriers[i].size
});
}
return result;
}
}
// Objects
fence::fence(VkDevice dev)
{
owner = dev;
VkFenceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
CHECK_RESULT(vkCreateFence(dev, &info, nullptr, &handle));
}
fence::~fence()
{
if (handle)
{
vkDestroyFence(owner, handle, nullptr);
handle = VK_NULL_HANDLE;
}
}
void fence::reset()
{
vkResetFences(owner, 1, &handle);
flushed.release(false);
}
void fence::signal_flushed()
{
flushed.release(true);
}
void fence::wait_flush()
{
while (!flushed)
{
utils::pause();
}
}
fence::operator bool() const
{
return (handle != VK_NULL_HANDLE);
}
semaphore::semaphore(const render_device& dev)
: m_device(dev)
{
VkSemaphoreCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
CHECK_RESULT(vkCreateSemaphore(m_device, &info, nullptr, &m_handle));
}
semaphore::~semaphore()
{
vkDestroySemaphore(m_device, m_handle, nullptr);
}
semaphore::operator VkSemaphore() const
{
return m_handle;
}
event::event(const render_device& dev, sync_domain domain)
: m_device(&dev), m_domain(domain)
{
m_backend = dev.get_synchronization2_support()
? sync_backend::events_v2
: sync_backend::events_v1;
if (domain == sync_domain::host &&
vk::get_driver_vendor() == vk::driver_vendor::AMD &&
vk::get_chip_family() < vk::chip_class::AMD_navi1x)
{
// Events don't work quite right on AMD drivers
m_backend = sync_backend::gpu_label;
m_label = std::make_unique<vk::gpu_label>(globals::get_shared_label_pool(dev));
return;
}
VkEventCreateInfo info
{
.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
.pNext = nullptr,
.flags = 0
};
if (domain == sync_domain::gpu && m_backend == sync_backend::events_v2)
{
info.flags = VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR;
}
CHECK_RESULT(vkCreateEvent(dev, &info, nullptr, &m_vk_event));
}
event::~event()
{
if (m_vk_event) [[likely]]
{
vkDestroyEvent(*m_device, m_vk_event, nullptr);
}
}
void event::resolve_dependencies(const command_buffer& cmd, const VkDependencyInfoKHR& dependency)
{
ensure(m_backend != sync_backend::gpu_label);
if (m_backend == sync_backend::events_v2)
{
m_device->_vkCmdPipelineBarrier2KHR(cmd, &dependency);
return;
}
const auto src_stages = v1_utils::gather_src_stages(dependency);
const auto dst_stages = v1_utils::gather_dst_stages(dependency);
const auto memory_barriers = v1_utils::get_memory_barriers(dependency);
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
vkCmdPipelineBarrier(cmd, src_stages, dst_stages, dependency.dependencyFlags,
::size32(memory_barriers), memory_barriers.data(),
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
::size32(image_memory_barriers), image_memory_barriers.data());
}
void event::signal(const command_buffer& cmd, const VkDependencyInfoKHR& dependency)
{
if (m_backend == sync_backend::gpu_label)
{
// Fallback path
m_label->signal(cmd, dependency);
return;
}
if (m_domain != sync_domain::host)
{
// As long as host is not involved, keep things consistent.
// The expectation is that this will be awaited using the gpu_wait function.
if (m_backend == sync_backend::events_v2) [[ likely ]]
{
m_device->_vkCmdSetEvent2KHR(cmd, m_vk_event, &dependency);
}
else
{
const auto dst_stages = v1_utils::gather_dst_stages(dependency);
vkCmdSetEvent(cmd, m_vk_event, dst_stages);
}
return;
}
// Host sync doesn't behave intuitively with events, so we use some workarounds.
// 1. Resolve the actual dependencies on a pipeline barrier.
resolve_dependencies(cmd, dependency);
// 2. Signalling won't wait. The caller is responsible for setting up the dependencies correctly.
if (m_backend != sync_backend::events_v2)
{
vkCmdSetEvent(cmd, m_vk_event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
return;
}
// We need a memory barrier to keep AMDVLK from hanging
VkMemoryBarrier2KHR mem_barrier =
{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR,
.srcStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR,
.srcAccessMask = VK_ACCESS_2_MEMORY_READ_BIT | VK_ACCESS_2_MEMORY_WRITE_BIT
};
// Empty dependency that does nothing
VkDependencyInfoKHR empty_dependency
{
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR,
.memoryBarrierCount = 1,
.pMemoryBarriers = &mem_barrier
};
m_device->_vkCmdSetEvent2KHR(cmd, m_vk_event, &empty_dependency);
}
void event::host_signal() const
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
vkSetEvent(*m_device, m_vk_event);
return;
}
m_label->set();
}
void event::gpu_wait(const command_buffer& cmd, const VkDependencyInfoKHR& dependency) const
{
ensure(m_domain != sync_domain::host);
if (m_backend == sync_backend::events_v2) [[ likely ]]
{
m_device->_vkCmdWaitEvents2KHR(cmd, 1, &m_vk_event, &dependency);
return;
}
const auto src_stages = v1_utils::gather_src_stages(dependency);
const auto dst_stages = v1_utils::gather_dst_stages(dependency);
const auto memory_barriers = v1_utils::get_memory_barriers(dependency);
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
vkCmdWaitEvents(cmd,
1, &m_vk_event,
src_stages, dst_stages,
::size32(memory_barriers), memory_barriers.data(),
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
::size32(image_memory_barriers), image_memory_barriers.data());
}
void event::reset() const
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
vkResetEvent(*m_device, m_vk_event);
return;
}
m_label->reset();
}
VkResult event::status() const
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
return vkGetEventStatus(*m_device, m_vk_event);
}
return m_label->signaled() ? VK_EVENT_SET : VK_EVENT_RESET;
}
gpu_label_pool::gpu_label_pool(const vk::render_device& dev, u32 count)
: pdev(&dev), m_count(count)
{}
gpu_label_pool::~gpu_label_pool()
{
if (m_mapped)
{
ensure(m_buffer);
m_buffer->unmap();
}
}
std::tuple<VkBuffer, u64, volatile u32*> gpu_label_pool::allocate()
{
if (!m_buffer || m_offset >= m_count)
{
create_impl();
}
const auto out_offset = m_offset;
m_offset++;
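// Each label is a single u32, so the byte offset into the buffer is out_offset * 4.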
return { m_buffer->value, out_offset * 4, m_mapped + out_offset };
}
void gpu_label_pool::create_impl()
{
if (m_buffer)
{
m_buffer->unmap();
vk::get_gc()->dispose(m_buffer);
}
m_buffer = std::make_unique<buffer>
(
*pdev,
m_count * 4,
pdev->get_memory_mapping().host_visible_coherent,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
0,
VMM_ALLOCATION_POOL_SYSTEM
);
m_mapped = reinterpret_cast<volatile u32*>(m_buffer->map(0, VK_WHOLE_SIZE));
m_offset = 0;
}
gpu_label::gpu_label(gpu_label_pool& pool)
{
std::tie(m_buffer_handle, m_buffer_offset, m_ptr) = pool.allocate();
reset();
}
gpu_label::~gpu_label()
{
m_ptr = nullptr;
m_buffer_offset = 0;
m_buffer_handle = VK_NULL_HANDLE;
}
void gpu_label::signal(const vk::command_buffer& cmd, const VkDependencyInfoKHR& dependency)
{
const auto src_stages = v1_utils::gather_src_stages(dependency);
auto dst_stages = v1_utils::gather_dst_stages(dependency);
auto memory_barriers = v1_utils::get_memory_barriers(dependency);
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
// Ensure wait before filling the label
dst_stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
if (memory_barriers.empty())
{
const VkMemoryBarrier signal_barrier =
{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT
};
memory_barriers.push_back(signal_barrier);
}
else
{
auto& barrier = memory_barriers.front();
barrier.dstAccessMask |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
vkCmdPipelineBarrier(cmd, src_stages, dst_stages, dependency.dependencyFlags,
::size32(memory_barriers), memory_barriers.data(),
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
::size32(image_memory_barriers), image_memory_barriers.data());
vkCmdFillBuffer(cmd, m_buffer_handle, m_buffer_offset, 4, label_constants::set_);
}
gpu_debug_marker::gpu_debug_marker(gpu_debug_marker_pool& pool, std::string message)
: gpu_label(pool), m_message(std::move(message))
{}
gpu_debug_marker::~gpu_debug_marker()
{
if (!m_printed)
{
dump();
}
}
void gpu_debug_marker::dump()
{
if (*m_ptr == gpu_label::label_constants::reset_)
{
rsx_log.error("DEBUG MARKER NOT REACHED: %s", m_message);
}
m_printed = true;
}
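// Const variant: reports the marker state without updating m_printed.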
void gpu_debug_marker::dump() const
{
if (*m_ptr == gpu_label::label_constants::reset_)
{
rsx_log.error("DEBUG MARKER NOT REACHED: %s", m_message);
}
else
{
rsx_log.error("DEBUG MARKER: %s", m_message);
}
}
void gpu_debug_marker::insert(
const vk::render_device& dev,
const vk::command_buffer& cmd,
std::string message,
VkPipelineStageFlags stages,
VkAccessFlags access)
{
VkMemoryBarrier2KHR barrier =
{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR,
.srcStageMask = stages,
.srcAccessMask = access,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT
};
VkDependencyInfoKHR dependency =
{
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR,
.memoryBarrierCount = 1,
.pMemoryBarriers = &barrier
};
auto result = std::make_unique<gpu_debug_marker>(globals::get_shared_marker_pool(dev), std::move(message));
result->signal(cmd, dependency);
vk::get_gc()->dispose(result);
}
debug_marker_scope::debug_marker_scope(const vk::command_buffer& cmd, const std::string& message)
: m_device(&cmd.get_command_pool().get_owner()), m_cb(&cmd), m_message(message), m_tag(rsx::get_shared_tag())
{
vk::gpu_debug_marker::insert(
*m_device,
*m_cb,
fmt::format("0x%llx: Enter %s", m_tag, m_message)
);
}
debug_marker_scope::~debug_marker_scope()
{
ensure(m_cb && m_cb->is_recording());
vk::gpu_debug_marker::insert(
*m_device,
*m_cb,
fmt::format("0x%x: Exit %s", m_tag, m_message)
);
}
VkResult wait_for_fence(fence* pFence, u64 timeout)
{
pFence->wait_flush();
if (timeout)
{
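// The timeout argument here is in microseconds; vkWaitForFences expects nanoseconds, hence the conversion.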
return vkWaitForFences(*g_render_device, 1, &pFence->handle, VK_FALSE, timeout * 1000ull);
}
else
{
while (auto status = vkGetFenceStatus(*g_render_device, pFence->handle))
{
switch (status)
{
case VK_NOT_READY:
utils::pause();
continue;
default:
die_with_error(status);
return status;
}
}
return VK_SUCCESS;
}
}
VkResult wait_for_event(event* pEvent, u64 timeout)
{
// Convert timeout to TSC cycles. Timeout accuracy isn't super-important, only fast response when event is signaled (within 10us if possible)
const u64 freq = utils::get_tsc_freq();
if (freq)
{
timeout *= (freq / 1'000'000);
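// e.g. a 3 GHz invariant TSC yields a multiplier of 3000 cycles per microsecond.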
}
u64 start = 0;
while (true)
{
switch (const auto status = pEvent->status())
{
case VK_EVENT_SET:
return VK_SUCCESS;
case VK_EVENT_RESET:
break;
default:
die_with_error(status);
return status;
}
if (timeout)
{
const auto now = freq ? utils::get_tsc() : get_system_time();
if (!start)
{
start = now;
continue;
}
if ((now > start) &&
(now - start) > timeout)
{
rsx_log.error("[vulkan] vk::wait_for_event has timed out!");
return VK_TIMEOUT;
}
}
utils::pause();
}
}
}
| 16,171 | C++ | .cpp | 550 | 25.858182 | 143 | 0.698442 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,461 | device.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/device.cpp |
#include "device.h"
#include "instance.hpp"
#include "util/logs.hpp"
#include "Emu/system_config.h"
namespace vk
{
// Global shared render device
const render_device* g_render_device = nullptr;
void physical_device::get_physical_device_features(bool allow_extensions)
{
if (!allow_extensions)
{
vkGetPhysicalDeviceFeatures(dev, &features);
return;
}
supported_extensions instance_extensions(supported_extensions::instance);
supported_extensions device_extensions(supported_extensions::device, nullptr, dev);
if (!instance_extensions.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
{
vkGetPhysicalDeviceFeatures(dev, &features);
}
else
{
VkPhysicalDeviceFeatures2KHR features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = nullptr;
VkPhysicalDeviceFloat16Int8FeaturesKHR shader_support_info{};
VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_info{};
VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT fbo_loops_info{};
VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR shader_barycentric_info{};
VkPhysicalDeviceCustomBorderColorFeaturesEXT custom_border_color_info{};
if (device_extensions.is_supported(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME))
{
shader_support_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
features2.pNext = &shader_support_info;
}
if (device_extensions.is_supported(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME))
{
descriptor_indexing_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT;
descriptor_indexing_info.pNext = features2.pNext;
features2.pNext = &descriptor_indexing_info;
descriptor_indexing_support = true;
}
if (device_extensions.is_supported(VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME))
{
fbo_loops_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT;
fbo_loops_info.pNext = features2.pNext;
features2.pNext = &fbo_loops_info;
}
if (device_extensions.is_supported(VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME))
{
shader_barycentric_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR;
shader_barycentric_info.pNext = features2.pNext;
features2.pNext = &shader_barycentric_info;
}
if (device_extensions.is_supported(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME))
{
custom_border_color_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT;
custom_border_color_info.pNext = features2.pNext;
features2.pNext = &custom_border_color_info;
}
auto _vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
ensure(_vkGetPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddr failed to find entry point!"
_vkGetPhysicalDeviceFeatures2KHR(dev, &features2);
shader_types_support.allow_float64 = !!features2.features.shaderFloat64;
shader_types_support.allow_float16 = !!shader_support_info.shaderFloat16;
shader_types_support.allow_int8 = !!shader_support_info.shaderInt8;
optional_features_support.custom_border_color = !!custom_border_color_info.customBorderColors && !!custom_border_color_info.customBorderColorWithoutFormat;
optional_features_support.barycentric_coords = !!shader_barycentric_info.fragmentShaderBarycentric;
optional_features_support.framebuffer_loops = !!fbo_loops_info.attachmentFeedbackLoopLayout;
features = features2.features;
if (descriptor_indexing_support)
{
#define SET_DESCRIPTOR_BITFLAG(field, bit) if (descriptor_indexing_info.field) descriptor_indexing_support.update_after_bind_mask |= (1ull << bit)
SET_DESCRIPTOR_BITFLAG(descriptorBindingUniformBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingSampledImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingSampledImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingUniformTexelBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageTexelBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
#undef SET_DESCRIPTOR_BITFLAG
}
}
optional_features_support.shader_stencil_export = device_extensions.is_supported(VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME);
optional_features_support.conditional_rendering = device_extensions.is_supported(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME);
optional_features_support.external_memory_host = device_extensions.is_supported(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME);
optional_features_support.sampler_mirror_clamped = device_extensions.is_supported(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
optional_features_support.synchronization_2 = device_extensions.is_supported(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
optional_features_support.unrestricted_depth_range = device_extensions.is_supported(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
optional_features_support.debug_utils = instance_extensions.is_supported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
optional_features_support.surface_capabilities_2 = instance_extensions.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
void physical_device::get_physical_device_properties(bool allow_extensions)
{
vkGetPhysicalDeviceMemoryProperties(dev, &memory_properties);
if (!allow_extensions)
{
vkGetPhysicalDeviceProperties(dev, &props);
return;
}
supported_extensions instance_extensions(supported_extensions::instance);
supported_extensions device_extensions(supported_extensions::device, nullptr, dev);
if (!instance_extensions.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
{
vkGetPhysicalDeviceProperties(dev, &props);
}
else
{
VkPhysicalDeviceProperties2KHR properties2;
properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
properties2.pNext = nullptr;
VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props{};
if (descriptor_indexing_support)
{
descriptor_indexing_props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT;
descriptor_indexing_props.pNext = properties2.pNext;
properties2.pNext = &descriptor_indexing_props;
}
if (device_extensions.is_supported(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME))
{
driver_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
driver_properties.pNext = properties2.pNext;
properties2.pNext = &driver_properties;
}
auto _vkGetPhysicalDeviceProperties2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceProperties2KHR"));
ensure(_vkGetPhysicalDeviceProperties2KHR);
_vkGetPhysicalDeviceProperties2KHR(dev, &properties2);
props = properties2.properties;
if (descriptor_indexing_support)
{
if (descriptor_indexing_props.maxUpdateAfterBindDescriptorsInAllPools < 800'000)
{
rsx_log.error("Physical device does not support enough descriptors for deferred updates to work effectively. Deferred updates are disabled.");
descriptor_indexing_support.update_after_bind_mask = 0;
}
else if (descriptor_indexing_props.maxUpdateAfterBindDescriptorsInAllPools < 2'000'000)
{
rsx_log.warning("Physical device reports a low amount of allowed deferred descriptor updates. Draw call threshold will be lowered accordingly.");
descriptor_max_draw_calls = 8192;
}
}
}
}
void physical_device::create(VkInstance context, VkPhysicalDevice pdev, bool allow_extensions)
{
dev = pdev;
parent = context;
get_physical_device_features(allow_extensions);
get_physical_device_properties(allow_extensions);
rsx_log.always()("Found Vulkan-compatible GPU: '%s' running on driver %s", get_name(), get_driver_version());
if (get_driver_vendor() == driver_vendor::RADV && get_name().find("LLVM 8.0.0") != umax)
{
// Serious driver bug causing black screens
// See https://bugs.freedesktop.org/show_bug.cgi?id=110970
rsx_log.fatal("RADV drivers have a major driver bug with LLVM 8.0.0 resulting in no visual output. Upgrade to LLVM version 8.0.1 or greater to avoid this issue.");
}
else if (get_driver_vendor() == driver_vendor::NVIDIA)
{
#ifdef _WIN32
// SPIRV bugs were fixed in 452.28 for windows
const u32 threshold_version = (452u << 22) | (28 << 14);
#else
// SPIRV bugs were fixed in 450.56 for linux/BSD
const u32 threshold_version = (450u << 22) | (56 << 14);
#endif
const auto current_version = props.driverVersion & ~0x3fffu; // Clear patch and revision fields
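// NVIDIA packs versions as 10.8.8.6 bits (major.minor.patch.revision); ~0x3fff masks the low 14 patch+revision bits.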
if (current_version < threshold_version)
{
rsx_log.error("Your current NVIDIA graphics driver version %s has known issues and is unsupported. Update to the latest NVIDIA driver.", get_driver_version());
}
}
if (get_chip_class() == chip_class::AMD_vega && shader_types_support.allow_float16)
{
// Disable fp16 if driver uses LLVM emitter. It does fine with AMD proprietary drivers though.
shader_types_support.allow_float16 = (driver_properties.driverID == VK_DRIVER_ID_AMD_PROPRIETARY_KHR);
}
}
std::string physical_device::get_name() const
{
return props.deviceName;
}
driver_vendor physical_device::get_driver_vendor() const
{
#ifdef __APPLE__
// moltenVK currently returns DRIVER_ID_MOLTENVK (0).
// For now, assume the vendor is moltenVK on Apple devices.
return driver_vendor::MVK;
#endif
if (!driver_properties.driverID)
{
const auto gpu_name = get_name();
if (gpu_name.find("Microsoft Direct3D12") != umax)
{
return driver_vendor::DOZEN;
}
if (gpu_name.find("RADV") != umax)
{
return driver_vendor::RADV;
}
if (gpu_name.find("Radeon") != umax)
{
return driver_vendor::AMD;
}
if (gpu_name.find("NVIDIA") != umax || gpu_name.find("GeForce") != umax || gpu_name.find("Quadro") != umax)
{
if (gpu_name.find("NVK") != umax)
{
return driver_vendor::NVK;
}
return driver_vendor::NVIDIA;
}
if (gpu_name.find("Intel") != umax)
{
#ifdef _WIN32
return driver_vendor::INTEL;
#else
return driver_vendor::ANV;
#endif
}
if (gpu_name.find("llvmpipe") != umax)
{
return driver_vendor::LAVAPIPE;
}
if (gpu_name.find("V3D") != umax)
{
return driver_vendor::V3DV;
}
return driver_vendor::unknown;
}
else
{
switch (driver_properties.driverID)
{
case VK_DRIVER_ID_AMD_PROPRIETARY_KHR:
case VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR:
return driver_vendor::AMD;
case VK_DRIVER_ID_MESA_RADV_KHR:
return driver_vendor::RADV;
case VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR:
return driver_vendor::NVIDIA;
case VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR:
return driver_vendor::INTEL;
case VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR:
return driver_vendor::ANV;
case VK_DRIVER_ID_MESA_DOZEN:
return driver_vendor::DOZEN;
case VK_DRIVER_ID_MESA_LLVMPIPE:
return driver_vendor::LAVAPIPE;
case VK_DRIVER_ID_MESA_NVK:
return driver_vendor::NVK;
case VK_DRIVER_ID_MESA_V3DV:
return driver_vendor::V3DV;
default:
// Mobile?
return driver_vendor::unknown;
}
}
}
std::string physical_device::get_driver_version() const
{
switch (get_driver_vendor())
{
case driver_vendor::NVIDIA:
{
// 10 + 8 + 8 + 6
const auto major_version = props.driverVersion >> 22;
const auto minor_version = (props.driverVersion >> 14) & 0xff;
const auto patch = (props.driverVersion >> 6) & 0xff;
const auto revision = (props.driverVersion & 0x3f);
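// e.g. driverVersion 0x71070000 decodes to "452.28.0.0".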
return fmt::format("%u.%u.%u.%u", major_version, minor_version, patch, revision);
}
default:
{
// 10 + 10 + 12 (standard vulkan encoding created with VK_MAKE_VERSION)
return fmt::format("%u.%u.%u", (props.driverVersion >> 22), (props.driverVersion >> 12) & 0x3ff, (props.driverVersion) & 0x3ff);
}
}
}
chip_class physical_device::get_chip_class() const
{
return get_chip_family(props.vendorID, props.deviceID);
}
u32 physical_device::get_queue_count() const
{
if (!queue_props.empty())
return ::size32(queue_props);
u32 count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, nullptr);
return count;
}
const VkQueueFamilyProperties& physical_device::get_queue_properties(u32 queue)
{
if (queue_props.empty())
{
u32 count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, nullptr);
queue_props.resize(count);
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, queue_props.data());
}
if (queue >= queue_props.size())
fmt::throw_exception("Bad queue index passed to get_queue_properties (%u)", queue);
return queue_props[queue];
}
const VkPhysicalDeviceMemoryProperties& physical_device::get_memory_properties() const
{
return memory_properties;
}
const VkPhysicalDeviceLimits& physical_device::get_limits() const
{
return props.limits;
}
physical_device::operator VkPhysicalDevice() const
{
return dev;
}
physical_device::operator VkInstance() const
{
return parent;
}
// Render Device - The actual usable device
void render_device::create(vk::physical_device& pdev, u32 graphics_queue_idx, u32 present_queue_idx, u32 transfer_queue_idx)
{
float queue_priorities[1] = { 0.f };
pgpu = &pdev;
ensure(graphics_queue_idx == present_queue_idx || present_queue_idx == umax); // TODO
std::vector<VkDeviceQueueCreateInfo> device_queues;
auto& graphics_queue = device_queues.emplace_back();
graphics_queue.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
graphics_queue.pNext = nullptr;
graphics_queue.flags = 0;
graphics_queue.queueFamilyIndex = graphics_queue_idx;
graphics_queue.queueCount = 1;
graphics_queue.pQueuePriorities = queue_priorities;
u32 transfer_queue_sub_index = 0;
if (transfer_queue_idx == umax)
{
// Transfer queue must be a valid device queue
rsx_log.warning("Dedicated transfer+compute queue was not found on this GPU. Will use graphics queue instead.");
transfer_queue_idx = graphics_queue_idx;
// Check if we can at least get a second graphics queue
if (pdev.get_queue_properties(graphics_queue_idx).queueCount > 1)
{
rsx_log.notice("Will use a spare graphics queue to push transfer operations.");
graphics_queue.queueCount++;
transfer_queue_sub_index = 1;
}
}
m_graphics_queue_family = graphics_queue_idx;
m_present_queue_family = present_queue_idx;
m_transfer_queue_family = transfer_queue_idx;
if (graphics_queue_idx != transfer_queue_idx)
{
auto& transfer_queue = device_queues.emplace_back();
transfer_queue.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
transfer_queue.pNext = nullptr;
transfer_queue.flags = 0;
transfer_queue.queueFamilyIndex = transfer_queue_idx;
transfer_queue.queueCount = 1;
transfer_queue.pQueuePriorities = queue_priorities;
}
// Set up instance information
std::vector<const char*> requested_extensions = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
// Enable hardware features manually
// Currently we require:
// 1. Anisotropic sampling
// 2. DXT support
// 3. Indexable storage buffers
VkPhysicalDeviceFeatures enabled_features{};
if (pgpu->shader_types_support.allow_float16)
{
requested_extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
}
if (pgpu->optional_features_support.conditional_rendering)
{
requested_extensions.push_back(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME);
}
if (pgpu->optional_features_support.unrestricted_depth_range)
{
requested_extensions.push_back(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
}
if (pgpu->optional_features_support.external_memory_host)
{
requested_extensions.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
requested_extensions.push_back(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME);
}
if (pgpu->optional_features_support.shader_stencil_export)
{
requested_extensions.push_back(VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME);
}
if (pgpu->optional_features_support.sampler_mirror_clamped)
{
requested_extensions.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
}
if (pgpu->descriptor_indexing_support)
{
requested_extensions.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
requested_extensions.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
}
if (pgpu->optional_features_support.framebuffer_loops)
{
requested_extensions.push_back(VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME);
}
if (pgpu->optional_features_support.barycentric_coords)
{
requested_extensions.push_back(VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME);
}
if (pgpu->optional_features_support.custom_border_color)
{
requested_extensions.push_back(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
}
if (pgpu->optional_features_support.synchronization_2)
{
requested_extensions.push_back(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
}
enabled_features.robustBufferAccess = VK_TRUE;
enabled_features.fullDrawIndexUint32 = VK_TRUE;
enabled_features.independentBlend = VK_TRUE;
enabled_features.logicOp = VK_TRUE;
enabled_features.depthClamp = VK_TRUE;
enabled_features.depthBounds = VK_TRUE;
enabled_features.wideLines = VK_TRUE;
enabled_features.largePoints = VK_TRUE;
enabled_features.shaderFloat64 = VK_TRUE;
if (g_cfg.video.antialiasing_level != msaa_level::none)
{
// MSAA features
enabled_features.sampleRateShading = VK_TRUE;
enabled_features.alphaToOne = VK_TRUE;
enabled_features.shaderStorageImageMultisample = VK_TRUE;
// enabled_features.shaderStorageImageReadWithoutFormat = VK_TRUE; // Unused currently, may be needed soon
enabled_features.shaderStorageImageWriteWithoutFormat = VK_TRUE;
}
if (g_cfg.video.precise_zpass_count)
{
enabled_features.occlusionQueryPrecise = VK_TRUE;
}
// enabled_features.shaderSampledImageArrayDynamicIndexing = TRUE; // Unused currently but will be needed soon
enabled_features.shaderClipDistance = VK_TRUE;
// enabled_features.shaderCullDistance = VK_TRUE; // Alt notation of clip distance
enabled_features.samplerAnisotropy = VK_TRUE;
enabled_features.textureCompressionBC = VK_TRUE;
enabled_features.shaderStorageBufferArrayDynamicIndexing = VK_TRUE;
// Optionally disable unsupported stuff
if (!pgpu->features.fullDrawIndexUint32)
{
// There's really nothing we can do about PS3 draw indices, just pray your GPU doesn't crash.
rsx_log.error("Your GPU driver does not fully support 32-bit vertex indices. This may result in graphical corruption or crashes in some cases.");
enabled_features.fullDrawIndexUint32 = VK_FALSE;
}
if (!pgpu->features.shaderStorageImageMultisample || !pgpu->features.shaderStorageImageWriteWithoutFormat)
{
// Disable MSAA if any of these two features are unsupported
if (g_cfg.video.antialiasing_level != msaa_level::none)
{
rsx_log.error("Your GPU driver does not support some required MSAA features. MSAA will be disabled.");
g_cfg.video.antialiasing_level.set(msaa_level::none);
}
enabled_features.sampleRateShading = VK_FALSE;
enabled_features.alphaToOne = VK_FALSE;
enabled_features.shaderStorageImageMultisample = VK_FALSE;
enabled_features.shaderStorageImageWriteWithoutFormat = VK_FALSE;
}
if (!pgpu->features.shaderClipDistance)
{
rsx_log.error("Your GPU does not support shader clip distance. Graphics will not render correctly.");
enabled_features.shaderClipDistance = VK_FALSE;
}
if (!pgpu->features.shaderStorageBufferArrayDynamicIndexing)
{
rsx_log.error("Your GPU does not support shader storage buffer array dynamic indexing. Graphics will not render correctly.");
enabled_features.shaderStorageBufferArrayDynamicIndexing = VK_FALSE;
}
if (!pgpu->features.samplerAnisotropy)
{
rsx_log.error("Your GPU does not support anisotropic filtering. Graphics may not render correctly.");
enabled_features.samplerAnisotropy = VK_FALSE;
}
if (!pgpu->features.shaderFloat64)
{
rsx_log.error("Your GPU does not support double precision floats in shaders. Graphics may not render correctly.");
enabled_features.shaderFloat64 = VK_FALSE;
}
if (!pgpu->features.depthBounds)
{
rsx_log.error("Your GPU does not support depth bounds testing. Graphics may not render correctly.");
enabled_features.depthBounds = VK_FALSE;
}
if (!pgpu->features.largePoints)
{
rsx_log.error("Your GPU does not support large points. Graphics may not render correctly.");
enabled_features.largePoints = VK_FALSE;
}
if (!pgpu->features.wideLines)
{
rsx_log.error("Your GPU does not support wide lines. Graphics may not render correctly.");
enabled_features.wideLines = VK_FALSE;
}
if (!pgpu->features.sampleRateShading && enabled_features.sampleRateShading)
{
rsx_log.error("Your GPU does not support sample rate shading for multisampling. Graphics may be inaccurate when MSAA is enabled.");
enabled_features.sampleRateShading = VK_FALSE;
}
if (!pgpu->features.alphaToOne && enabled_features.alphaToOne)
{
// AMD proprietary drivers do not expose alphaToOne support
rsx_log.error("Your GPU does not support alpha-to-one for multisampling. Graphics may be inaccurate when MSAA is enabled.");
enabled_features.alphaToOne = VK_FALSE;
}
if (!pgpu->features.occlusionQueryPrecise && enabled_features.occlusionQueryPrecise)
{
rsx_log.error("Your GPU does not support precise occlusion queries. Graphics may not render correctly.");
enabled_features.occlusionQueryPrecise = VK_FALSE;
}
if (!pgpu->features.logicOp)
{
rsx_log.error("Your GPU does not support framebuffer logical operations. Graphics may not render correctly.");
enabled_features.logicOp = VK_FALSE;
}
if (!pgpu->features.textureCompressionBC && pgpu->get_driver_vendor() == driver_vendor::V3DV)
{
// v3dv supports BC1-BC3, which is all we require; support is reported as false because not every BC format is implemented
rsx_log.error("Your GPU running on the V3DV driver does not support full texture block compression. Graphics may not render correctly.");
enabled_features.textureCompressionBC = VK_FALSE;
}
VkDeviceCreateInfo device = {};
device.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device.pNext = nullptr;
device.queueCreateInfoCount = ::size32(device_queues);
device.pQueueCreateInfos = device_queues.data();
device.enabledLayerCount = 0;
device.ppEnabledLayerNames = nullptr; // Deprecated
device.enabledExtensionCount = ::size32(requested_extensions);
device.ppEnabledExtensionNames = requested_extensions.data();
device.pEnabledFeatures = &enabled_features;
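// Each optional feature struct below is prepended to the pNext chain: it first captures the
// current chain head, then becomes the new head.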
VkPhysicalDeviceFloat16Int8FeaturesKHR shader_support_info{};
if (pgpu->shader_types_support.allow_float16)
{
// Allow use of f16 type in shaders if possible
shader_support_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
shader_support_info.shaderFloat16 = VK_TRUE;
shader_support_info.pNext = const_cast<void*>(device.pNext);
device.pNext = &shader_support_info;
rsx_log.notice("GPU/driver supports float16 data types natively. Using native float16_t variables if possible.");
}
else
{
rsx_log.notice("GPU/driver lacks support for float16 data types. All float16_t arithmetic will be emulated with float32_t.");
}
VkPhysicalDeviceDescriptorIndexingFeatures indexing_features{};
if (pgpu->descriptor_indexing_support)
{
#define SET_DESCRIPTOR_BITFLAG(field, bit) if (pgpu->descriptor_indexing_support.update_after_bind_mask & (1ull << bit)) indexing_features.field = VK_TRUE
SET_DESCRIPTOR_BITFLAG(descriptorBindingUniformBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingSampledImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingSampledImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageImageUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingUniformTexelBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
SET_DESCRIPTOR_BITFLAG(descriptorBindingStorageTexelBufferUpdateAfterBind, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
#undef SET_DESCRIPTOR_BITFLAG
indexing_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT;
indexing_features.pNext = const_cast<void*>(device.pNext);
device.pNext = &indexing_features;
}
VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT fbo_loop_features{};
if (pgpu->optional_features_support.framebuffer_loops)
{
fbo_loop_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT;
fbo_loop_features.attachmentFeedbackLoopLayout = VK_TRUE;
fbo_loop_features.pNext = const_cast<void*>(device.pNext);
device.pNext = &fbo_loop_features;
}
VkPhysicalDeviceCustomBorderColorFeaturesEXT custom_border_color_features{};
if (pgpu->optional_features_support.custom_border_color)
{
custom_border_color_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT;
custom_border_color_features.customBorderColors = VK_TRUE;
custom_border_color_features.customBorderColorWithoutFormat = VK_TRUE;
custom_border_color_features.pNext = const_cast<void*>(device.pNext);
device.pNext = &custom_border_color_features;
}
VkPhysicalDeviceSynchronization2FeaturesKHR synchronization2_info{};
if (pgpu->optional_features_support.synchronization_2)
{
synchronization2_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES;
synchronization2_info.pNext = const_cast<void*>(device.pNext);
synchronization2_info.synchronization2 = VK_TRUE;
device.pNext = &synchronization2_info;
}
if (auto error = vkCreateDevice(*pgpu, &device, nullptr, &dev))
{
dump_debug_info(requested_extensions, enabled_features);
vk::die_with_error(error);
}
// Dump some diagnostics to the log
rsx_log.notice("%u extensions loaded:", ::size32(requested_extensions));
for (const auto& ext : requested_extensions)
{
rsx_log.notice("** Using %s", ext);
}
// Initialize queues
vkGetDeviceQueue(dev, graphics_queue_idx, 0, &m_graphics_queue);
vkGetDeviceQueue(dev, transfer_queue_idx, transfer_queue_sub_index, &m_transfer_queue);
if (present_queue_idx != umax)
{
vkGetDeviceQueue(dev, present_queue_idx, 0, &m_present_queue);
}
// Import optional function endpoints
if (pgpu->optional_features_support.conditional_rendering)
{
_vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
_vkCmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdEndConditionalRenderingEXT"));
}
if (pgpu->optional_features_support.debug_utils)
{
_vkSetDebugUtilsObjectNameEXT = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(vkGetDeviceProcAddr(dev, "vkSetDebugUtilsObjectNameEXT"));
_vkQueueInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkQueueInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkQueueInsertDebugUtilsLabelEXT"));
_vkCmdInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkCmdInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkCmdInsertDebugUtilsLabelEXT"));
}
if (pgpu->optional_features_support.synchronization_2)
{
_vkCmdSetEvent2KHR = reinterpret_cast<PFN_vkCmdSetEvent2KHR>(vkGetDeviceProcAddr(dev, "vkCmdSetEvent2KHR"));
_vkCmdWaitEvents2KHR = reinterpret_cast<PFN_vkCmdWaitEvents2KHR>(vkGetDeviceProcAddr(dev, "vkCmdWaitEvents2KHR"));
_vkCmdPipelineBarrier2KHR = reinterpret_cast<PFN_vkCmdPipelineBarrier2KHR>(vkGetDeviceProcAddr(dev, "vkCmdPipelineBarrier2KHR"));
}
memory_map = vk::get_memory_mapping(pdev);
m_formats_support = vk::get_optimal_tiling_supported_formats(pdev);
m_pipeline_binding_table = vk::get_pipeline_binding_table(pdev);
if (pgpu->optional_features_support.external_memory_host)
{
memory_map._vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(vkGetDeviceProcAddr(dev, "vkGetMemoryHostPointerPropertiesEXT"));
}
if (g_cfg.video.disable_vulkan_mem_allocator)
{
m_allocator = std::make_unique<vk::mem_allocator_vk>(*this, pdev);
}
else
{
m_allocator = std::make_unique<vk::mem_allocator_vma>(*this, pdev);
}
// Useful for debugging different VRAM configurations
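// The configured limit is in MiB; scale by 0x100000 (1 MiB) to get bytes.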
const u64 vram_allocation_limit = g_cfg.video.vk.vram_allocation_limit * 0x100000ull;
memory_map.device_local_total_bytes = std::min(memory_map.device_local_total_bytes, vram_allocation_limit);
}
void render_device::destroy()
{
if (g_render_device == this)
{
g_render_device = nullptr;
}
if (dev && pgpu)
{
if (m_allocator)
{
m_allocator->destroy();
m_allocator.reset();
}
vkDestroyDevice(dev, nullptr);
dev = nullptr;
memory_map = {};
m_formats_support = {};
}
}
const VkFormatProperties render_device::get_format_properties(VkFormat format) const
{
auto found = pgpu->format_properties.find(format);
if (found != pgpu->format_properties.end())
{
return found->second;
}
auto& props = pgpu->format_properties[format];
vkGetPhysicalDeviceFormatProperties(*pgpu, format, &props);
return props;
}
bool render_device::get_compatible_memory_type(u32 typeBits, u32 desired_mask, u32* type_index) const
{
VkPhysicalDeviceMemoryProperties mem_infos = pgpu->get_memory_properties();
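// typeBits is a VkMemoryRequirements::memoryTypeBits-style mask; each set bit marks a candidate memory type index.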
for (u32 i = 0; i < 32; i++)
{
if ((typeBits & 1) == 1)
{
if ((mem_infos.memoryTypes[i].propertyFlags & desired_mask) == desired_mask)
{
if (type_index)
{
*type_index = i;
}
return true;
}
}
typeBits >>= 1;
}
return false;
}
void render_device::rebalance_memory_type_usage()
{
// Rebalance device local memory types
memory_map.device_local.rebalance();
}
void render_device::dump_debug_info(
const std::vector<const char*>& requested_extensions,
const VkPhysicalDeviceFeatures& requested_features) const
{
rsx_log.notice("Dumping requested extensions...");
auto device_extensions = vk::supported_extensions(vk::supported_extensions::enumeration_class::device, nullptr, *pgpu);
for (const auto& ext : requested_extensions)
{
rsx_log.notice("[%s] %s", device_extensions.is_supported(ext) ? "Supported" : "Not supported", ext);
}
rsx_log.notice("Dumping requested features...");
const auto& supported_features = pgpu->features;
#define TEST_VK_FEATURE(name) \
if (requested_features.name) {\
if (supported_features.name) \
rsx_log.notice("[Supported] "#name); \
else \
rsx_log.error("[Not supported] "#name); \
}
TEST_VK_FEATURE(robustBufferAccess);
TEST_VK_FEATURE(fullDrawIndexUint32);
TEST_VK_FEATURE(imageCubeArray);
TEST_VK_FEATURE(independentBlend);
TEST_VK_FEATURE(geometryShader);
TEST_VK_FEATURE(tessellationShader);
TEST_VK_FEATURE(sampleRateShading);
TEST_VK_FEATURE(dualSrcBlend);
TEST_VK_FEATURE(logicOp);
TEST_VK_FEATURE(multiDrawIndirect);
TEST_VK_FEATURE(drawIndirectFirstInstance);
TEST_VK_FEATURE(depthClamp);
TEST_VK_FEATURE(depthBiasClamp);
TEST_VK_FEATURE(fillModeNonSolid);
TEST_VK_FEATURE(depthBounds);
TEST_VK_FEATURE(wideLines);
TEST_VK_FEATURE(largePoints);
TEST_VK_FEATURE(alphaToOne);
TEST_VK_FEATURE(multiViewport);
TEST_VK_FEATURE(samplerAnisotropy);
TEST_VK_FEATURE(textureCompressionETC2);
TEST_VK_FEATURE(textureCompressionASTC_LDR);
TEST_VK_FEATURE(textureCompressionBC);
TEST_VK_FEATURE(occlusionQueryPrecise);
TEST_VK_FEATURE(pipelineStatisticsQuery);
TEST_VK_FEATURE(vertexPipelineStoresAndAtomics);
TEST_VK_FEATURE(fragmentStoresAndAtomics);
TEST_VK_FEATURE(shaderTessellationAndGeometryPointSize);
TEST_VK_FEATURE(shaderImageGatherExtended);
TEST_VK_FEATURE(shaderStorageImageExtendedFormats);
TEST_VK_FEATURE(shaderStorageImageMultisample);
TEST_VK_FEATURE(shaderStorageImageReadWithoutFormat);
TEST_VK_FEATURE(shaderStorageImageWriteWithoutFormat);
TEST_VK_FEATURE(shaderUniformBufferArrayDynamicIndexing);
TEST_VK_FEATURE(shaderSampledImageArrayDynamicIndexing);
TEST_VK_FEATURE(shaderStorageBufferArrayDynamicIndexing);
TEST_VK_FEATURE(shaderStorageImageArrayDynamicIndexing);
TEST_VK_FEATURE(shaderClipDistance);
TEST_VK_FEATURE(shaderCullDistance);
TEST_VK_FEATURE(shaderFloat64);
TEST_VK_FEATURE(shaderInt64);
TEST_VK_FEATURE(shaderInt16);
TEST_VK_FEATURE(shaderResourceResidency);
TEST_VK_FEATURE(shaderResourceMinLod);
TEST_VK_FEATURE(sparseBinding);
TEST_VK_FEATURE(sparseResidencyBuffer);
TEST_VK_FEATURE(sparseResidencyImage2D);
TEST_VK_FEATURE(sparseResidencyImage3D);
TEST_VK_FEATURE(sparseResidency2Samples);
TEST_VK_FEATURE(sparseResidency4Samples);
TEST_VK_FEATURE(sparseResidency8Samples);
TEST_VK_FEATURE(sparseResidency16Samples);
TEST_VK_FEATURE(sparseResidencyAliased);
TEST_VK_FEATURE(variableMultisampleRate);
TEST_VK_FEATURE(inheritedQueries);
#undef TEST_VK_FEATURE
}
// Shared Util
memory_type_mapping get_memory_mapping(const vk::physical_device& dev)
{
VkPhysicalDevice pdev = dev;
VkPhysicalDeviceMemoryProperties memory_properties;
vkGetPhysicalDeviceMemoryProperties(pdev, &memory_properties);
memory_type_mapping result;
result.device_local_total_bytes = 0;
result.host_visible_total_bytes = 0;
result.device_bar_total_bytes = 0;
// Sort the confusingly laid out heap-type map into something easier to scan.
// Not performance-critical, this method is called once at initialization.
struct memory_type
{
u32 type_index;
VkFlags flags;
VkDeviceSize size;
};
struct heap_type_map_entry
{
VkMemoryHeap heap;
std::vector<memory_type> types;
};
std::vector<heap_type_map_entry> memory_heap_map;
for (u32 i = 0; i < memory_properties.memoryHeapCount; ++i)
{
memory_heap_map.push_back(
{
.heap = memory_properties.memoryHeaps[i],
.types = {}
});
}
for (u32 i = 0; i < memory_properties.memoryTypeCount; i++)
{
auto& type_info = memory_properties.memoryTypes[i];
memory_heap_map[type_info.heapIndex].types.push_back({ i, type_info.propertyFlags, 0 });
}
auto find_memory_type_with_property = [&memory_heap_map](VkFlags desired_flags, VkFlags excluded_flags)
{
std::vector<memory_type> results;
for (auto& heap : memory_heap_map)
{
for (auto &type : heap.types)
{
if (((type.flags & desired_flags) == desired_flags) && !(type.flags & excluded_flags))
{
// Match, only once allowed per heap!
results.push_back({ type.type_index, type.flags, heap.heap.size });
break;
}
}
}
return results;
};
auto device_local_types = find_memory_type_with_property(
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
(VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD));
auto host_coherent_types = find_memory_type_with_property(
(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT),
0);
auto bar_memory_types = find_memory_type_with_property(
(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
0);
if (host_coherent_types.empty())
{
rsx_log.warning("[Performance Warning] Could not identify a cached upload heap. Will fall back to uncached transport.");
host_coherent_types = find_memory_type_with_property(
(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT),
0);
}
ensure(!device_local_types.empty());
ensure(!host_coherent_types.empty());
// BAR heap, currently parked for future use, I have some plans for it (kd-11)
for (auto& type : bar_memory_types)
{
result.device_bar.push(type.type_index, type.size);
result.device_bar_total_bytes += type.size;
}
// Generic VRAM access, requires some minor prioritization based on flags
// Most devices have a 'PURE' device local type, pin that as the first priority
// Internally, there will be some reshuffling based on memory load later, but this is rare
if (device_local_types.size() > 1)
{
std::sort(device_local_types.begin(), device_local_types.end(), [](const auto& a, const auto& b)
{
if (a.flags == b.flags)
{
return a.size > b.size;
}
return (a.flags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) || (b.flags != VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT && a.size > b.size);
});
}
for (auto& type : device_local_types)
{
result.device_local.push(type.type_index, type.size);
result.device_local_total_bytes += type.size;
}
// Sort upload heap entries based on size.
if (host_coherent_types.size() > 1)
{
std::sort(host_coherent_types.begin(), host_coherent_types.end(), FN(x.size > y.size));
}
for (auto& type : host_coherent_types)
{
result.host_visible_coherent.push(type.type_index, type.size);
result.host_visible_total_bytes += type.size;
}
rsx_log.notice("Detected %llu MB of device local memory", result.device_local_total_bytes / (0x100000));
rsx_log.notice("Detected %llu MB of host coherent memory", result.host_visible_total_bytes / (0x100000));
rsx_log.notice("Detected %llu MB of BAR memory", result.device_bar_total_bytes / (0x100000));
return result;
}
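// The prioritized lists built above are consumed by memory_type_info::get() (memory.cpp).
// A hedged sketch of the intended use, assuming `memory_map` is the value returned here:
//
//   // The first entry is the preferred (pure device-local, largest) type...
//   const u32 preferred_vram_type = memory_map.device_local.first();
//   // ...and the remaining entries act as fallbacks whenever the preferred
//   // type is exhausted or excluded by an allocation's memoryTypeBits mask.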
gpu_formats_support get_optimal_tiling_supported_formats(const physical_device& dev)
{
const auto test_format_features = [&dev](VkFormat format, VkFlags required_features, VkBool32 linear_features) -> bool
{
VkFormatProperties props;
vkGetPhysicalDeviceFormatProperties(dev, format, &props);
const auto supported_features_mask = (linear_features) ? props.linearTilingFeatures : props.optimalTilingFeatures;
return (supported_features_mask & required_features) == required_features;
};
gpu_formats_support result = {};
const VkFlags required_zbuffer_features = (VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
const VkFlags required_colorbuffer_features = (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT);
// Check supported depth formats
result.d24_unorm_s8 = test_format_features(VK_FORMAT_D24_UNORM_S8_UINT, required_zbuffer_features, VK_FALSE);
result.d32_sfloat_s8 = test_format_features(VK_FORMAT_D32_SFLOAT_S8_UINT, required_zbuffer_features, VK_FALSE);
// Hide d24_s8 if force high precision z buffer is enabled
if (g_cfg.video.force_high_precision_z_buffer && result.d32_sfloat_s8)
{
result.d24_unorm_s8 = false;
}
// Checks if linear BGRA8 images can be used for present
result.bgra8_linear = test_format_features(VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_FEATURE_BLIT_SRC_BIT, VK_TRUE);
// Check if device supports RGBA8 format for rendering
if (!test_format_features(VK_FORMAT_R8G8B8A8_UNORM, required_colorbuffer_features, VK_FALSE))
{
// Non-fatal. Most games use the BGRA layout for legacy reasons: older GPUs typically supported BGRA natively while RGBA was emulated.
rsx_log.error("Your GPU and/or driver does not support RGBA8 format. This can cause problems in some rare games that use this memory layout.");
}
// Check if linear RGBA8 images can be used for present
result.argb8_linear = test_format_features(VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_FEATURE_BLIT_SRC_BIT, VK_TRUE);
return result;
}
pipeline_binding_table get_pipeline_binding_table(const vk::physical_device& dev)
{
pipeline_binding_table result{};
// Need to check how many samplers are supported by the driver
const auto usable_samplers = std::min(dev.get_limits().maxPerStageDescriptorSampledImages, 32u);
result.vertex_textures_first_bind_slot = result.textures_first_bind_slot + usable_samplers;
result.total_descriptor_bindings = result.vertex_textures_first_bind_slot + 4;
return result;
}
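// Worked example of the slot arithmetic above, assuming the default base slot of
// textures_first_bind_slot is some value B and the driver reports >= 32 sampled images:
//
//   usable_samplers                 = min(maxPerStageDescriptorSampledImages, 32) = 32
//   vertex_textures_first_bind_slot = B + 32
//   total_descriptor_bindings       = B + 32 + 4   // 4 vertex texture slots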
}
| 40,884 | C++ | .cpp | 930 | 40.35914 | 193 | 0.759499 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,462 | image.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/image.cpp |
#include "stdafx.h"
#include "barriers.h"
#include "device.h"
#include "image.h"
#include "image_helpers.h"
#include "../VKResourceManager.h"
#include <memory>
namespace vk
{
void image::validate(const vk::render_device& dev, const VkImageCreateInfo& info) const
{
const auto gpu_limits = dev.gpu().get_limits();
u32 longest_dim, dim_limit;
switch (info.imageType)
{
case VK_IMAGE_TYPE_1D:
longest_dim = info.extent.width;
dim_limit = gpu_limits.maxImageDimension1D;
break;
case VK_IMAGE_TYPE_2D:
longest_dim = std::max(info.extent.width, info.extent.height);
dim_limit = (info.flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) ? gpu_limits.maxImageDimensionCube : gpu_limits.maxImageDimension2D;
break;
case VK_IMAGE_TYPE_3D:
longest_dim = std::max({ info.extent.width, info.extent.height, info.extent.depth });
dim_limit = gpu_limits.maxImageDimension3D;
break;
default:
fmt::throw_exception("Unreachable");
}
if (longest_dim > dim_limit)
{
// Longest dimension exceeds the limit. Can happen when using MSAA + very high resolution scaling
// Just kill the application at this point.
fmt::throw_exception(
"The renderer requested an image larger than the limit allowed for by your GPU hardware. "
"Turn down your resolution scale and/or disable MSAA to fit within the image budget.");
}
}
image::image(const vk::render_device& dev,
const memory_type_info& memory_type,
u32 access_flags,
VkImageType image_type,
VkFormat format,
u32 width, u32 height, u32 depth,
u32 mipmaps, u32 layers,
VkSampleCountFlagBits samples,
VkImageLayout initial_layout,
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags image_flags,
vmm_allocation_pool allocation_pool,
rsx::format_class format_class)
: m_device(dev)
{
info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
info.imageType = image_type;
info.format = format;
info.extent = { width, height, depth };
info.mipLevels = mipmaps;
info.arrayLayers = layers;
info.samples = samples;
info.tiling = tiling;
info.usage = usage;
info.flags = image_flags;
info.initialLayout = initial_layout;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
std::array<u32, 2> concurrency_queue_families = {
dev.get_graphics_queue_family(),
dev.get_transfer_queue_family()
};
if (image_flags & VK_IMAGE_CREATE_SHAREABLE_RPCS3)
{
info.sharingMode = VK_SHARING_MODE_CONCURRENT;
info.queueFamilyIndexCount = ::size32(concurrency_queue_families);
info.pQueueFamilyIndices = concurrency_queue_families.data();
}
create_impl(dev, access_flags, memory_type, allocation_pool);
m_storage_aspect = get_aspect_flags(format);
if (format_class == RSX_FORMAT_CLASS_UNDEFINED)
{
if (m_storage_aspect != VK_IMAGE_ASPECT_COLOR_BIT)
{
rsx_log.error("Depth/stencil textures must have format class explicitly declared");
}
else
{
format_class = RSX_FORMAT_CLASS_COLOR;
}
}
m_format_class = format_class;
}
// TODO: Ctor that uses a provided memory heap
image::~image()
{
vkDestroyImage(m_device, value, nullptr);
}
void image::create_impl(const vk::render_device& dev, u32 access_flags, const memory_type_info& memory_type, vmm_allocation_pool allocation_pool)
{
ensure(!value && !memory);
validate(dev, info);
const bool nullable = !!(info.flags & VK_IMAGE_CREATE_ALLOW_NULL_RPCS3);
info.flags &= ~VK_IMAGE_CREATE_SPECIAL_FLAGS_RPCS3;
CHECK_RESULT(vkCreateImage(m_device, &info, nullptr, &value));
VkMemoryRequirements memory_req;
vkGetImageMemoryRequirements(m_device, value, &memory_req);
const auto allocation_type_info = memory_type.get(dev, access_flags, memory_req.memoryTypeBits);
if (!allocation_type_info)
{
fmt::throw_exception("No compatible memory type was found!");
}
memory = std::make_shared<vk::memory_block>(m_device, memory_req.size, memory_req.alignment, allocation_type_info, allocation_pool, nullable);
if (auto device_mem = memory->get_vk_device_memory();
device_mem != VK_NULL_HANDLE) [[likely]]
{
CHECK_RESULT(vkBindImageMemory(m_device, value, device_mem, memory->get_vk_device_memory_offset()));
current_layout = info.initialLayout;
}
else
{
ensure(nullable);
vkDestroyImage(m_device, value, nullptr);
value = VK_NULL_HANDLE;
}
}
u32 image::width() const
{
return info.extent.width;
}
u32 image::height() const
{
return info.extent.height;
}
u32 image::depth() const
{
return info.extent.depth;
}
u32 image::mipmaps() const
{
return info.mipLevels;
}
u32 image::layers() const
{
return info.arrayLayers;
}
u8 image::samples() const
{
return u8(info.samples);
}
VkFormat image::format() const
{
return info.format;
}
VkImageType image::type() const
{
return info.imageType;
}
VkSharingMode image::sharing_mode() const
{
return info.sharingMode;
}
VkImageAspectFlags image::aspect() const
{
return m_storage_aspect;
}
rsx::format_class image::format_class() const
{
return m_format_class;
}
void image::push_layout(const command_buffer& cmd, VkImageLayout layout)
{
ensure(current_queue_family == VK_QUEUE_FAMILY_IGNORED || current_queue_family == cmd.get_queue_family());
m_layout_stack.push(current_layout);
change_image_layout(cmd, this, layout);
}
void image::push_barrier(const command_buffer& cmd, VkImageLayout layout)
{
ensure(current_queue_family == VK_QUEUE_FAMILY_IGNORED || current_queue_family == cmd.get_queue_family());
m_layout_stack.push(current_layout);
insert_texture_barrier(cmd, this, layout);
}
void image::pop_layout(const command_buffer& cmd)
{
ensure(current_queue_family == VK_QUEUE_FAMILY_IGNORED || current_queue_family == cmd.get_queue_family());
ensure(!m_layout_stack.empty());
auto layout = m_layout_stack.top();
m_layout_stack.pop();
change_image_layout(cmd, this, layout);
}
void image::queue_acquire(const command_buffer& cmd, VkImageLayout new_layout)
{
ensure(m_layout_stack.empty());
ensure(current_queue_family != cmd.get_queue_family());
if (info.sharingMode == VK_SHARING_MODE_EXCLUSIVE || current_layout != new_layout)
{
VkImageSubresourceRange range = { aspect(), 0, mipmaps(), 0, layers() };
const u32 src_queue_family = info.sharingMode == VK_SHARING_MODE_EXCLUSIVE ? current_queue_family : VK_QUEUE_FAMILY_IGNORED;
const u32 dst_queue_family = info.sharingMode == VK_SHARING_MODE_EXCLUSIVE ? cmd.get_queue_family() : VK_QUEUE_FAMILY_IGNORED;
change_image_layout(cmd, value, current_layout, new_layout, range, src_queue_family, dst_queue_family, 0u, ~0u);
}
current_layout = new_layout;
current_queue_family = cmd.get_queue_family();
}
void image::queue_release(const command_buffer& src_queue_cmd, u32 dst_queue_family, VkImageLayout new_layout)
{
ensure(current_queue_family == src_queue_cmd.get_queue_family());
ensure(m_layout_stack.empty());
if (info.sharingMode == VK_SHARING_MODE_EXCLUSIVE || current_layout != new_layout)
{
VkImageSubresourceRange range = { aspect(), 0, mipmaps(), 0, layers() };
const u32 src_queue_family = info.sharingMode == VK_SHARING_MODE_EXCLUSIVE ? current_queue_family : VK_QUEUE_FAMILY_IGNORED;
const u32 dst_queue_family2 = info.sharingMode == VK_SHARING_MODE_EXCLUSIVE ? dst_queue_family : VK_QUEUE_FAMILY_IGNORED;
change_image_layout(src_queue_cmd, value, current_layout, new_layout, range, src_queue_family, dst_queue_family2, ~0u, 0u);
}
current_layout = new_layout;
current_queue_family = dst_queue_family;
}
void image::change_layout(const command_buffer& cmd, VkImageLayout new_layout)
{
// This is implicitly an acquire op
if (const auto new_queue_family = cmd.get_queue_family();
current_queue_family == VK_QUEUE_FAMILY_IGNORED)
{
current_queue_family = new_queue_family;
}
else if (current_queue_family != new_queue_family)
{
queue_acquire(cmd, new_layout);
return;
}
if (current_layout == new_layout)
{
return;
}
ensure(m_layout_stack.empty());
change_image_layout(cmd, this, new_layout);
current_queue_family = cmd.get_queue_family();
}
void image::set_debug_name(const std::string& name)
{
if (g_render_device->get_debug_utils_support())
{
VkDebugUtilsObjectNameInfoEXT name_info{};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
name_info.objectType = VK_OBJECT_TYPE_IMAGE;
name_info.objectHandle = reinterpret_cast<u64>(value);
name_info.pObjectName = name.c_str();
g_render_device->_vkSetDebugUtilsObjectNameEXT(m_device, &name_info);
}
}
image_view::image_view(VkDevice dev, VkImage image, VkImageViewType view_type, VkFormat format, VkComponentMapping mapping, VkImageSubresourceRange range)
: m_device(dev)
{
info.format = format;
info.image = image;
info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
info.components = mapping;
info.viewType = view_type;
info.subresourceRange = range;
create_impl();
}
image_view::image_view(VkDevice dev, VkImageViewCreateInfo create_info)
: info(create_info)
, m_device(dev)
{
create_impl();
}
image_view::image_view(VkDevice dev, vk::image* resource, VkImageViewType view_type, const VkComponentMapping& mapping, const VkImageSubresourceRange& range)
: m_device(dev), m_resource(resource)
{
info.format = resource->info.format;
info.image = resource->value;
info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
info.components = mapping;
info.subresourceRange = range;
if (view_type == VK_IMAGE_VIEW_TYPE_MAX_ENUM)
{
switch (resource->info.imageType)
{
case VK_IMAGE_TYPE_1D:
info.viewType = VK_IMAGE_VIEW_TYPE_1D;
break;
case VK_IMAGE_TYPE_2D:
if (resource->info.flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
info.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
else if (resource->info.arrayLayers == 1)
info.viewType = VK_IMAGE_VIEW_TYPE_2D;
else
info.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
break;
case VK_IMAGE_TYPE_3D:
info.viewType = VK_IMAGE_VIEW_TYPE_3D;
break;
default:
fmt::throw_exception("Unreachable");
}
info.subresourceRange.layerCount = resource->info.arrayLayers;
}
else
{
info.viewType = view_type;
}
create_impl();
}
image_view::~image_view()
{
vkDestroyImageView(m_device, value, nullptr);
}
u32 image_view::encoded_component_map() const
{
#if (VK_DISABLE_COMPONENT_SWIZZLE)
u32 result = static_cast<u32>(info.components.a) - 1;
result |= (static_cast<u32>(info.components.r) - 1) << 3;
result |= (static_cast<u32>(info.components.g) - 1) << 6;
result |= (static_cast<u32>(info.components.b) - 1) << 9;
return result;
#else
return 0;
#endif
}
vk::image* image_view::image() const
{
return m_resource;
}
void image_view::create_impl()
{
#if (VK_DISABLE_COMPONENT_SWIZZLE)
// Force identity
const auto mapping = info.components;
info.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
#endif
CHECK_RESULT(vkCreateImageView(m_device, &info, nullptr, &value));
#if (VK_DISABLE_COMPONENT_SWIZZLE)
// Restore requested mapping
info.components = mapping;
#endif
}
viewable_image* viewable_image::clone()
{
// Destructive cloning. The clone grabs the GPU objects owned by this instance.
// This instance can be rebuilt in-place by calling create_impl() which will create a duplicate now owned by this.
auto result = new viewable_image();
result->m_device = this->m_device;
result->info = this->info;
result->value = this->value;
result->memory = std::move(this->memory);
result->views = std::move(this->views);
this->value = VK_NULL_HANDLE;
return result;
}
image_view* viewable_image::get_view(const rsx::texture_channel_remap_t& remap, VkImageAspectFlags mask)
{
u32 remap_encoding = remap.encoded;
if (remap_encoding == VK_REMAP_IDENTITY)
{
if (native_component_map.a == VK_COMPONENT_SWIZZLE_A &&
native_component_map.r == VK_COMPONENT_SWIZZLE_R &&
native_component_map.g == VK_COMPONENT_SWIZZLE_G &&
native_component_map.b == VK_COMPONENT_SWIZZLE_B)
{
remap_encoding = RSX_TEXTURE_REMAP_IDENTITY;
}
}
const u64 storage_key = remap_encoding | (static_cast<u64>(mask) << 32);
auto found = views.find(storage_key);
if (found != views.end())
{
ensure(found->second->info.subresourceRange.aspectMask & mask);
return found->second.get();
}
VkComponentMapping real_mapping;
switch (remap_encoding)
{
case VK_REMAP_IDENTITY:
real_mapping = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
break;
case RSX_TEXTURE_REMAP_IDENTITY:
real_mapping = native_component_map;
break;
default:
real_mapping = vk::apply_swizzle_remap
(
{ native_component_map.a, native_component_map.r, native_component_map.g, native_component_map.b },
remap
);
break;
}
const VkImageSubresourceRange range = { aspect() & mask, 0, info.mipLevels, 0, info.arrayLayers };
ensure(range.aspectMask);
auto view = std::make_unique<vk::image_view>(*g_render_device, this, VK_IMAGE_VIEW_TYPE_MAX_ENUM, real_mapping, range);
auto result = view.get();
views.emplace(storage_key, std::move(view));
return result;
}
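// Layout of the 64-bit view cache key computed above:
//   bits  0-31 : remap_encoding (swizzle remap, or one of the identity sentinels)
//   bits 32-63 : requested aspect mask
// So the same remap requested with VK_IMAGE_ASPECT_DEPTH_BIT vs VK_IMAGE_ASPECT_STENCIL_BIT
// yields two distinct cache entries.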
void viewable_image::set_native_component_layout(VkComponentMapping new_layout)
{
if (new_layout.r != native_component_map.r ||
new_layout.g != native_component_map.g ||
new_layout.b != native_component_map.b ||
new_layout.a != native_component_map.a)
{
native_component_map = new_layout;
// Safely discard existing views
auto gc = vk::get_resource_manager();
for (auto& p : views)
{
gc->dispose(p.second);
}
views.clear();
}
}
}
| 14,337 | C++ | .cpp | 412 | 30.463592 | 159 | 0.701966 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,463 | memory.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/memory.cpp |
#include "device.h"
#include "memory.h"
namespace
{
// Copied from rsx_utils.h. Move to a more convenient location
template<typename T, typename U>
static inline T align2(T value, U alignment)
{
return ((value + alignment - 1) / alignment) * alignment;
}
}
namespace vk
{
memory_type_info::memory_type_info(u32 index, u64 size)
{
push(index, size);
}
void memory_type_info::push(u32 index, u64 size)
{
type_ids.push_back(index);
type_sizes.push_back(size);
}
memory_type_info::const_iterator memory_type_info::begin() const
{
return type_ids.data();
}
memory_type_info::const_iterator memory_type_info::end() const
{
return type_ids.data() + type_ids.size();
}
u32 memory_type_info::first() const
{
ensure(!type_ids.empty());
return type_ids.front();
}
size_t memory_type_info::count() const
{
return type_ids.size();
}
memory_type_info::operator bool() const
{
return !type_ids.empty();
}
bool memory_type_info::operator == (const memory_type_info& other) const
{
if (type_ids.size() != other.type_ids.size())
{
return false;
}
switch (type_ids.size())
{
case 1:
return type_ids[0] == other.type_ids[0];
case 2:
return ((type_ids[0] == other.type_ids[0] && type_ids[1] == other.type_ids[1]) ||
(type_ids[0] == other.type_ids[1] && type_ids[1] == other.type_ids[0]));
default:
for (const auto& id : other.type_ids)
{
if (std::find(type_ids.begin(), type_ids.end(), id) == type_ids.end())
{
return false;
}
}
return true;
}
}
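// The comparison above is order-insensitive. A minimal sketch (the sizes are
// illustrative and do not participate in the comparison):
//
//   vk::memory_type_info a; a.push(0, 256ull << 20); a.push(2, 64ull << 20);
//   vk::memory_type_info b; b.push(2, 64ull << 20);  b.push(0, 256ull << 20);
//   ensure(a == b); // same type indices in a different order compare equal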
memory_type_info memory_type_info::get(const render_device& dev, u32 access_flags, u32 type_mask) const
{
memory_type_info result{};
for (size_t i = 0; i < type_ids.size(); ++i)
{
if (type_mask & (1 << type_ids[i]))
{
result.push(type_ids[i], type_sizes[i]);
}
}
if (!result)
{
u32 type;
if (dev.get_compatible_memory_type(type_mask, access_flags, &type))
{
result = { type, 0ull };
}
}
return result;
}
void memory_type_info::rebalance()
{
// Re-order indices with the least used one first.
// This will avoid constant pressure on the memory budget in low memory systems.
if (type_ids.size() <= 1)
{
// Nothing to do
return;
}
std::vector<std::pair<u32, u64>> free_memory_map;
const auto num_types = type_ids.size();
u64 last_free = UINT64_MAX;
bool to_reorder = false;
for (u32 i = 0; i < num_types; ++i)
{
const auto heap_size = type_sizes[i];
const auto type_id = type_ids[i];
ensure(heap_size > 0);
const u64 used_mem = vmm_get_application_memory_usage({ type_id, 0ull });
const u64 free_mem = (used_mem >= heap_size) ? 0ull : (heap_size - used_mem);
to_reorder |= (free_mem > last_free);
last_free = free_mem;
free_memory_map.push_back({ i, free_mem });
}
if (!to_reorder) [[likely]]
{
return;
}
ensure(free_memory_map.size() == num_types);
std::sort(free_memory_map.begin(), free_memory_map.end(), FN(x.second > y.second));
std::vector<u32> new_type_ids(num_types);
std::vector<u64> new_type_sizes(num_types);
for (u32 i = 0; i < num_types; ++i)
{
const u32 ref = free_memory_map[i].first;
new_type_ids[i] = type_ids[ref];
new_type_sizes[i] = type_sizes[ref];
}
type_ids = new_type_ids;
type_sizes = new_type_sizes;
rsx_log.warning("Rebalanced memory types successfully");
}
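// Worked example of the reordering above (illustrative numbers):
//   type_ids = { 7, 8 }, type_sizes = { 8 GiB, 256 MiB }
//   usage    : type 7 has 7.9 GiB allocated, type 8 has 16 MiB allocated
//   free     : type 7 -> ~0.1 GiB, type 8 -> 240 MiB
// Since free(8) > free(7), the lists are reordered to { 8, 7 } and subsequent
// allocations try the less-pressured type first.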
mem_allocator_base::mem_allocator_base(const vk::render_device& dev, VkPhysicalDevice)
: m_device(dev), m_allocation_flags(0)
{}
mem_allocator_vma::mem_allocator_vma(const vk::render_device& dev, VkPhysicalDevice pdev)
: mem_allocator_base(dev, pdev)
{
// Initialize stats pool
std::fill(stats.begin(), stats.end(), VmaBudget{});
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = pdev;
allocatorInfo.device = dev;
std::vector<VkDeviceSize> heap_limits;
const auto vram_allocation_limit = g_cfg.video.vk.vram_allocation_limit * 0x100000ull;
if (vram_allocation_limit < dev.get_memory_mapping().device_local_total_bytes)
{
VkPhysicalDeviceMemoryProperties memory_properties;
vkGetPhysicalDeviceMemoryProperties(pdev, &memory_properties);
for (u32 i = 0; i < memory_properties.memoryHeapCount; ++i)
{
const u64 max_sz = (memory_properties.memoryHeaps[i].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
? vram_allocation_limit
: VK_WHOLE_SIZE;
heap_limits.push_back(max_sz);
}
allocatorInfo.pHeapSizeLimit = heap_limits.data();
}
CHECK_RESULT(vmaCreateAllocator(&allocatorInfo, &m_allocator));
// Allow fastest possible allocation on start
set_fastest_allocation_flags();
}
void mem_allocator_vma::destroy()
{
vmaDestroyAllocator(m_allocator);
}
mem_allocator_vk::mem_handle_t mem_allocator_vma::alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail)
{
VmaAllocation vma_alloc;
VkMemoryRequirements mem_req = {};
VmaAllocationCreateInfo create_info = {};
VkResult error_code = VK_ERROR_UNKNOWN;
auto do_vma_alloc = [&]() -> std::tuple<VkResult, u32>
{
for (const auto& memory_type_index : memory_type)
{
mem_req.memoryTypeBits = 1u << memory_type_index;
mem_req.size = ::align2(block_sz, alignment);
mem_req.alignment = alignment;
create_info.memoryTypeBits = 1u << memory_type_index;
create_info.flags = m_allocation_flags;
error_code = vmaAllocateMemory(m_allocator, &mem_req, &create_info, &vma_alloc, nullptr);
if (error_code == VK_SUCCESS)
{
return { VK_SUCCESS, memory_type_index };
}
}
return { error_code, ~0u };
};
// On successful allocation, simply tag the transaction and carry on.
{
const auto [status, type] = do_vma_alloc();
if (status == VK_SUCCESS)
{
vmm_notify_memory_allocated(vma_alloc, type, block_sz, pool);
return vma_alloc;
}
}
const auto severity = (throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
{
// Out of memory. Try again.
const auto [status, type] = do_vma_alloc();
if (status == VK_SUCCESS)
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(vma_alloc, type, block_sz, pool);
return vma_alloc;
}
}
if (!throw_on_fail)
{
return VK_NULL_HANDLE;
}
die_with_error(error_code);
fmt::throw_exception("Unreachable! Error_code=0x%x", static_cast<u32>(error_code));
}
void mem_allocator_vma::free(mem_handle_t mem_handle)
{
vmm_notify_memory_freed(mem_handle);
vmaFreeMemory(m_allocator, static_cast<VmaAllocation>(mem_handle));
}
void* mem_allocator_vma::map(mem_handle_t mem_handle, u64 offset, u64 /*size*/)
{
void* data = nullptr;
CHECK_RESULT(vmaMapMemory(m_allocator, static_cast<VmaAllocation>(mem_handle), &data));
// Add offset
data = static_cast<u8*>(data) + offset;
return data;
}
void mem_allocator_vma::unmap(mem_handle_t mem_handle)
{
vmaUnmapMemory(m_allocator, static_cast<VmaAllocation>(mem_handle));
}
VkDeviceMemory mem_allocator_vma::get_vk_device_memory(mem_handle_t mem_handle)
{
if (!mem_handle)
{
return VK_NULL_HANDLE;
}
VmaAllocationInfo alloc_info;
vmaGetAllocationInfo(m_allocator, static_cast<VmaAllocation>(mem_handle), &alloc_info);
return alloc_info.deviceMemory;
}
u64 mem_allocator_vma::get_vk_device_memory_offset(mem_handle_t mem_handle)
{
VmaAllocationInfo alloc_info;
vmaGetAllocationInfo(m_allocator, static_cast<VmaAllocation>(mem_handle), &alloc_info);
return alloc_info.offset;
}
f32 mem_allocator_vma::get_memory_usage()
{
vmaGetBudget(m_allocator, stats.data());
float max_usage = 0.f;
for (const auto& info : stats)
{
if (!info.budget)
{
break;
}
const float this_usage = (info.usage * 100.f) / info.budget;
max_usage = std::max(max_usage, this_usage);
}
return max_usage;
}
void mem_allocator_vma::set_safest_allocation_flags()
{
m_allocation_flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
}
void mem_allocator_vma::set_fastest_allocation_flags()
{
m_allocation_flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
}
mem_allocator_vk::mem_handle_t mem_allocator_vk::alloc(u64 block_sz, u64 /*alignment*/, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail)
{
VkResult error_code = VK_ERROR_UNKNOWN;
VkDeviceMemory memory;
VkMemoryAllocateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
info.allocationSize = block_sz;
auto do_vk_alloc = [&]() -> std::tuple<VkResult, u32>
{
for (const auto& memory_type_index : memory_type)
{
info.memoryTypeIndex = memory_type_index;
error_code = vkAllocateMemory(m_device, &info, nullptr, &memory);
if (error_code == VK_SUCCESS)
{
return { error_code, memory_type_index };
}
}
return { error_code, ~0u };
};
{
const auto [status, type] = do_vk_alloc();
if (status == VK_SUCCESS)
{
vmm_notify_memory_allocated(memory, type, block_sz, pool);
return memory;
}
}
const auto severity = (throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
{
// Out of memory. Try again.
const auto [status, type] = do_vk_alloc();
if (status == VK_SUCCESS)
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(memory, type, block_sz, pool);
return memory;
}
}
if (!throw_on_fail)
{
return VK_NULL_HANDLE;
}
die_with_error(error_code);
fmt::throw_exception("Unreachable! Error_code=0x%x", static_cast<u32>(error_code));
}
void mem_allocator_vk::free(mem_handle_t mem_handle)
{
vmm_notify_memory_freed(mem_handle);
vkFreeMemory(m_device, static_cast<VkDeviceMemory>(mem_handle), nullptr);
}
void* mem_allocator_vk::map(mem_handle_t mem_handle, u64 offset, u64 size)
{
void* data = nullptr;
CHECK_RESULT(vkMapMemory(m_device, static_cast<VkDeviceMemory>(mem_handle), offset, std::max<u64>(size, 1u), 0, &data));
return data;
}
void mem_allocator_vk::unmap(mem_handle_t mem_handle)
{
vkUnmapMemory(m_device, static_cast<VkDeviceMemory>(mem_handle));
}
VkDeviceMemory mem_allocator_vk::get_vk_device_memory(mem_handle_t mem_handle)
{
return static_cast<VkDeviceMemory>(mem_handle);
}
u64 mem_allocator_vk::get_vk_device_memory_offset(mem_handle_t /*mem_handle*/)
{
return 0;
}
f32 mem_allocator_vk::get_memory_usage()
{
return 0.f;
}
mem_allocator_base* get_current_mem_allocator()
{
return g_render_device->get_allocator();
}
memory_block::memory_block(VkDevice dev, u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool nullable)
: m_device(dev), m_size(block_sz)
{
m_mem_allocator = get_current_mem_allocator();
m_mem_handle = m_mem_allocator->alloc(block_sz, alignment, memory_type, pool, !nullable);
}
memory_block::~memory_block()
{
if (m_mem_allocator && m_mem_handle)
{
m_mem_allocator->free(m_mem_handle);
}
}
memory_block_host::memory_block_host(VkDevice dev, void* host_pointer, u64 size, const memory_type_info& memory_type) :
m_device(dev), m_mem_handle(VK_NULL_HANDLE), m_host_pointer(host_pointer)
{
VkMemoryAllocateInfo alloc_info{};
VkImportMemoryHostPointerInfoEXT import_info{};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.memoryTypeIndex = memory_type.first();
alloc_info.allocationSize = size;
alloc_info.pNext = &import_info;
import_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT;
import_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
import_info.pHostPointer = host_pointer;
CHECK_RESULT(vkAllocateMemory(m_device, &alloc_info, nullptr, &m_mem_handle));
}
memory_block_host::~memory_block_host()
{
vkFreeMemory(m_device, m_mem_handle, nullptr);
}
VkDeviceMemory memory_block_host::get_vk_device_memory()
{
return m_mem_handle;
}
u64 memory_block_host::get_vk_device_memory_offset()
{
return 0ull;
}
void* memory_block_host::map(u64 offset, u64 /*size*/)
{
return reinterpret_cast<char*>(m_host_pointer) + offset;
}
void memory_block_host::unmap()
{
// NOP
}
VkDeviceMemory memory_block::get_vk_device_memory()
{
return m_mem_allocator->get_vk_device_memory(m_mem_handle);
}
u64 memory_block::get_vk_device_memory_offset()
{
return m_mem_allocator->get_vk_device_memory_offset(m_mem_handle);
}
u64 memory_block::size() const
{
return m_size;
}
void* memory_block::map(u64 offset, u64 size)
{
return m_mem_allocator->map(m_mem_handle, offset, size);
}
void memory_block::unmap()
{
m_mem_allocator->unmap(m_mem_handle);
}
}
| 12,966 | C++ | .cpp | 418 | 27.801435 | 171 | 0.69882 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,464 | commands.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/commands.cpp |
#include "commands.h"
#include "device.h"
#include "shared.h"
#include "sync.h"
namespace vk
{
// This queue flushing method is to be implemented by the backend, as its behavior depends on the configuration
void queue_submit(const queue_submit_t& submit_info, VkBool32 flush);
void command_pool::create(vk::render_device& dev, u32 queue_family_id)
{
owner = &dev;
queue_family = queue_family_id;
VkCommandPoolCreateInfo infos = {};
infos.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
infos.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
infos.queueFamilyIndex = queue_family;
CHECK_RESULT(vkCreateCommandPool(dev, &infos, nullptr, &pool));
}
void command_pool::destroy()
{
if (!pool)
return;
vkDestroyCommandPool((*owner), pool, nullptr);
pool = nullptr;
}
vk::render_device& command_pool::get_owner() const
{
return (*owner);
}
u32 command_pool::get_queue_family() const
{
return queue_family;
}
command_pool::operator VkCommandPool() const
{
return pool;
}
void command_buffer::create(command_pool& cmd_pool)
{
VkCommandBufferAllocateInfo infos = {};
infos.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
infos.commandBufferCount = 1;
infos.commandPool = +cmd_pool;
infos.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
CHECK_RESULT(vkAllocateCommandBuffers(cmd_pool.get_owner(), &infos, &commands));
m_submit_fence = new fence(cmd_pool.get_owner());
pool = &cmd_pool;
}
void command_buffer::destroy()
{
vkFreeCommandBuffers(pool->get_owner(), (*pool), 1, &commands);
if (m_submit_fence)
{
//vkDestroyFence(pool->get_owner(), m_submit_fence, nullptr);
delete m_submit_fence;
m_submit_fence = nullptr;
}
}
void command_buffer::begin()
{
if (m_submit_fence && is_pending)
{
wait_for_fence(m_submit_fence);
is_pending = false;
//CHECK_RESULT(vkResetFences(pool->get_owner(), 1, &m_submit_fence));
m_submit_fence->reset();
CHECK_RESULT(vkResetCommandBuffer(commands, 0));
}
if (is_open)
return;
VkCommandBufferInheritanceInfo inheritance_info = {};
inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
VkCommandBufferBeginInfo begin_infos = {};
begin_infos.pInheritanceInfo = &inheritance_info;
begin_infos.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_infos.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
CHECK_RESULT(vkBeginCommandBuffer(commands, &begin_infos));
is_open = true;
}
void command_buffer::end()
{
if (!is_open)
{
rsx_log.error("commandbuffer->end was called but commandbuffer is not in a recording state");
return;
}
CHECK_RESULT(vkEndCommandBuffer(commands));
is_open = false;
}
void command_buffer::submit(queue_submit_t& submit_info, VkBool32 flush)
{
if (is_open)
{
rsx_log.error("commandbuffer->submit was called whilst the command buffer is in a recording state");
return;
}
// Check for hanging queries to avoid driver hang
ensure((flags & cb_has_open_query) == 0); // Closing and submitting a command buffer with a query still open would hang the driver
if (!submit_info.pfence)
{
submit_info.pfence = m_submit_fence;
is_pending = bool(submit_info.pfence);
}
submit_info.commands = this->commands;
queue_submit(submit_info, flush);
clear_flags();
}
}
| 3,356 | C++ | .cpp | 108 | 28.12963 | 114 | 0.724256 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | true | false | false | true | false | false |
5,465 | sampler.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/sampler.cpp |
#include "memory.h"
#include "sampler.h"
#include "../../color_utils.h"
#include "../../rsx_utils.h"
namespace vk
{
extern VkBorderColor get_border_color(u32);
static VkBorderColor get_closest_border_color_enum(const color4f& color4)
{
if ((color4.r + color4.g + color4.b) > 1.35f)
{
// If the color elements are brighter than roughly 0.5 on average, use a white border
return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
}
if (color4.a > 0.5f)
return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
}
border_color_t::border_color_t(const color4f& color, VkFormat fmt, VkImageAspectFlags aspect)
: format(fmt), aspect(aspect), color_value(color)
{
const auto encoded_color = rsx::encode_color_to_storage_key(color);
value = vk::get_border_color(encoded_color);
if (value != VK_BORDER_COLOR_FLOAT_CUSTOM_EXT)
{
// Nothing to do
return;
}
if (!g_render_device->get_custom_border_color_support())
{
value = get_closest_border_color_enum(color_value);
return;
}
if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
{
// We must use INT color format for stencil
value = VK_BORDER_COLOR_INT_CUSTOM_EXT;
auto int_color = static_cast<s32>(color_value.z * 255.f);
color_value = color4f(std::bit_cast<f32>(int_color));
}
ensure(aspect <= VK_IMAGE_ASPECT_METADATA_BIT);
storage_key = static_cast<u64>(encoded_color)
| (static_cast<u64>(aspect) << 32)
| (static_cast<u64>(fmt) << 34);
}
border_color_t::border_color_t(VkBorderColor value)
: storage_key(0)
, value(value)
, format(VK_FORMAT_UNDEFINED)
, aspect(VK_IMAGE_ASPECT_COLOR_BIT)
, color_value(0.f)
{}
sampler::sampler(const vk::render_device& dev, VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const vk::border_color_t& border_color,
VkBool32 depth_compare, VkCompareOp depth_compare_mode)
: m_device(dev), m_border_color(border_color)
{
info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
info.addressModeU = clamp_u;
info.addressModeV = clamp_v;
info.addressModeW = clamp_w;
info.anisotropyEnable = max_anisotropy >= 2. && dev.get_anisotropic_filtering_support();
info.compareEnable = depth_compare;
info.unnormalizedCoordinates = unnormalized_coordinates;
info.mipLodBias = mipLodBias;
info.maxAnisotropy = max_anisotropy;
info.maxLod = max_lod;
info.minLod = min_lod;
info.magFilter = mag_filter;
info.minFilter = min_filter;
info.mipmapMode = mipmap_mode;
info.compareOp = depth_compare_mode;
info.borderColor = border_color.value;
VkSamplerCustomBorderColorCreateInfoEXT custom_color_info;
if (border_color.value >= VK_BORDER_COLOR_FLOAT_CUSTOM_EXT)
{
custom_color_info =
{
.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
.format = border_color.format
};
std::memcpy(custom_color_info.customBorderColor.float32, border_color.color_value.rgba, sizeof(float) * 4);
info.pNext = &custom_color_info;
}
CHECK_RESULT(vkCreateSampler(m_device, &info, nullptr, &value));
vmm_notify_object_allocated(VMM_ALLOCATION_POOL_SAMPLER);
}
sampler::~sampler()
{
vkDestroySampler(m_device, value, nullptr);
vmm_notify_object_freed(VMM_ALLOCATION_POOL_SAMPLER);
}
bool sampler::matches(VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const vk::border_color_t& border_color,
VkBool32 depth_compare, VkCompareOp depth_compare_mode)
{
if (info.magFilter != mag_filter || info.minFilter != min_filter || info.mipmapMode != mipmap_mode ||
info.addressModeU != clamp_u || info.addressModeV != clamp_v || info.addressModeW != clamp_w ||
info.compareEnable != depth_compare || info.unnormalizedCoordinates != unnormalized_coordinates ||
!rsx::fcmp(info.maxLod, max_lod) || !rsx::fcmp(info.mipLodBias, mipLodBias) || !rsx::fcmp(info.minLod, min_lod) ||
!rsx::fcmp(info.maxAnisotropy, max_anisotropy) ||
info.compareOp != depth_compare_mode || m_border_color != border_color)
return false;
return true;
}
sampler_pool_key_t sampler_pool_t::compute_storage_key(
VkSamplerAddressMode clamp_u, VkSamplerAddressMode clamp_v, VkSamplerAddressMode clamp_w,
VkBool32 unnormalized_coordinates, float mipLodBias, float max_anisotropy, float min_lod, float max_lod,
VkFilter min_filter, VkFilter mag_filter, VkSamplerMipmapMode mipmap_mode, const vk::border_color_t& border_color,
VkBool32 depth_compare, VkCompareOp depth_compare_mode)
{
sampler_pool_key_t key{};
bool use_border_encoding = false;
if (border_color.value > VK_BORDER_COLOR_INT_OPAQUE_WHITE)
{
// If there is no clamp to border in use, we can ignore the border color entirely
if (clamp_u == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
clamp_v == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
clamp_w == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)
{
use_border_encoding = true;
}
}
key.base_key = u16(clamp_u) | u64(clamp_v) << 3 | u64(clamp_w) << 6;
key.base_key |= u64(unnormalized_coordinates) << 9; // 1 bit
key.base_key |= u64(min_filter) << 10 | u64(mag_filter) << 11; // 1 bit each
key.base_key |= u64(mipmap_mode) << 12; // 1 bit
if (!use_border_encoding)
{
// Bits 13-15 hold the border color enum (bit 16 is used by depth_compare below)
key.base_key |= u64(border_color.value) << 13;
}
else
{
key.border_color_key = border_color.storage_key;
}
key.base_key |= u64(depth_compare) << 16; // 1 bit
key.base_key |= u64(depth_compare_mode) << 17; // 3 bits
key.base_key |= u64(rsx::encode_fx12(min_lod)) << 20; // 12 bits
key.base_key |= u64(rsx::encode_fx12(max_lod)) << 32; // 12 bits
key.base_key |= u64(rsx::encode_fx12<true>(mipLodBias)) << 44; // 13 bits (fx12 + sign)
key.base_key |= u64(max_anisotropy) << 57; // 4 bits
return key;
}
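// Summary of the base_key bit layout assembled above (roughly 61 bits used):
//   [ 0: 2] clamp_u          [ 3: 5] clamp_v        [ 6: 8] clamp_w
//   [ 9]    unnormalized     [10]    min_filter     [11]    mag_filter
//   [12]    mipmap_mode      [13:15] border color enum (when the custom path is not taken)
//   [16]    depth_compare    [17:19] depth_compare_mode
//   [20:31] min_lod (fx12)   [32:43] max_lod (fx12) [44:56] mipLodBias (signed fx12)
//   [57:61] max_anisotropy (integer part, up to 16x)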
void sampler_pool_t::clear()
{
m_generic_sampler_pool.clear();
m_custom_color_sampler_pool.clear();
}
cached_sampler_object_t* sampler_pool_t::find(const sampler_pool_key_t& key) const
{
if (!key.border_color_key) [[ likely ]]
{
const auto found = m_generic_sampler_pool.find(key.base_key);
return found == m_generic_sampler_pool.end() ? nullptr : found->second.get();
}
const auto block = m_custom_color_sampler_pool.equal_range(key.base_key);
for (auto it = block.first; it != block.second; ++it)
{
if (it->second->key.border_color_key == key.border_color_key)
{
return it->second.get();
}
}
return nullptr;
}
cached_sampler_object_t* sampler_pool_t::emplace(const sampler_pool_key_t& key, std::unique_ptr<cached_sampler_object_t>& object)
{
object->key = key;
if (!key.border_color_key) [[ likely ]]
{
const auto [iterator, _unused] = m_generic_sampler_pool.emplace(key.base_key, std::move(object));
return iterator->second.get();
}
const auto [iterator, _unused] = m_custom_color_sampler_pool.emplace(key.base_key, std::move(object));
return iterator->second.get();
}
std::vector<std::unique_ptr<cached_sampler_object_t>> sampler_pool_t::collect(std::function<bool(const cached_sampler_object_t&)> predicate)
{
std::vector<std::unique_ptr<cached_sampler_object_t>> result;
const auto collect_fn = [&](auto& container)
{
for (auto it = container.begin(); it != container.end();)
{
if (!predicate(*it->second))
{
it++;
continue;
}
result.emplace_back(std::move(it->second));
it = container.erase(it);
}
};
collect_fn(m_generic_sampler_pool);
collect_fn(m_custom_color_sampler_pool);
return result;
}
}
| 7,977 | C++ | .cpp | 200 | 36.525 | 141 | 0.708732 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,466 | chip_class.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/chip_class.cpp |
#include "chip_class.h"
#include "util/logs.hpp"
namespace vk
{
static const chip_family_table s_AMD_family_tree = []()
{
chip_family_table table;
table.default_ = chip_class::AMD_gcn_generic;
// AMD cards. See https://github.com/torvalds/linux/blob/master/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
table.add(0x67C0, 0x67FF, chip_class::AMD_polaris); // Polaris 10/11
table.add(0x6FDF, chip_class::AMD_polaris); // RX 580 2048SP
table.add(0x6980, 0x699F, chip_class::AMD_polaris); // Polaris 12
table.add(0x694C, 0x694F, chip_class::AMD_vega); // Vega M
table.add(0x6860, 0x686F, chip_class::AMD_vega); // Vega Pro
table.add(0x687F, chip_class::AMD_vega); // Vega 56/64
table.add(0x69A0, 0x69AF, chip_class::AMD_vega); // Vega 12
table.add(0x66A0, 0x66AF, chip_class::AMD_vega); // Vega 20
table.add(0x15DD, chip_class::AMD_vega); // Raven Ridge
table.add(0x15D8, chip_class::AMD_vega); // Raven Ridge
table.add(0x7310, 0x731F, chip_class::AMD_navi1x); // Navi 10
table.add(0x7360, 0x7362, chip_class::AMD_navi1x); // Navi 12
table.add(0x7340, 0x734F, chip_class::AMD_navi1x); // Navi 14
table.add(0x73A0, 0x73BF, chip_class::AMD_navi2x); // Navi 21 (Sienna Cichlid)
table.add(0x73C0, 0x73DF, chip_class::AMD_navi2x); // Navi 22 (Navy Flounder)
table.add(0x73E0, 0x73FF, chip_class::AMD_navi2x); // Navi 23 (Dimgrey Cavefish)
table.add(0x7420, 0x743F, chip_class::AMD_navi2x); // Navi 24 (Beige Goby)
table.add(0x163F, chip_class::AMD_navi2x); // Navi 2X (Van Gogh)
table.add(0x164D, 0x1681, chip_class::AMD_navi2x); // Navi 2X (Yellow Carp)
table.add(0x7440, 0x745F, chip_class::AMD_navi3x); // Navi 31 (Only 744c, NAVI31XTX is confirmed)
table.add(0x7460, 0x747F, chip_class::AMD_navi3x); // Navi 32 (Unverified)
table.add(0x7480, 0x749F, chip_class::AMD_navi3x); // Navi 33 (Unverified)
return table;
}();
static const chip_family_table s_NV_family_tree = []()
{
chip_family_table table;
table.default_ = chip_class::NV_generic;
// NV cards. See https://envytools.readthedocs.io/en/latest/hw/pciid.html
// NOTE: Since NV device IDs are linearly incremented per generation, there is no need to carefully check all the ranges
table.add(0x1180, 0x11FA, chip_class::NV_kepler); // GK104, 106
table.add(0x0FC0, 0x0FFF, chip_class::NV_kepler); // GK107
table.add(0x1003, 0x102F, chip_class::NV_kepler); // GK110, GK210
table.add(0x1280, 0x12BA, chip_class::NV_kepler); // GK208
table.add(0x1381, 0x13B0, chip_class::NV_maxwell); // GM107
table.add(0x1340, 0x134D, chip_class::NV_maxwell); // GM108
table.add(0x13C0, 0x13D9, chip_class::NV_maxwell); // GM204
table.add(0x1401, 0x1427, chip_class::NV_maxwell); // GM206
table.add(0x15F7, 0x15F9, chip_class::NV_pascal); // GP100 (Tesla P100)
table.add(0x1B00, 0x1D80, chip_class::NV_pascal);
table.add(0x1D81, 0x1DBA, chip_class::NV_volta);
table.add(0x1E02, 0x1F54, chip_class::NV_turing); // TU102, TU104, TU106, TU106M, TU106GL (RTX 20 series)
table.add(0x1F82, 0x1FB9, chip_class::NV_turing); // TU117, TU117M, TU117GL
table.add(0x2182, 0x21D1, chip_class::NV_turing); // TU116, TU116M, TU116GL
table.add(0x20B0, 0x20BE, chip_class::NV_ampere); // GA100
table.add(0x2204, 0x25AF, chip_class::NV_ampere); // GA10x (RTX 30 series)
table.add(0x2684, 0x27FF, chip_class::NV_lovelace); // AD102, AD103 (RTX40 series)
return table;
}();
static const chip_family_table s_INTEL_family_tree = []()
{
chip_family_table table;
table.default_ = chip_class::INTEL_generic; // UHD and other older chips we don't care about
// INTEL DG2+ cards. See https://github.com/torvalds/linux/blob/d88520ad73b79e71e3ddf08de335b8520ae41c5c/include/drm/i915_pciids.h#L702
// Naming on DG2 is pretty consistent, XX9X is mobile arc, XXAX is desktop and XXBX is Pro
table.add(0x5690, 0x5692, chip_class::INTEL_alchemist); // G10M
table.add(0x56A0, 0x56A2, chip_class::INTEL_alchemist); // G10
table.add(0x5693, 0x5695, chip_class::INTEL_alchemist); // G11M
table.add(0x56A5, 0x56A6, chip_class::INTEL_alchemist); // G11
table.add(0x56B0, 0x56B1, chip_class::INTEL_alchemist); // G11-Pro
table.add(0x5696, 0x5697, chip_class::INTEL_alchemist); // G12M
table.add(0x56A3, 0x56A4, chip_class::INTEL_alchemist); // G12
table.add(0x56B2, 0x56B3, chip_class::INTEL_alchemist); // G12-Pro
return table;
}();
chip_class g_chip_class = chip_class::unknown;
chip_class get_chip_family()
{
return g_chip_class;
}
chip_class get_chip_family(u32 vendor_id, u32 device_id)
{
if (vendor_id == 0x10DE)
{
return s_NV_family_tree.find(device_id);
}
if (vendor_id == 0x1002)
{
return s_AMD_family_tree.find(device_id);
}
if (vendor_id == 0x106B)
{
return chip_class::MVK_apple;
}
if (vendor_id == 0x8086)
{
return s_INTEL_family_tree.find(device_id);
}
return chip_class::unknown;
}
void chip_family_table::add(u32 first, u32 last, chip_class family)
{
for (auto i = first; i <= last; ++i)
{
lut[i] = family;
}
}
void chip_family_table::add(u32 id, chip_class family)
{
lut[id] = family;
}
chip_class chip_family_table::find(u32 device_id) const
{
if (auto found = lut.find(device_id); found != lut.end())
{
return found->second;
}
rsx_log.warning("Unknown chip with device ID 0x%x", device_id);
return default_;
}
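// Minimal usage sketch, cross-checked against the tables above:
//   get_chip_family(0x10DE, 0x2204); // NVIDIA GA10x   -> chip_class::NV_ampere
//   get_chip_family(0x1002, 0x73BF); // AMD Navi 21    -> chip_class::AMD_navi2x
//   get_chip_family(0x1234, 0x0000); // unknown vendor -> chip_class::unknown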
}
| 5,442 | C++ | .cpp | 120 | 42.425 | 137 | 0.693135 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,467 | buffer_object.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/buffer_object.cpp |
#include "buffer_object.h"
#include "device.h"
#include "shared.h"
namespace vk
{
buffer_view::buffer_view(VkDevice dev, VkBuffer buffer, VkFormat format, VkDeviceSize offset, VkDeviceSize size)
: m_device(dev)
{
info.buffer = buffer;
info.format = format;
info.offset = offset;
info.range = size;
info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
CHECK_RESULT(vkCreateBufferView(m_device, &info, nullptr, &value));
}
buffer_view::~buffer_view()
{
vkDestroyBufferView(m_device, value, nullptr);
}
bool buffer_view::in_range(u32 address, u32 size, u32& offset) const
{
if (address < info.offset)
return false;
const u32 _offset = address - static_cast<u32>(info.offset);
if (info.range < _offset)
return false;
const auto remaining = info.range - _offset;
if (size <= remaining)
{
offset = _offset;
return true;
}
return false;
}
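// A minimal sketch of the range test above, assuming a view created with
// offset = 0x1000 and range = 0x100:
//   u32 view_offset = 0;
//   view.in_range(0x1040, 0x20, view_offset); // true,  view_offset = 0x40
//   view.in_range(0x10F0, 0x20, view_offset); // false, 0x20 bytes do not fit in the tail
//   view.in_range(0x0FFF, 0x10, view_offset); // false, address precedes the view start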
buffer::buffer(const vk::render_device& dev, u64 size, const memory_type_info& memory_type, u32 access_flags, VkBufferUsageFlags usage, VkBufferCreateFlags flags, vmm_allocation_pool allocation_pool)
: m_device(dev)
{
info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
info.flags = flags;
info.size = size;
info.usage = usage;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
CHECK_RESULT(vkCreateBuffer(m_device, &info, nullptr, &value));
// Allocate vram for this buffer
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);
memory_type_info allocation_type_info = memory_type.get(dev, access_flags, memory_reqs.memoryTypeBits);
if (!allocation_type_info)
{
fmt::throw_exception("No compatible memory type was found!");
}
memory = std::make_unique<memory_block>(m_device, memory_reqs.size, memory_reqs.alignment, allocation_type_info, allocation_pool);
vkBindBufferMemory(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset());
}
buffer::buffer(const vk::render_device& dev, VkBufferUsageFlags usage, void* host_pointer, u64 size)
: m_device(dev)
{
info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
info.flags = 0;
info.size = size;
info.usage = usage;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkExternalMemoryBufferCreateInfoKHR ex_info;
ex_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR;
ex_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
ex_info.pNext = nullptr;
info.pNext = &ex_info;
CHECK_RESULT(vkCreateBuffer(m_device, &info, nullptr, &value));
auto& memory_map = dev.get_memory_mapping();
ensure(memory_map._vkGetMemoryHostPointerPropertiesEXT);
VkMemoryHostPointerPropertiesEXT memory_properties{};
memory_properties.sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT;
CHECK_RESULT(memory_map._vkGetMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);
auto required_memory_type_bits = memory_reqs.memoryTypeBits & memory_properties.memoryTypeBits;
if (!required_memory_type_bits)
{
// AMD driver bug. Buffers created with external memory extension return type bits of 0
rsx_log.warning("Could not match buffer requirements and host pointer properties.");
required_memory_type_bits = memory_properties.memoryTypeBits;
}
const auto allocation_type_info = memory_map.host_visible_coherent.get(dev,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
required_memory_type_bits);
if (!allocation_type_info)
{
fmt::throw_exception("No compatible memory type was found!");
}
memory = std::make_unique<memory_block_host>(m_device, host_pointer, size, allocation_type_info);
CHECK_RESULT(vkBindBufferMemory(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset()));
}
buffer::~buffer()
{
vkDestroyBuffer(m_device, value, nullptr);
}
void* buffer::map(u64 offset, u64 size)
{
return memory->map(offset, size);
}
void buffer::unmap()
{
memory->unmap();
}
u32 buffer::size() const
{
return static_cast<u32>(info.size);
}
}
| 4,229 | C++ | .cpp | 109 | 35.825688 | 200 | 0.751648 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | true | false | false | true | false | false |
5,468 | data_heap.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/data_heap.cpp |
#include "barriers.h"
#include "data_heap.h"
#include "device.h"
#include "../../RSXOffload.h"
#include "../VKHelpers.h"
#include "../VKResourceManager.h"
#include "Emu/IdManager.h"
#include <memory>
namespace vk
{
data_heap g_upload_heap;
void data_heap::create(VkBufferUsageFlags usage, usz size, const char* name, usz guard, VkBool32 notify)
{
::data_heap::init(size, name, guard);
const auto& memory_map = g_render_device->get_memory_mapping();
VkFlags memory_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
auto memory_index = memory_map.host_visible_coherent;
if (!(get_heap_compatible_buffer_types() & usage))
{
rsx_log.warning("Buffer usage %u is not heap-compatible using this driver, explicit staging buffer in use", usage);
shadow = std::make_unique<buffer>(*g_render_device, size, memory_index, memory_flags, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, VMM_ALLOCATION_POOL_SYSTEM);
usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
memory_flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
memory_index = memory_map.device_local;
}
heap = std::make_unique<buffer>(*g_render_device, size, memory_index, memory_flags, usage, 0, VMM_ALLOCATION_POOL_SYSTEM);
initial_size = size;
notify_on_grow = bool(notify);
}
void data_heap::destroy()
{
if (mapped)
{
unmap(true);
}
heap.reset();
shadow.reset();
}
bool data_heap::grow(usz size)
{
if (shadow)
{
// Shadowed. Growing this can be messy as it requires double allocation (macOS only)
rsx_log.error("[%s] Auto-grow of shadowed heaps is not currently supported. This error should typically only be seen on MacOS.", m_name);
return false;
}
// Create a new heap. All sizes are aligned up to a 64 MB boundary, up to a 1 GiB limit
const usz size_limit = 1024 * 0x100000;
usz aligned_new_size = utils::align(m_size + size, 64 * 0x100000);
if (aligned_new_size >= size_limit)
{
// Too large, try to swap out the heap instead of growing.
rsx_log.error("[%s] Pool limit was reached. Will attempt to swap out the current heap.", m_name);
aligned_new_size = size_limit;
}
// Wait for DMA activity to end
g_fxo->get<rsx::dma_manager>().sync();
if (mapped)
{
// Force reset mapping
unmap(true);
}
VkBufferUsageFlags usage = heap->info.usage;
const auto& memory_map = g_render_device->get_memory_mapping();
VkFlags memory_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
auto memory_index = memory_map.host_visible_coherent;
// Update heap information and reset the allocator
::data_heap::init(aligned_new_size, m_name, m_min_guard_size);
// Discard old heap and create a new one. Old heap will be garbage collected when no longer needed
get_resource_manager()->dispose(heap);
heap = std::make_unique<buffer>(*g_render_device, aligned_new_size, memory_index, memory_flags, usage, 0, VMM_ALLOCATION_POOL_SYSTEM);
if (notify_on_grow)
{
raise_status_interrupt(vk::heap_changed);
}
return true;
}
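// Illustrative note, not part of the original source: utils::align above is a
// power-of-two align-up, conventionally implemented as
//   aligned = (value + alignment - 1) & ~(alignment - 1);
// e.g. growing past a 100 MiB requirement with 64 MiB granularity yields a
// 128 MiB heap.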
void* data_heap::map(usz offset, usz size)
{
if (!_ptr)
{
if (shadow)
_ptr = shadow->map(0, shadow->size());
else
_ptr = heap->map(0, heap->size());
mapped = true;
}
if (shadow)
{
dirty_ranges.push_back({ offset, offset, size });
raise_status_interrupt(runtime_state::heap_dirty);
}
return static_cast<u8*>(_ptr) + offset;
}
void data_heap::unmap(bool force)
{
if (force)
{
if (shadow)
shadow->unmap();
else
heap->unmap();
mapped = false;
_ptr = nullptr;
}
}
void data_heap::sync(const vk::command_buffer& cmd)
{
if (!dirty_ranges.empty())
{
ensure(shadow);
ensure(heap);
vkCmdCopyBuffer(cmd, shadow->value, heap->value, ::size32(dirty_ranges), dirty_ranges.data());
dirty_ranges.clear();
insert_buffer_memory_barrier(cmd, heap->value, 0, heap->size(),
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
}
}
bool data_heap::is_dirty() const
{
return !dirty_ranges.empty();
}
bool data_heap::is_critical() const
{
if (!::data_heap::is_critical())
return false;
// By default, allow the size to grow up to 8x larger
// This value is arbitrary; theoretically it is possible to allow infinite stretching to improve performance
const usz soft_limit = initial_size * 8;
if ((m_size + m_min_guard_size) < soft_limit)
return false;
return true;
}
data_heap* get_upload_heap()
{
if (!g_upload_heap.heap)
{
g_upload_heap.create(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 64 * 0x100000, "auxiliary upload heap", 0x100000);
}
return &g_upload_heap;
}
}
| size: 4,868 | language: C++ | extension: .cpp | total_lines: 143 | avg_line_length: 29.566434 | max_line_length: 155 | alphanum_fraction: 0.679639
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: true | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,469 | file_name: scratch.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/scratch.cpp
| content:
#include "barriers.h"
#include "buffer_object.h"
#include "image.h"
#include "sampler.h"
#include "../VKResourceManager.h"
#include "Utilities/address_range.h"
#include <util/asm.hpp>
namespace vk
{
std::unordered_map<VkImageViewType, std::unique_ptr<viewable_image>> g_null_image_views;
std::unordered_map<u32, std::unique_ptr<image>> g_typeless_textures;
VkSampler g_null_sampler = nullptr;
// Scratch memory handling. Use double-buffered resource to significantly cut down on GPU stalls
struct scratch_buffer_pool_t
{
std::array<std::unique_ptr<buffer>, 2> scratch_buffers;
u32 current_index = 0;
std::unique_ptr<buffer>& get_buf()
{
auto& ret = scratch_buffers[current_index];
current_index ^= 1;
return ret;
}
};
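// Illustrative sketch, not part of the original source: the double-buffering in
// scratch_buffer_pool_t generalizes to any resource type. Alternating between
// two slots lets new work be recorded into one resource while the GPU may still
// be consuming the other, which is what cuts down on stalls:
template <typename T>
struct double_buffered_sketch
{
std::array<T, 2> slots{};
u32 index = 0;
T& next()
{
auto& ret = slots[index]; // hand out the current slot
index ^= 1;               // flip so the next call returns the other slot
return ret;
}
};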
std::unordered_map<u32, scratch_buffer_pool_t> g_scratch_buffers_pool;
VkSampler null_sampler()
{
if (g_null_sampler)
return g_null_sampler;
VkSamplerCreateInfo sampler_info = {};
sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
sampler_info.anisotropyEnable = VK_FALSE;
sampler_info.compareEnable = VK_FALSE;
sampler_info.unnormalizedCoordinates = VK_FALSE;
sampler_info.mipLodBias = 0;
sampler_info.maxAnisotropy = 0;
sampler_info.magFilter = VK_FILTER_NEAREST;
sampler_info.minFilter = VK_FILTER_NEAREST;
sampler_info.compareOp = VK_COMPARE_OP_NEVER;
sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
vkCreateSampler(*g_render_device, &sampler_info, nullptr, &g_null_sampler);
return g_null_sampler;
}
vk::image_view* null_image_view(const vk::command_buffer& cmd, VkImageViewType type)
{
if (auto found = g_null_image_views.find(type);
found != g_null_image_views.end())
{
return found->second->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));
}
VkImageType image_type;
u32 num_layers = 1;
u32 flags = 0;
u16 size = 4;
switch (type)
{
case VK_IMAGE_VIEW_TYPE_1D:
image_type = VK_IMAGE_TYPE_1D;
size = 1;
break;
case VK_IMAGE_VIEW_TYPE_2D:
image_type = VK_IMAGE_TYPE_2D;
break;
case VK_IMAGE_VIEW_TYPE_3D:
image_type = VK_IMAGE_TYPE_3D;
break;
case VK_IMAGE_VIEW_TYPE_CUBE:
image_type = VK_IMAGE_TYPE_2D;
flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
num_layers = 6;
break;
case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
image_type = VK_IMAGE_TYPE_2D;
num_layers = 2;
break;
default:
rsx_log.fatal("Unexpected image view type 0x%x", static_cast<u32>(type));
return nullptr;
}
auto& tex = g_null_image_views[type];
tex = std::make_unique<viewable_image>(*g_render_device, g_render_device->get_memory_mapping().device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
image_type, VK_FORMAT_B8G8R8A8_UNORM, size, size, 1, 1, num_layers, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, flags | VK_IMAGE_CREATE_ALLOW_NULL_RPCS3, VMM_ALLOCATION_POOL_SCRATCH);
if (!tex->value)
{
// If we cannot create a 1x1 placeholder, things are truly hopeless.
// The null view is 'nullable' because it is meant for use in emergency situations and we do not wish to invalidate any handles.
fmt::throw_exception("Renderer is out of memory. We could not even squeeze in a 1x1 texture, things are bad.");
}
// Initialize memory to transparent black
tex->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkClearColorValue clear_color = {};
VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, tex->mipmaps(), 0, tex->layers() };
vkCmdClearColorImage(cmd, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
// Prep for shader access
tex->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// Return view
return tex->get_view(rsx::default_remap_vector.with_encoding(VK_REMAP_IDENTITY));
}
vk::image* get_typeless_helper(VkFormat format, rsx::format_class format_class, u32 requested_width, u32 requested_height)
{
auto create_texture = [&]()
{
u32 new_width = utils::align(requested_width, 256u);
u32 new_height = utils::align(requested_height, 256u);
return new vk::image(*g_render_device, g_render_device->get_memory_mapping().device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_IMAGE_TYPE_2D, format, new_width, new_height, 1, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0, VMM_ALLOCATION_POOL_SCRATCH,
format_class);
};
const u32 key = (format_class << 24u) | format;
auto& ptr = g_typeless_textures[key];
if (!ptr || ptr->width() < requested_width || ptr->height() < requested_height)
{
if (ptr)
{
requested_width = std::max(requested_width, ptr->width());
requested_height = std::max(requested_height, ptr->height());
get_resource_manager()->dispose(ptr);
}
ptr.reset(create_texture());
ptr->set_debug_name(fmt::format("Scratch: Format=0x%x", static_cast<u32>(format)));
}
return ptr.get();
}
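// Illustrative note, not part of the original source: the cache key above packs
// the format class into the top byte and the VkFormat into the low bits
// (key = (format_class << 24) | format), so one scratch image is cached per
// (class, format) pair and is only ever grown, never shrunk.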
std::pair<vk::buffer*, bool> get_scratch_buffer(u32 queue_family, u64 min_required_size)
{
auto& scratch_buffer = g_scratch_buffers_pool[queue_family].get_buf();
bool is_new = false;
if (scratch_buffer && scratch_buffer->size() < min_required_size)
{
// Scratch heap cannot fit requirements. Discard it and allocate a new one.
vk::get_resource_manager()->dispose(scratch_buffer);
}
if (!scratch_buffer)
{
// Choose optimal size
const u64 alloc_size = utils::align(min_required_size, 0x100000);
scratch_buffer = std::make_unique<vk::buffer>(*g_render_device, alloc_size,
g_render_device->get_memory_mapping().device_local, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 0, VMM_ALLOCATION_POOL_SCRATCH);
is_new = true;
}
return { scratch_buffer.get(), is_new };
}
vk::buffer* get_scratch_buffer(const vk::command_buffer& cmd, u64 min_required_size, bool zero_memory)
{
const auto [buf, init_mem] = get_scratch_buffer(cmd.get_queue_family(), min_required_size);
if (init_mem || zero_memory)
{
// Zero-initialize the allocated VRAM
const u64 zero_length = init_mem ? buf->size() : utils::align(min_required_size, 4);
vkCmdFillBuffer(cmd, buf->value, 0, zero_length, 0);
insert_buffer_memory_barrier(cmd, buf->value, 0, zero_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT);
}
return buf;
}
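// Illustrative note, not part of the original source: vkCmdFillBuffer is a
// transfer operation, so the barrier above orders the zero-fill against later
// transfer and compute work. The Vulkan spec requires the fill size to be a
// multiple of 4, hence the utils::align(min_required_size, 4) above.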
void clear_scratch_resources()
{
g_null_image_views.clear();
g_scratch_buffers_pool.clear();
g_typeless_textures.clear();
if (g_null_sampler)
{
vkDestroySampler(*g_render_device, g_null_sampler, nullptr);
g_null_sampler = nullptr;
}
}
}
| size: 7,459 | language: C++ | extension: .cpp | total_lines: 175 | avg_line_length: 38.068571 | max_line_length: 162 | alphanum_fraction: 0.705466
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,470 | file_name: shared.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/shared.cpp
| content:
#include "shared.h"
#include "util/logs.hpp"
#ifndef _WIN32
#include <signal.h>
#endif
namespace vk
{
extern void print_debug_markers();
void die_with_error(VkResult error_code, std::string message, std::source_location src_loc)
{
std::string error_message;
int severity = 0; // 0 - die, 1 - warn, 2 - nothing
switch (error_code)
{
case VK_SUCCESS:
case VK_EVENT_SET:
case VK_EVENT_RESET:
case VK_INCOMPLETE:
return;
case VK_SUBOPTIMAL_KHR:
error_message = "Present surface is suboptimal (VK_SUBOPTIMAL_KHR)";
severity = 1;
break;
case VK_NOT_READY:
error_message = "Device or resource busy (VK_NOT_READY)";
break;
case VK_TIMEOUT:
error_message = "Timeout event (VK_TIMEOUT)";
break;
case VK_ERROR_OUT_OF_HOST_MEMORY:
error_message = "Out of host memory (system RAM) (VK_ERROR_OUT_OF_HOST_MEMORY)";
break;
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
error_message = "Out of video memory (VRAM) (VK_ERROR_OUT_OF_DEVICE_MEMORY)";
break;
case VK_ERROR_INITIALIZATION_FAILED:
error_message = "Initialization failed (VK_ERROR_INITIALIZATION_FAILED)";
break;
case VK_ERROR_DEVICE_LOST:
error_message = "Device lost (Driver crashed with unspecified error or stopped responding and recovered) (VK_ERROR_DEVICE_LOST)";
break;
case VK_ERROR_MEMORY_MAP_FAILED:
error_message = "Memory map failed (VK_ERROR_MEMORY_MAP_FAILED)";
break;
case VK_ERROR_LAYER_NOT_PRESENT:
error_message = "Requested layer is not available (Try disabling debug output or install vulkan SDK) (VK_ERROR_LAYER_NOT_PRESENT)";
break;
case VK_ERROR_EXTENSION_NOT_PRESENT:
error_message = "Requested extension not available (VK_ERROR_EXTENSION_NOT_PRESENT)";
break;
case VK_ERROR_FEATURE_NOT_PRESENT:
error_message = "Requested feature not available (VK_ERROR_FEATURE_NOT_PRESENT)";
break;
case VK_ERROR_INCOMPATIBLE_DRIVER:
error_message = "Incompatible driver (VK_ERROR_INCOMPATIBLE_DRIVER)";
break;
case VK_ERROR_TOO_MANY_OBJECTS:
error_message = "Too many objects created (Out of handles) (VK_ERROR_TOO_MANY_OBJECTS)";
break;
case VK_ERROR_FORMAT_NOT_SUPPORTED:
error_message = "Format not supported (VK_ERROR_FORMAT_NOT_SUPPORTED)";
break;
case VK_ERROR_FRAGMENTED_POOL:
error_message = "Fragmented pool (VK_ERROR_FRAGMENTED_POOL)";
break;
case VK_ERROR_SURFACE_LOST_KHR:
error_message = "Surface lost (VK_ERROR_SURFACE_LOST)";
break;
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
error_message = "Native window in use (VK_ERROR_NATIVE_WINDOW_IN_USE_KHR)";
break;
case VK_ERROR_OUT_OF_DATE_KHR:
error_message = "Present surface is out of date (VK_ERROR_OUT_OF_DATE_KHR)";
severity = 1;
break;
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
error_message = "Incompatible display (VK_ERROR_INCOMPATIBLE_DISPLAY_KHR)";
break;
case VK_ERROR_VALIDATION_FAILED_EXT:
error_message = "Validation failed (VK_ERROR_INCOMPATIBLE_DISPLAY_KHR)";
break;
case VK_ERROR_INVALID_SHADER_NV:
error_message = "Invalid shader code (VK_ERROR_INVALID_SHADER_NV)";
break;
case VK_ERROR_OUT_OF_POOL_MEMORY_KHR:
error_message = "Out of pool memory (VK_ERROR_OUT_OF_POOL_MEMORY_KHR)";
break;
case VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR:
error_message = "Invalid external handle (VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR)";
break;
case VK_ERROR_FRAGMENTATION_EXT:
error_message = "Descriptor pool creation failed (VK_ERROR_FRAGMENTATION)";
break;
default:
error_message = fmt::format("Unknown Code (%Xh, %d)%s", static_cast<s32>(error_code), static_cast<s32>(error_code), src_loc);
break;
}
switch (severity)
{
default:
case 0:
print_debug_markers();
if (!message.empty()) message += "\n\n";
fmt::throw_exception("%sAssertion Failed! Vulkan API call failed with unrecoverable error: %s%s", message, error_message, src_loc);
case 1:
rsx_log.error("Vulkan API call has failed with an error but will continue: %s%s", error_message, src_loc);
break;
case 2:
break;
}
}
VKAPI_ATTR VkBool32 VKAPI_CALL dbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT /*objType*/,
u64 /*srcObject*/, usz /*location*/, s32 msgCode,
const char* pLayerPrefix, const char* pMsg, void* /*pUserData*/)
{
if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT)
{
if (strstr(pMsg, "IMAGE_VIEW_TYPE_1D"))
return false;
rsx_log.error("ERROR: [%s] Code %d : %s", pLayerPrefix, msgCode, pMsg);
}
else if (msgFlags & VK_DEBUG_REPORT_WARNING_BIT_EXT)
{
rsx_log.warning("WARNING: [%s] Code %d : %s", pLayerPrefix, msgCode, pMsg);
}
else
{
return false;
}
// Let the app crash..
return false;
}
// Temporarily suppress unused-parameter warnings for the break callback below
#ifndef _MSC_VER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
VkBool32 BreakCallback(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType,
u64 srcObject, usz location, s32 msgCode,
const char* pLayerPrefix, const char* pMsg, void* pUserData)
{
#ifdef _WIN32
DebugBreak();
#else
raise(SIGTRAP);
#endif
return false;
}
}
| size: 5,118 | language: C++ | extension: .cpp | total_lines: 148 | avg_line_length: 30.810811 | max_line_length: 134 | alphanum_fraction: 0.710165
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,471 | file_name: image_helpers.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/image_helpers.cpp
| content:
#include "stdafx.h"
#include "image_helpers.h"
#include "image.h"
#include "util/logs.hpp"
#include "../VKRenderPass.h"
#include "../../color_utils.h"
#include "../../gcm_enums.h"
namespace vk
{
VkComponentMapping default_component_map =
{
VK_COMPONENT_SWIZZLE_R,
VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_A
};
VkImageAspectFlags get_aspect_flags(VkFormat format)
{
switch (format)
{
default:
return VK_IMAGE_ASPECT_COLOR_BIT;
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D32_SFLOAT:
return VK_IMAGE_ASPECT_DEPTH_BIT;
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
}
}
VkComponentMapping apply_swizzle_remap(const std::array<VkComponentSwizzle, 4>& base_remap, const rsx::texture_channel_remap_t& remap_vector)
{
const auto final_mapping = remap_vector.remap(base_remap, VK_COMPONENT_SWIZZLE_ZERO, VK_COMPONENT_SWIZZLE_ONE);
return { final_mapping[1], final_mapping[2], final_mapping[3], final_mapping[0] };
}
void change_image_layout(const vk::command_buffer& cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range,
u32 src_queue_family, u32 dst_queue_family, u32 src_access_mask_bits, u32 dst_access_mask_bits)
{
if (vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
// Prepare an image to match the new layout
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.newLayout = new_layout;
barrier.oldLayout = current_layout;
barrier.image = image;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = 0;
barrier.srcQueueFamilyIndex = src_queue_family;
barrier.dstQueueFamilyIndex = dst_queue_family;
barrier.subresourceRange = range;
VkPipelineStageFlags src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkPipelineStageFlags dst_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
switch (+new_layout)
{
case VK_IMAGE_LAYOUT_GENERAL:
// Avoid this layout as it is unoptimized
barrier.dstAccessMask =
{
VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
};
dst_stage =
{
VK_PIPELINE_STAGE_TRANSFER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
};
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dst_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dst_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dst_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dst_stage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
dst_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
default:
fmt::throw_exception("Attempted to transition to an invalid layout");
}
switch (+current_layout)
{
case VK_IMAGE_LAYOUT_GENERAL:
// Avoid this layout as it is unoptimized
if (new_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL ||
new_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
{
if (range.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
{
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
else
{
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
}
}
else if (new_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
new_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
{
// Finish reading before writing
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
src_stage = VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
else
{
barrier.srcAccessMask =
{
VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
};
src_stage =
{
VK_PIPELINE_STAGE_TRANSFER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
};
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
src_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
src_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
default:
break; //TODO Investigate what happens here
}
barrier.srcAccessMask &= src_access_mask_bits;
barrier.dstAccessMask &= dst_access_mask_bits;
if (!barrier.srcAccessMask) src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
if (!barrier.dstAccessMask) dst_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range)
{
if (image->current_layout == new_layout) return;
change_image_layout(cmd, image->value, image->current_layout, new_layout, range);
image->current_layout = new_layout;
}
void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout)
{
if (image->current_layout == new_layout) return;
change_image_layout(cmd, image->value, image->current_layout, new_layout, { image->aspect(), 0, image->mipmaps(), 0, image->layers() });
image->current_layout = new_layout;
}
}
| size: 7,631 | language: C++ | extension: .cpp | total_lines: 192 | avg_line_length: 35 | max_line_length: 166 | alphanum_fraction: 0.723444
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,472 | file_name: descriptors.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp
| content:
#include "Emu/IdManager.h"
#include "descriptors.h"
#include "garbage_collector.h"
namespace vk
{
// Error handler callback
extern void on_descriptor_pool_fragmentation(bool fatal);
namespace descriptors
{
class dispatch_manager
{
public:
inline void flush_all()
{
for (auto& set : m_notification_list)
{
set->flush();
}
}
void register_(descriptor_set* set)
{
// Rare event, upon creation of a new set tracker.
// Check for spurious 'new' events when the aux context is taking over
for (const auto& set_ : m_notification_list)
{
if (set_ == set) return;
}
m_notification_list.push_back(set);
rsx_log.warning("[descriptor_manager::register] Now monitoring %u descriptor sets", m_notification_list.size());
}
void deregister(descriptor_set* set)
{
for (auto it = m_notification_list.begin(); it != m_notification_list.end(); ++it)
{
if (*it == set)
{
*it = m_notification_list.back();
m_notification_list.pop_back();
break;
}
}
rsx_log.warning("[descriptor_manager::deregister] Now monitoring %u descriptor sets", m_notification_list.size());
}
dispatch_manager() = default;
private:
rsx::simple_array<descriptor_set*> m_notification_list;
dispatch_manager(const dispatch_manager&) = delete;
dispatch_manager& operator = (const dispatch_manager&) = delete;
};
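// Illustrative note, not part of the original source: deregister() above uses
// the swap-with-last-and-pop idiom to erase from an unordered array in O(1):
//   *it = m_notification_list.back();
//   m_notification_list.pop_back();
// Element order is not preserved, which is acceptable here because the list is
// only ever iterated in full by flush_all().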
void init()
{
g_fxo->init<dispatch_manager>();
}
void flush()
{
g_fxo->get<dispatch_manager>().flush_all();
}
VkDescriptorSetLayout create_layout(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings)
{
VkDescriptorSetLayoutCreateInfo infos = {};
infos.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
infos.pBindings = bindings.data();
infos.bindingCount = ::size32(bindings);
VkDescriptorSetLayoutBindingFlagsCreateInfo binding_infos = {};
rsx::simple_array<VkDescriptorBindingFlags> binding_flags;
if (g_render_device->get_descriptor_indexing_support())
{
const auto deferred_mask = g_render_device->get_descriptor_update_after_bind_support();
binding_flags.resize(::size32(bindings));
for (u32 i = 0; i < binding_flags.size(); ++i)
{
if ((1ull << bindings[i].descriptorType) & ~deferred_mask)
{
binding_flags[i] = 0u;
}
else
{
binding_flags[i] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT;
}
}
binding_infos.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT;
binding_infos.pNext = nullptr;
binding_infos.bindingCount = ::size32(binding_flags);
binding_infos.pBindingFlags = binding_flags.data();
infos.pNext = &binding_infos;
infos.flags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
}
VkDescriptorSetLayout result;
CHECK_RESULT(vkCreateDescriptorSetLayout(*g_render_device, &infos, nullptr, &result));
return result;
}
}
void descriptor_pool::create(const vk::render_device& dev, const rsx::simple_array<VkDescriptorPoolSize>& pool_sizes, u32 max_sets)
{
ensure(max_sets > 16);
m_create_info_pool_sizes = pool_sizes;
for (auto& size : m_create_info_pool_sizes)
{
ensure(size.descriptorCount < 128); // Sanity check. Remove before commit.
size.descriptorCount *= max_sets;
}
m_create_info.flags = dev.get_descriptor_update_after_bind_support() ? VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT : 0;
m_create_info.maxSets = max_sets;
m_create_info.poolSizeCount = m_create_info_pool_sizes.size();
m_create_info.pPoolSizes = m_create_info_pool_sizes.data();
m_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
m_owner = &dev;
next_subpool();
}
void descriptor_pool::destroy()
{
if (m_device_subpools.empty()) return;
for (auto& pool : m_device_subpools)
{
vkDestroyDescriptorPool((*m_owner), pool.handle, nullptr);
pool.handle = VK_NULL_HANDLE;
}
m_owner = nullptr;
}
void descriptor_pool::reset(u32 subpool_id, VkDescriptorPoolResetFlags flags)
{
std::lock_guard lock(m_subpool_lock);
CHECK_RESULT(vkResetDescriptorPool(*m_owner, m_device_subpools[subpool_id].handle, flags));
m_device_subpools[subpool_id].busy = VK_FALSE;
}
VkDescriptorSet descriptor_pool::allocate(VkDescriptorSetLayout layout, VkBool32 use_cache)
{
if (use_cache)
{
if (m_descriptor_set_cache.empty())
{
// For optimal cache utilization, each pool should only allocate one layout
m_cached_layout = layout;
}
else if (m_cached_layout != layout)
{
use_cache = VK_FALSE;
}
else
{
return m_descriptor_set_cache.pop_back();
}
}
if (!can_allocate(use_cache ? 4 : 1, m_current_subpool_offset))
{
next_subpool();
}
VkDescriptorSet new_descriptor_set;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorPool = m_current_pool_handle;
alloc_info.descriptorSetCount = 1;
alloc_info.pSetLayouts = &layout;
if (use_cache)
{
const auto alloc_size = std::min<u32>(m_create_info.maxSets - m_current_subpool_offset, max_cache_size);
m_allocation_request_cache.resize(alloc_size);
for (auto& layout_ : m_allocation_request_cache)
{
layout_ = m_cached_layout;
}
ensure(m_descriptor_set_cache.empty());
alloc_info.descriptorSetCount = alloc_size;
alloc_info.pSetLayouts = m_allocation_request_cache.data();
m_descriptor_set_cache.resize(alloc_size);
CHECK_RESULT(vkAllocateDescriptorSets(*m_owner, &alloc_info, m_descriptor_set_cache.data()));
m_current_subpool_offset += alloc_size;
new_descriptor_set = m_descriptor_set_cache.pop_back();
}
else
{
m_current_subpool_offset++;
CHECK_RESULT(vkAllocateDescriptorSets(*m_owner, &alloc_info, &new_descriptor_set));
}
return new_descriptor_set;
}
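// Illustrative note, not part of the original source: with use_cache set,
// allocate() above requests up to max_cache_size sets of the cached layout in a
// single vkAllocateDescriptorSets call and serves later allocations from the
// cache, amortizing per-call driver overhead across many descriptor sets.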
void descriptor_pool::next_subpool()
{
if (m_current_subpool_index != umax)
{
// Enqueue release using gc
auto release_func = [subpool_index=m_current_subpool_index, this]()
{
this->reset(subpool_index, 0);
};
auto cleanup_obj = std::make_unique<gc_callback_t>(release_func);
vk::get_gc()->dispose(cleanup_obj);
}
m_current_subpool_offset = 0;
m_current_subpool_index = umax;
const int max_retries = 2;
int retries = max_retries;
do
{
for (u32 index = 0; index < m_device_subpools.size(); ++index)
{
if (!m_device_subpools[index].busy)
{
m_current_subpool_index = index;
goto done; // Nested break
}
}
VkDescriptorPool subpool = VK_NULL_HANDLE;
if (VkResult result = vkCreateDescriptorPool(*m_owner, &m_create_info, nullptr, &subpool))
{
if (retries-- && (result == VK_ERROR_FRAGMENTATION_EXT))
{
rsx_log.warning("Descriptor pool creation failed with fragmentation error. Will attempt to recover.");
vk::on_descriptor_pool_fragmentation(!retries);
continue;
}
vk::die_with_error(result);
fmt::throw_exception("Unreachable");
}
// New subpool created successfully
std::lock_guard lock(m_subpool_lock);
m_device_subpools.push_back(
{
.handle = subpool,
.busy = VK_FALSE
});
m_current_subpool_index = m_device_subpools.size() - 1;
} while (m_current_subpool_index == umax);
done:
m_device_subpools[m_current_subpool_index].busy = VK_TRUE;
m_current_pool_handle = m_device_subpools[m_current_subpool_index].handle;
}
descriptor_set::descriptor_set(VkDescriptorSet set)
{
flush();
m_handle = set;
}
descriptor_set::~descriptor_set()
{
if (m_update_after_bind_mask)
{
g_fxo->get<descriptors::dispatch_manager>().deregister(this);
}
}
void descriptor_set::init(VkDescriptorSet new_set)
{
if (!m_in_use) [[unlikely]]
{
m_image_info_pool.reserve(m_pool_size);
m_buffer_view_pool.reserve(m_pool_size);
m_buffer_info_pool.reserve(m_pool_size);
m_in_use = true;
m_update_after_bind_mask = g_render_device->get_descriptor_update_after_bind_support();
if (m_update_after_bind_mask)
{
g_fxo->get<descriptors::dispatch_manager>().register_(this);
}
}
else if (m_push_type_mask & ~m_update_after_bind_mask)
{
flush();
}
m_handle = new_set;
}
void descriptor_set::swap(descriptor_set& other)
{
const auto other_handle = other.m_handle;
other.flush();
other.m_handle = m_handle;
init(other_handle);
}
descriptor_set& descriptor_set::operator = (VkDescriptorSet set)
{
init(set);
return *this;
}
VkDescriptorSet* descriptor_set::ptr()
{
if (!m_in_use) [[likely]]
{
init(m_handle);
}
return &m_handle;
}
VkDescriptorSet descriptor_set::value() const
{
return m_handle;
}
void descriptor_set::push(const VkBufferView& buffer_view, VkDescriptorType type, u32 binding)
{
m_push_type_mask |= (1ull << type);
m_buffer_view_pool.push_back(buffer_view);
m_pending_writes.emplace_back(
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
nullptr, // pNext
m_handle, // dstSet
binding, // dstBinding
0, // dstArrayElement
1, // descriptorCount
type, // descriptorType
nullptr, // pImageInfo
nullptr, // pBufferInfo
&m_buffer_view_pool.back() // pTexelBufferView
);
}
void descriptor_set::push(const VkDescriptorBufferInfo& buffer_info, VkDescriptorType type, u32 binding)
{
m_push_type_mask |= (1ull << type);
m_buffer_info_pool.push_back(buffer_info);
m_pending_writes.emplace_back(
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
nullptr, // pNext
m_handle, // dstSet
binding, // dstBinding
0, // dstArrayElement
1, // descriptorCount
type, // descriptorType
nullptr, // pImageInfo
&m_buffer_info_pool.back(), // pBufferInfo
nullptr // pTexelBufferView
);
}
void descriptor_set::push(const VkDescriptorImageInfo& image_info, VkDescriptorType type, u32 binding)
{
m_push_type_mask |= (1ull << type);
m_image_info_pool.push_back(image_info);
m_pending_writes.emplace_back(
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
nullptr, // pNext
m_handle, // dstSet
binding, // dstBinding
0, // dstArrayElement
1, // descriptorCount
type, // descriptorType
&m_image_info_pool.back(), // pImageInfo
nullptr, // pBufferInfo
nullptr // pTexelBufferView
);
}
void descriptor_set::push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding)
{
VkWriteDescriptorSet writer =
{
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
nullptr, // pNext
m_handle, // dstSet
binding, // dstBinding
0, // dstArrayElement
count, // descriptorCount
type, // descriptorType
image_info, // pImageInfo
nullptr, // pBufferInfo
nullptr // pTexelBufferView
};
vkUpdateDescriptorSets(*g_render_device, 1, &writer, 0, nullptr);
}
void descriptor_set::push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask)
{
m_push_type_mask |= type_mask;
if (m_pending_copies.empty()) [[likely]]
{
m_pending_copies = std::move(copy_cmd);
}
else
{
const auto old_size = m_pending_copies.size();
const auto new_size = copy_cmd.size() + old_size;
m_pending_copies.resize(new_size);
std::copy(copy_cmd.begin(), copy_cmd.end(), m_pending_copies.begin() + old_size);
}
}
void descriptor_set::bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
{
if ((m_push_type_mask & ~m_update_after_bind_mask) || (m_pending_writes.size() >= max_cache_size))
{
flush();
}
vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr);
}
void descriptor_set::flush()
{
if (!m_push_type_mask)
{
return;
}
const auto num_writes = ::size32(m_pending_writes);
const auto num_copies = ::size32(m_pending_copies);
vkUpdateDescriptorSets(*g_render_device, num_writes, m_pending_writes.data(), num_copies, m_pending_copies.data());
m_push_type_mask = 0;
m_pending_writes.clear();
m_pending_copies.clear();
m_image_info_pool.clear();
m_buffer_info_pool.clear();
m_buffer_view_pool.clear();
}
}
| size: 13,359 | language: C++ | extension: .cpp | total_lines: 391 | avg_line_length: 30.452685 | max_line_length: 132 | alphanum_fraction: 0.631803
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,473 | file_name: barriers.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/VK/vkutils/barriers.cpp
| content:
#include "barriers.h"
#include "commands.h"
#include "image.h"
#include "../../rsx_methods.h"
#include "../VKRenderPass.h"
namespace vk
{
void insert_image_memory_barrier(
const vk::command_buffer& cmd, VkImage image,
VkImageLayout current_layout, VkImageLayout new_layout,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
VkAccessFlags src_mask, VkAccessFlags dst_mask,
const VkImageSubresourceRange& range,
bool preserve_renderpass)
{
if (!preserve_renderpass && vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.newLayout = new_layout;
barrier.oldLayout = current_layout;
barrier.image = image;
barrier.srcAccessMask = src_mask;
barrier.dstAccessMask = dst_mask;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange = range;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
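// Illustrative usage, not part of the original source: transitioning an image
// that was just written as a transfer destination so a fragment shader can
// sample it might look like:
//   insert_image_memory_barrier(cmd, img,
//       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//       VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//       VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
//       { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }, false);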
void insert_buffer_memory_barrier(
const vk::command_buffer& cmd,
VkBuffer buffer,
VkDeviceSize offset, VkDeviceSize length,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
VkAccessFlags src_mask, VkAccessFlags dst_mask,
bool preserve_renderpass)
{
if (!preserve_renderpass && vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
VkBufferMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
barrier.buffer = buffer;
barrier.offset = offset;
barrier.size = length;
barrier.srcAccessMask = src_mask;
barrier.dstAccessMask = dst_mask;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 1, &barrier, 0, nullptr);
}
void insert_global_memory_barrier(
const vk::command_buffer& cmd,
VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
VkAccessFlags src_access, VkAccessFlags dst_access,
bool preserve_renderpass)
{
if (!preserve_renderpass && vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
VkMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
barrier.srcAccessMask = src_access;
barrier.dstAccessMask = dst_access;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, nullptr, 0, nullptr);
}
void insert_texture_barrier(
const vk::command_buffer& cmd,
VkImage image,
VkImageLayout current_layout, VkImageLayout new_layout,
VkImageSubresourceRange range,
bool preserve_renderpass)
{
// NOTE: Sampling from an attachment in ATTACHMENT_OPTIMAL layout on some hw ends up with garbage output
// Transition to GENERAL if this resource is both input and output
// TODO: This implicitly makes the target incompatible with the renderpass declaration; investigate a proper workaround
// TODO: This likely throws out hw optimizations on the rest of the renderpass, manage carefully
if (!preserve_renderpass && vk::is_renderpass_open(cmd))
{
vk::end_renderpass(cmd);
}
VkAccessFlags src_access;
VkPipelineStageFlags src_stage;
if (range.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
{
src_access = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
else
{
src_access = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
src_stage = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
}
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.newLayout = new_layout;
barrier.oldLayout = current_layout;
barrier.image = image;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange = range;
barrier.srcAccessMask = src_access;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(cmd, src_stage, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
void insert_texture_barrier(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, bool preserve_renderpass)
{
insert_texture_barrier(cmd, image->value, image->current_layout, new_layout, { image->aspect(), 0, 1, 0, 1 }, preserve_renderpass);
image->current_layout = new_layout;
}
}
| size: 4,582 | language: C++ | extension: .cpp | total_lines: 115 | avg_line_length: 35.8 | max_line_length: 157 | alphanum_fraction: 0.744312
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,474 | file_name: ProgramStateCache.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/Program/ProgramStateCache.cpp
| content:
#include "stdafx.h"
#include "ProgramStateCache.h"
#include "Emu/system_config.h"
#include <stack>
#include "util/v128.hpp"
#include "util/asm.hpp"
#if defined(ARCH_X64)
#include "emmintrin.h"
#include "immintrin.h"
#endif
#ifdef ARCH_ARM64
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include "Emu/CPU/sse2neon.h"
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
#endif
using namespace program_hash_util;
usz vertex_program_utils::get_vertex_program_ucode_hash(const RSXVertexProgram &program)
{
// 64-bit Fowler/Noll/Vo FNV-1a hash code
usz hash = 0xCBF29CE484222325ULL;
const void* instbuffer = program.data.data();
usz instIndex = 0;
for (unsigned i = 0; i < program.data.size() / 4; i++)
{
if (program.instruction_mask[i])
{
const auto inst = v128::loadu(instbuffer, instIndex);
hash ^= inst._u64[0];
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) + (hash << 8) + (hash << 40);
hash ^= inst._u64[1];
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) + (hash << 8) + (hash << 40);
}
instIndex++;
}
return hash;
}
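// Illustrative note, not part of the original source: the shift-add chain above
// is a strength-reduced multiply by the 64-bit FNV prime, since
// h + (h<<1) + (h<<4) + (h<<5) + (h<<7) + (h<<8) + (h<<40) == h * 0x100000001B3.
// A minimal byte-wise FNV-1a of the same family (the routine above instead folds
// in the two 64-bit halves of each 128-bit instruction) looks like this:
static inline usz fnv1a64_bytes_sketch(const u8* data, usz len)
{
usz hash = 0xCBF29CE484222325ULL; // FNV offset basis
for (usz i = 0; i < len; ++i)
{
hash ^= data[i];          // fold in one byte
hash *= 0x100000001B3ULL; // multiply by the FNV prime
}
return hash;
}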
vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vertex_program(const u32* data, u32 entry, RSXVertexProgram& dst_prog)
{
vertex_program_utils::vertex_program_metadata result{};
//u32 last_instruction_address = 0;
//u32 first_instruction_address = entry;
std::bitset<rsx::max_vertex_program_instructions> instructions_to_patch;
std::pair<u32, u32> instruction_range{ umax, 0 };
bool has_branch_instruction = false;
std::stack<u32> call_stack;
D3 d3;
D2 d2;
D1 d1;
D0 d0;
std::function<void(u32, bool)> walk_function = [&](u32 start, bool fast_exit)
{
u32 current_instruction = start;
std::set<u32> conditional_targets;
while (true)
{
ensure(current_instruction < rsx::max_vertex_program_instructions);
if (result.instruction_mask[current_instruction])
{
if (!fast_exit)
{
// This can be harmless if a dangling RET was encountered before.
// This can also be legal in case of BRB...BRI loops since BRIs are conditional. Might just be a loop with exit cond.
rsx_log.warning("vp_analyser: Possible infinite loop detected");
}
// There is never any reason to continue scanning after self-intersecting on the control-flow tree.
break;
}
const auto instruction = v128::loadu(&data[current_instruction * 4]);
d1.HEX = instruction._u32[1];
d2.HEX = instruction._u32[2];
d3.HEX = instruction._u32[3];
// Touch current instruction
result.instruction_mask[current_instruction] = true;
instruction_range.first = std::min(current_instruction, instruction_range.first);
instruction_range.second = std::max(current_instruction, instruction_range.second);
// Whether to check if the current instruction references an input stream
auto input_attribute_ref = [&]()
{
if (!d1.input_src)
{
// It is possible to reference ATTR0, but this is mandatory anyway. No need to explicitly test for it
return;
}
const auto ref_mask = (1u << d1.input_src);
if ((result.referenced_inputs_mask & ref_mask) == 0)
{
// Type is encoded in the first 2 bits of each block
const auto src0 = d2.src0l & 0x3;
const auto src1 = d2.src1 & 0x3;
const auto src2 = d3.src2l & 0x3;
if ((src0 == RSX_VP_REGISTER_TYPE_INPUT) ||
(src1 == RSX_VP_REGISTER_TYPE_INPUT) ||
(src2 == RSX_VP_REGISTER_TYPE_INPUT))
{
result.referenced_inputs_mask |= ref_mask;
}
}
};
auto branch_to = [&](const u32 target)
{
input_attribute_ref();
current_instruction = target;
};
// Basic vec op analysis, must be done before flow analysis
switch (d1.vec_opcode)
{
case RSX_VEC_OPCODE_NOP:
{
break;
}
case RSX_VEC_OPCODE_TXL:
{
result.referenced_textures_mask |= (1 << d2.tex_num);
break;
}
default:
{
input_attribute_ref();
break;
}
}
bool static_jump = false;
bool function_call = true;
switch (d1.sca_opcode)
{
case RSX_SCA_OPCODE_NOP:
{
break;
}
case RSX_SCA_OPCODE_BRI:
{
d0.HEX = instruction._u32[0];
static_jump = (d0.cond == 0x7);
[[fallthrough]];
}
case RSX_SCA_OPCODE_BRB:
{
function_call = false;
[[fallthrough]];
}
case RSX_SCA_OPCODE_CAL:
case RSX_SCA_OPCODE_CLI:
case RSX_SCA_OPCODE_CLB:
{
// Need to patch the jump address to be consistent wherever the program is located
instructions_to_patch[current_instruction] = true;
has_branch_instruction = true;
d0.HEX = instruction._u32[0];
const u32 jump_address = (d0.iaddrh2 << 9) | (d2.iaddrh << 3) | d3.iaddrl;
if (function_call)
{
call_stack.push(current_instruction + 1);
branch_to(jump_address);
continue;
}
else if (static_jump)
{
// NOTE: This will skip potential jump target blocks between current->target
branch_to(jump_address);
continue;
}
else
{
// Set possible end address and proceed as usual
conditional_targets.emplace(jump_address);
instruction_range.second = std::max(jump_address, instruction_range.second);
}
break;
}
case RSX_SCA_OPCODE_RET:
{
if (call_stack.empty())
{
rsx_log.error("vp_analyser: RET found outside subroutine call");
}
else
{
branch_to(call_stack.top());
call_stack.pop();
continue;
}
break;
}
default:
{
input_attribute_ref();
break;
}
}
// Check exit conditions...
if (d3.end)
{
// We have seen an end of instructions marker.
// Multiple exits may exist, usually skipped over by branching. Do not exit on end unless there is no branching.
if (!has_branch_instruction || fast_exit || current_instruction >= instruction_range.second)
{
// Conditions:
// 1. No branching so far. This will always be the exit.
// 2. Fast exit flag is set. This happens when walking through subroutines.
// 3. We've gone beyond the known instruction range. In this scenario, this is the furthest end marker seen so far. It has to be reached by some earlier branch.
break;
}
}
else if ((current_instruction + 1) == rsx::max_vertex_program_instructions)
{
// No more instructions to read.
break;
}
current_instruction++;
}
for (const u32 target : conditional_targets)
{
if (!result.instruction_mask[target])
{
walk_function(target, true);
}
}
};
if (g_cfg.video.debug_program_analyser)
{
fs::file dump(fs::get_cache_dir() + "shaderlog/vp_analyser.bin", fs::rewrite);
dump.write(&entry, 4);
dump.write(data, rsx::max_vertex_program_instructions * 16);
dump.close();
}
walk_function(entry, false);
const u32 instruction_count = (instruction_range.second - instruction_range.first + 1);
result.ucode_length = instruction_count * 16;
dst_prog.base_address = instruction_range.first;
dst_prog.entry = entry;
dst_prog.data.resize(instruction_count * 4);
dst_prog.instruction_mask = (result.instruction_mask >> instruction_range.first);
if (!has_branch_instruction)
{
ensure(instruction_range.first == entry);
std::memcpy(dst_prog.data.data(), data + (instruction_range.first * 4), result.ucode_length);
}
else
{
for (u32 i = instruction_range.first, count = 0; i <= instruction_range.second; ++i, ++count)
{
const u32* instruction = &data[i * 4];
u32* dst = &dst_prog.data[count * 4];
if (result.instruction_mask[i])
{
v128::storeu(v128::loadu(instruction), dst);
if (instructions_to_patch[i])
{
d0.HEX = dst[0];
d2.HEX = dst[2];
d3.HEX = dst[3];
u32 address = (d0.iaddrh2 << 9) | (d2.iaddrh << 3) | d3.iaddrl;
address -= instruction_range.first;
d0.iaddrh2 = (address >> 9) & 0x1;
d2.iaddrh = (address >> 3) & 0x3F;
d3.iaddrl = (address & 0x7);
dst[0] = d0.HEX;
dst[2] = d2.HEX;
dst[3] = d3.HEX;
dst_prog.jump_table.emplace(address);
}
}
else
{
v128::storeu({}, dst);
}
}
// Typical ubershaders have the dispatch at the top with subroutines following. However...
// some games have the dispatch block at the end and the subroutines above them.
// We need to simulate a jump-to-entry in this situation
// Normally this condition is handled by the conditional_targets walk, but sometimes this doesn't work due to cyclic branches
if (instruction_range.first < dst_prog.entry)
{
// Add a jump-to-entry target; std::set::insert is a no-op if a subroutine already branches there
const auto target = dst_prog.entry - instruction_range.first;
dst_prog.jump_table.insert(target);
}
// Verification
for (const u32 target : dst_prog.jump_table)
{
if (!dst_prog.instruction_mask[target])
{
rsx_log.error("vp_analyser: Failed, branch target 0x%x was not resolved", target);
}
}
}
result.referenced_inputs_mask |= 1u; // VPOS is always enabled, else no rendering can happen
return result;
}
usz vertex_program_storage_hash::operator()(const RSXVertexProgram &program) const
{
usz hash = vertex_program_utils::get_vertex_program_ucode_hash(program);
hash ^= program.output_mask;
hash ^= program.texture_state.texture_dimensions;
hash ^= program.texture_state.multisampled_textures;
return hash;
}
bool vertex_program_compare::operator()(const RSXVertexProgram &binary1, const RSXVertexProgram &binary2) const
{
if (binary1.output_mask != binary2.output_mask)
return false;
if (binary1.texture_state != binary2.texture_state)
return false;
if (binary1.data.size() != binary2.data.size())
return false;
if (binary1.jump_table != binary2.jump_table)
return false;
const void* instBuffer1 = binary1.data.data();
const void* instBuffer2 = binary2.data.data();
usz instIndex = 0;
for (unsigned i = 0; i < binary1.data.size() / 4; i++)
{
const auto active = binary1.instruction_mask[instIndex];
if (active != binary2.instruction_mask[instIndex])
{
return false;
}
if (active)
{
const auto inst1 = v128::loadu(instBuffer1, instIndex);
const auto inst2 = v128::loadu(instBuffer2, instIndex);
if (inst1._u ^ inst2._u)
{
return false;
}
}
instIndex++;
}
return true;
}
bool fragment_program_utils::is_constant(u32 sourceOperand)
{
return ((sourceOperand >> 8) & 0x3) == 2;
}
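// Illustrative note, not part of the original source: bits [9:8] of a source
// operand encode its register type, and the value 2 selects a constant register.
// Such an instruction embeds an immediate that occupies the following 16-byte
// slot of the ucode stream, which the size/analysis routines below skip over.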
usz fragment_program_utils::get_fragment_program_ucode_size(const void* ptr)
{
const auto instBuffer = ptr;
usz instIndex = 0;
while (true)
{
const v128 inst = v128::loadu(instBuffer, instIndex);
bool isSRC0Constant = is_constant(inst._u32[1]);
bool isSRC1Constant = is_constant(inst._u32[2]);
bool isSRC2Constant = is_constant(inst._u32[3]);
bool end = (inst._u32[0] >> 8) & 0x1;
if (isSRC0Constant || isSRC1Constant || isSRC2Constant)
{
instIndex += 2;
if (end)
return instIndex * 4 * 4;
continue;
}
instIndex++;
if (end)
return instIndex * 4 * 4;
}
}
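// Illustrative worked example, not part of the original source: each fragment
// instruction slot is 16 bytes (the "* 4 * 4" in the return expressions above),
// and an instruction referencing a constant is followed by one extra immediate
// slot. A three-instruction program whose second instruction reads a constant
// therefore occupies (3 + 1) * 16 = 64 bytes of ucode.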
fragment_program_utils::fragment_program_metadata fragment_program_utils::analyse_fragment_program(const void* ptr)
{
fragment_program_utils::fragment_program_metadata result{};
result.program_start_offset = -1;
const auto instBuffer = ptr;
s32 index = 0;
while (true)
{
const auto inst = v128::loadu(instBuffer, index);
// Check for the opcode high bit, which indicates a branch instruction (opcodes 0x40...0x45)
if (inst._u32[2] & (1 << 23))
{
// NOTE: Jump instructions are not yet proved to work outside of loops and if/else blocks
// Otherwise we would need to follow the execution chain
result.has_branch_instructions = true;
}
else
{
const u32 opcode = (inst._u32[0] >> 16) & 0x3F;
if (opcode)
{
if (result.program_start_offset == umax)
{
result.program_start_offset = index * 16;
}
switch (opcode)
{
case RSX_FP_OPCODE_TEX:
case RSX_FP_OPCODE_TEXBEM:
case RSX_FP_OPCODE_TXP:
case RSX_FP_OPCODE_TXPBEM:
case RSX_FP_OPCODE_TXD:
case RSX_FP_OPCODE_TXB:
case RSX_FP_OPCODE_TXL:
{
// Bits 17-20 of word 1, swapped within u16 sections
// Bits 16-23 are swapped into the upper 8 bits (24-31)
const u32 tex_num = (inst._u32[0] >> 25) & 15;
result.referenced_textures_mask |= (1 << tex_num);
break;
}
case RSX_FP_OPCODE_PK4:
case RSX_FP_OPCODE_UP4:
case RSX_FP_OPCODE_PK2:
case RSX_FP_OPCODE_UP2:
case RSX_FP_OPCODE_PKB:
case RSX_FP_OPCODE_UPB:
case RSX_FP_OPCODE_PK16:
case RSX_FP_OPCODE_UP16:
case RSX_FP_OPCODE_PKG:
case RSX_FP_OPCODE_UPG:
{
result.has_pack_instructions = true;
break;
}
}
}
if (is_constant(inst._u32[1]) || is_constant(inst._u32[2]) || is_constant(inst._u32[3]))
{
// Instruction references a constant; skip the extra slot occupied by its data
index++;
result.program_ucode_length += 16;
result.program_constants_buffer_length += 16;
}
}
if (result.program_start_offset != umax)
{
result.program_ucode_length += 16;
}
if ((inst._u32[0] >> 8) & 0x1)
{
if (result.program_start_offset == umax)
{
result.program_start_offset = index * 16;
result.program_ucode_length = 16;
result.is_nop_shader = true;
}
break;
}
index++;
}
return result;
}
usz fragment_program_utils::get_fragment_program_ucode_hash(const RSXFragmentProgram& program)
{
// 64-bit Fowler/Noll/Vo FNV-1a hash code
usz hash = 0xCBF29CE484222325ULL;
const void* instbuffer = program.get_data();
usz instIndex = 0;
while (true)
{
const auto inst = v128::loadu(instbuffer, instIndex);
hash ^= inst._u64[0];
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) + (hash << 8) + (hash << 40);
hash ^= inst._u64[1];
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) + (hash << 8) + (hash << 40);
instIndex++;
// Skip constants
if (fragment_program_utils::is_constant(inst._u32[1]) ||
fragment_program_utils::is_constant(inst._u32[2]) ||
fragment_program_utils::is_constant(inst._u32[3]))
instIndex++;
bool end = (inst._u32[0] >> 8) & 0x1;
if (end)
return hash;
}
return 0;
}
usz fragment_program_storage_hash::operator()(const RSXFragmentProgram& program) const
{
usz hash = fragment_program_utils::get_fragment_program_ucode_hash(program);
hash ^= program.ctrl;
hash ^= +program.two_sided_lighting;
hash ^= program.texture_state.texture_dimensions;
hash ^= program.texture_state.shadow_textures;
hash ^= program.texture_state.redirected_textures;
hash ^= program.texture_state.multisampled_textures;
hash ^= program.texcoord_control_mask;
return hash;
}
bool fragment_program_compare::operator()(const RSXFragmentProgram& binary1, const RSXFragmentProgram& binary2) const
{
if (binary1.ctrl != binary2.ctrl || binary1.texture_state != binary2.texture_state ||
binary1.texcoord_control_mask != binary2.texcoord_control_mask ||
binary1.two_sided_lighting != binary2.two_sided_lighting)
return false;
const void* instBuffer1 = binary1.get_data();
const void* instBuffer2 = binary2.get_data();
usz instIndex = 0;
while (true)
{
const auto inst1 = v128::loadu(instBuffer1, instIndex);
const auto inst2 = v128::loadu(instBuffer2, instIndex);
if (inst1._u ^ inst2._u)
return false;
instIndex++;
// Skip constants
if (fragment_program_utils::is_constant(inst1._u32[1]) ||
fragment_program_utils::is_constant(inst1._u32[2]) ||
fragment_program_utils::is_constant(inst1._u32[3]))
instIndex++;
bool end = ((inst1._u32[0] >> 8) & 0x1) && ((inst2._u32[0] >> 8) & 0x1);
if (end)
return true;
}
}
namespace rsx
{
#if defined(ARCH_X64) || defined(ARCH_ARM64)
static inline void write_fragment_constants_to_buffer_sse2(const std::span<f32>& buffer, const RSXFragmentProgram& rsx_prog, const std::vector<usz>& offsets_cache, bool sanitize)
{
f32* dst = buffer.data();
for (usz offset_in_fragment_program : offsets_cache)
{
char* data = static_cast<char*>(rsx_prog.get_data()) + offset_in_fragment_program;
const __m128i vector = _mm_loadu_si128(reinterpret_cast<__m128i*>(data));
const __m128i shuffled_vector = _mm_or_si128(_mm_slli_epi16(vector, 8), _mm_srli_epi16(vector, 8));
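// Added commentary, not part of the original source: the shift/or pair above
// swaps the two bytes within every 16-bit lane, matching the scalar fallback's
// ((value >> 8) & 0xff00ff) | ((value << 8) & 0xff00ff00); fragment constants
// are stored with the bytes of each halfword swapped.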
if (sanitize)
{
// Convert NaNs and Infs to 0: clear the sign bit, then keep only lanes whose magnitude is below the Inf/NaN threshold (0x7f800000)
const auto masked = _mm_and_si128(shuffled_vector, _mm_set1_epi32(0x7fffffff));
const auto valid = _mm_cmplt_epi32(masked, _mm_set1_epi32(0x7f800000));
const auto result = _mm_and_si128(shuffled_vector, valid);
_mm_stream_si128(utils::bless<__m128i>(dst), result);
}
else
{
_mm_stream_si128(utils::bless<__m128i>(dst), shuffled_vector);
}
dst += 4;
}
}
#else
static inline void write_fragment_constants_to_buffer_fallback(const std::span<f32>& buffer, const RSXFragmentProgram& rsx_prog, const std::vector<usz>& offsets_cache, bool sanitize)
{
f32* dst = buffer.data();
for (usz offset_in_fragment_program : offsets_cache)
{
char* data = static_cast<char*>(rsx_prog.get_data()) + offset_in_fragment_program;
for (u32 i = 0; i < 4; i++)
{
const u32 value = reinterpret_cast<u32*>(data)[i];
const u32 shuffled = ((value >> 8) & 0xff00ff) | ((value << 8) & 0xff00ff00);
if (sanitize && (shuffled & 0x7fffffff) >= 0x7f800000)
{
dst[i] = 0.f;
}
else
{
dst[i] = std::bit_cast<f32>(shuffled);
}
}
dst += 4;
}
}
#endif
void write_fragment_constants_to_buffer(const std::span<f32>& buffer, const RSXFragmentProgram& rsx_prog, const std::vector<usz>& offsets_cache, bool sanitize)
{
#if defined(ARCH_X64) || defined(ARCH_ARM64)
write_fragment_constants_to_buffer_sse2(buffer, rsx_prog, offsets_cache, sanitize);
#else
write_fragment_constants_to_buffer_fallback(buffer, rsx_prog, offsets_cache, sanitize);
#endif
}
}
| size: 17,884 | language: C++ | extension: .cpp | total_lines: 568 | avg_line_length: 27.804577 | max_line_length: 183 | alphanum_fraction: 0.678368
| repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: true | near_duplicates_stackv1: false | near_duplicates_stackv2: false
| id: 5,475 | file_name: FragmentProgramDecompiler.cpp | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/Program/FragmentProgramDecompiler.cpp
| content:
#include "stdafx.h"
#include "Emu/System.h"
#include "../rsx_methods.h"
#include "FragmentProgramDecompiler.h"
#include <algorithm>
namespace rsx
{
namespace fragment_program
{
static const std::string reg_table[] =
{
"wpos",
"diff_color", "spec_color",
"fogc",
"tc0", "tc1", "tc2", "tc3", "tc4", "tc5", "tc6", "tc7", "tc8", "tc9",
"ssa"
};
}
}
using namespace rsx::fragment_program;
FragmentProgramDecompiler::FragmentProgramDecompiler(const RSXFragmentProgram &prog, u32& size)
: m_size(size)
, m_prog(prog)
, m_ctrl(prog.ctrl)
{
m_size = 0;
}
void FragmentProgramDecompiler::SetDst(std::string code, u32 flags)
{
if (!src0.exec_if_eq && !src0.exec_if_gr && !src0.exec_if_lt) return;
if (src1.scale)
{
std::string modifier;
switch (src1.scale)
{
case 0: break;
case 1: code = "(" + code + " * "; modifier = "2."; break;
case 2: code = "(" + code + " * "; modifier = "4."; break;
case 3: code = "(" + code + " * "; modifier = "8."; break;
case 5: code = "(" + code + " / "; modifier = "2."; break;
case 6: code = "(" + code + " / "; modifier = "4."; break;
case 7: code = "(" + code + " / "; modifier = "8."; break;
default:
rsx_log.error("Bad scale: %d", u32{ src1.scale });
break;
}
if (flags & OPFLAGS::skip_type_cast && dst.fp16 && device_props.has_native_half_support)
{
modifier = getHalfTypeName(1) + "(" + modifier + ")";
}
if (!modifier.empty())
{
code = code + modifier + ")";
}
}
if (!dst.no_dest)
{
if (dst.fp16 && device_props.has_native_half_support && !(flags & OPFLAGS::skip_type_cast))
{
// Cast to native data type
code = ClampValue(code, RSX_FP_PRECISION_HALF);
}
if (dst.saturate)
{
code = ClampValue(code, RSX_FP_PRECISION_SATURATE);
}
else if (dst.prec)
{
switch (dst.opcode)
{
case RSX_FP_OPCODE_NRM:
case RSX_FP_OPCODE_MAX:
case RSX_FP_OPCODE_MIN:
case RSX_FP_OPCODE_COS:
case RSX_FP_OPCODE_SIN:
case RSX_FP_OPCODE_REFL:
case RSX_FP_OPCODE_FRC:
case RSX_FP_OPCODE_LIT:
case RSX_FP_OPCODE_LIF:
case RSX_FP_OPCODE_LG2:
break;
case RSX_FP_OPCODE_MOV:
// NOTE: Sometimes varying inputs from VS are out of range so do not exempt any input types, unless fp16 (Naruto UNS)
if (dst.fp16 && src0.fp16 && src0.reg_type == RSX_FP_REGISTER_TYPE_TEMP)
break;
[[fallthrough]];
default:
{
// fp16 precision flag on f32 register; ignore
if (dst.prec == 1 && !dst.fp16)
break;
// Native type already has fp16 clamped (input must have been cast)
if (dst.prec == 1 && dst.fp16 && device_props.has_native_half_support)
break;
// clamp value to allowed range
code = ClampValue(code, dst.prec);
break;
}
}
}
}
opflags = flags;
code += (flags & OPFLAGS::no_src_mask) ? "" : "$m";
if (dst.no_dest)
{
if (dst.set_cond)
{
AddCode("$ifcond " + m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "cc" + std::to_string(src0.cond_mod_reg_index)) + "$m = " + code + ";");
}
else
{
AddCode("$ifcond " + code + ";");
}
return;
}
const std::string dest = AddReg(dst.dest_reg, !!dst.fp16) + "$m";
const std::string decoded_dest = Format(dest);
AddCodeCond(decoded_dest, code);
//AddCode("$ifcond " + dest + code + (append_mask ? "$m;" : ";"));
if (dst.set_cond)
{
AddCode(m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "cc" + std::to_string(src0.cond_mod_reg_index)) + "$m = " + dest + ";");
}
u32 reg_index = dst.fp16 ? dst.dest_reg >> 1 : dst.dest_reg;
ensure(reg_index < temp_registers.size());
if (dst.opcode == RSX_FP_OPCODE_MOV &&
src0.reg_type == RSX_FP_REGISTER_TYPE_TEMP &&
src0.tmp_reg_index == reg_index)
{
// The register did not acquire any new data
// Common in code with structures like r0.xy = r0.xy
// Unsure why such code would exist, maybe placeholders for dynamically generated shader code?
if (decoded_dest == Format(code))
{
return;
}
}
temp_registers[reg_index].tag(dst.dest_reg, !!dst.fp16, dst.mask_x, dst.mask_y, dst.mask_z, dst.mask_w);
}
void FragmentProgramDecompiler::AddFlowOp(const std::string& code)
{
//Flow operations can only consider conditionals and have no dst
if (src0.exec_if_gr && src0.exec_if_lt && src0.exec_if_eq)
{
AddCode(code + ";");
return;
}
else if (!src0.exec_if_gr && !src0.exec_if_lt && !src0.exec_if_eq)
{
AddCode("//" + code + ";");
return;
}
//We have a conditional expression
std::string cond = GetRawCond();
AddCode("if (any(" + cond + ")) " + code + ";");
}
void FragmentProgramDecompiler::AddCode(const std::string& code)
{
main.append(m_code_level, '\t') += Format(code) + "\n";
}
std::string FragmentProgramDecompiler::GetMask() const
{
std::string ret;
ret.reserve(5);
static constexpr std::string_view dst_mask = "xyzw";
ret += '.';
if (dst.mask_x) ret += dst_mask[0];
if (dst.mask_y) ret += dst_mask[1];
if (dst.mask_z) ret += dst_mask[2];
if (dst.mask_w) ret += dst_mask[3];
return ret == "."sv || ret == ".xyzw"sv ? "" : (ret);
}
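// Declares (or re-uses) a temp register parameter and returns its name.
// For example, AddReg(2, true) yields "h2" typed half4 when native fp16 is supported,
// while AddReg(2, false) yields "r2" typed float4; both are zero-initialized.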
std::string FragmentProgramDecompiler::AddReg(u32 index, bool fp16)
{
const std::string type_name = (fp16 && device_props.has_native_half_support)? getHalfTypeName(4) : getFloatTypeName(4);
const std::string reg_name = std::string(fp16 ? "h" : "r") + std::to_string(index);
return m_parr.AddParam(PF_PARAM_NONE, type_name, reg_name, type_name + "(0.)");
}
bool FragmentProgramDecompiler::HasReg(u32 index, bool fp16)
{
const std::string type_name = (fp16 && device_props.has_native_half_support)? getHalfTypeName(4) : getFloatTypeName(4);
const std::string reg_name = std::string(fp16 ? "h" : "r") + std::to_string(index);
return m_parr.HasParam(PF_PARAM_NONE, type_name, reg_name);
}
std::string FragmentProgramDecompiler::AddCond()
{
return m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "cc" + std::to_string(src0.cond_reg_index));
}
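// Inline constants live in the instruction stream, packed as 4 words directly after the
// 4 opcode words. Setting m_offset to 8 words marks both blocks as consumed; the constant
// is registered as a uniform named after its byte offset, e.g. "fc16" when the very first
// instruction embeds a constant.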
std::string FragmentProgramDecompiler::AddConst()
{
const std::string name = std::string("fc") + std::to_string(m_size + 4 * 4);
const std::string type = getFloatTypeName(4);
if (m_parr.HasParam(PF_PARAM_UNIFORM, type, name))
{
return name;
}
auto data = reinterpret_cast<be_t<u32>*>(reinterpret_cast<uptr>(m_prog.get_data()) + m_size + 4 * sizeof(u32));
m_offset = 2 * 4 * sizeof(u32);
u32 x = GetData(data[0]);
u32 y = GetData(data[1]);
u32 z = GetData(data[2]);
u32 w = GetData(data[3]);
const auto var = fmt::format("%s(%f, %f, %f, %f)", type, std::bit_cast<f32>(x), std::bit_cast<f32>(y), std::bit_cast<f32>(z), std::bit_cast<f32>(w));
return m_parr.AddParam(PF_PARAM_UNIFORM, type, name, var);
}
std::string FragmentProgramDecompiler::AddTex()
{
properties.has_tex_op = true;
std::string sampler;
switch (m_prog.get_texture_dimension(dst.tex_num))
{
case rsx::texture_dimension_extended::texture_dimension_1d:
properties.has_tex1D = true;
sampler = "sampler1D";
break;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
properties.has_tex3D = true;
sampler = "samplerCube";
break;
case rsx::texture_dimension_extended::texture_dimension_2d:
properties.has_tex2D = true;
sampler = "sampler2D";
break;
case rsx::texture_dimension_extended::texture_dimension_3d:
properties.has_tex3D = true;
sampler = "sampler3D";
break;
}
opflags |= OPFLAGS::texture_ref;
return m_parr.AddParam(PF_PARAM_UNIFORM, sampler, std::string("tex") + std::to_string(dst.tex_num));
}
std::string FragmentProgramDecompiler::AddX2d()
{
return m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "x2d", getFloatTypeName(4) + "(0.)");
}
std::string FragmentProgramDecompiler::ClampValue(const std::string& code, u32 precision)
{
// FP16 is expected to overflow a lot easier at 0+-65504
// FP32 can still work up to 0+-3.4E38
// See http://http.download.nvidia.com/developer/Papers/2005/FP_Specials/FP_Specials.pdf
if (precision > 1 && precision < 5)
{
// Define precision_clamp
properties.has_clamp = true;
}
switch (precision)
{
case RSX_FP_PRECISION_REAL:
// Full 32-bit precision
break;
case RSX_FP_PRECISION_HALF:
return "clamp16(" + code + ")";
case RSX_FP_PRECISION_FIXED12:
return "precision_clamp(" + code + ", -2., 2.)";
case RSX_FP_PRECISION_FIXED9:
return "precision_clamp(" + code + ", -1., 1.)";
case RSX_FP_PRECISION_SATURATE:
return "precision_clamp(" + code + ", 0., 1.)";
case RSX_FP_PRECISION_UNKNOWN:
// Doesn't seem to do anything to the input from hw tests, same as 0
break;
default:
rsx_log.error("Unexpected precision modifier (%d)\n", precision);
break;
}
return code;
}
bool FragmentProgramDecompiler::DstExpectsSca() const
{
int writes = 0;
if (dst.mask_x) writes++;
if (dst.mask_y) writes++;
if (dst.mask_z) writes++;
if (dst.mask_w) writes++;
return (writes == 1);
}
std::string FragmentProgramDecompiler::Format(const std::string& code, bool ignore_redirects)
{
const std::pair<std::string_view, std::function<std::string()>> repl_list[] =
{
{ "$$", []() -> std::string { return "$"; } },
{ "$0", [this]() -> std::string {return GetSRC<SRC0>(src0);} },
{ "$1", [this]() -> std::string {return GetSRC<SRC1>(src1);} },
{ "$2", [this]() -> std::string {return GetSRC<SRC2>(src2);} },
{ "$t", [this]() -> std::string { return "tex" + std::to_string(dst.tex_num);} },
{ "$_i", [this]() -> std::string {return std::to_string(dst.tex_num);} },
{ "$m", std::bind(std::mem_fn(&FragmentProgramDecompiler::GetMask), this) },
{ "$ifcond ", [this]() -> std::string
{
const std::string& cond = GetCond();
if (cond == "true") return "";
return "if(" + cond + ") ";
}
},
{ "$cond", std::bind(std::mem_fn(&FragmentProgramDecompiler::GetCond), this) },
{ "$_c", std::bind(std::mem_fn(&FragmentProgramDecompiler::AddConst), this) },
{ "$float4", [this]() -> std::string { return getFloatTypeName(4); } },
{ "$float3", [this]() -> std::string { return getFloatTypeName(3); } },
{ "$float2", [this]() -> std::string { return getFloatTypeName(2); } },
{ "$float_t", [this]() -> std::string { return getFloatTypeName(1); } },
{ "$half4", [this]() -> std::string { return getHalfTypeName(4); } },
{ "$half3", [this]() -> std::string { return getHalfTypeName(3); } },
{ "$half2", [this]() -> std::string { return getHalfTypeName(2); } },
{ "$half_t", [this]() -> std::string { return getHalfTypeName(1); } },
{ "$Ty", [this]() -> std::string { return (!device_props.has_native_half_support || !dst.fp16)? getFloatTypeName(4) : getHalfTypeName(4); } }
};
if (!ignore_redirects)
{
//Special processing redirects
switch (dst.opcode)
{
case RSX_FP_OPCODE_TEXBEM:
case RSX_FP_OPCODE_TXPBEM:
{
//Redirect parameter 0 to the x2d temp register for TEXBEM
//TODO: Organize this a little better
std::pair<std::string_view, std::string> repl[] = { { "$0", "x2d" } };
std::string result = fmt::replace_all(code, repl);
return fmt::replace_all(result, repl_list);
}
default:
break;
}
}
return fmt::replace_all(code, repl_list);
}
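// Builds the raw per-component condition vector from the CC register compare.
// Sketch of the output: with only exec_if_gr set and an identity swizzle, this
// yields something like "greaterThan(cc0, vec4(0.))" in GLSL terms.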
std::string FragmentProgramDecompiler::GetRawCond()
{
static constexpr std::string_view f = "xyzw";
const auto zero = getFloatTypeName(4) + "(0.)";
std::string swizzle, cond;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[src0.cond_swizzle_x];
swizzle += f[src0.cond_swizzle_y];
swizzle += f[src0.cond_swizzle_z];
swizzle += f[src0.cond_swizzle_w];
if (swizzle == ".xyzw"sv)
{
swizzle.clear();
}
if (src0.exec_if_gr && src0.exec_if_eq)
cond = compareFunction(COMPARE::SGE, AddCond() + swizzle, zero);
else if (src0.exec_if_lt && src0.exec_if_eq)
cond = compareFunction(COMPARE::SLE, AddCond() + swizzle, zero);
else if (src0.exec_if_gr && src0.exec_if_lt)
cond = compareFunction(COMPARE::SNE, AddCond() + swizzle, zero);
else if (src0.exec_if_gr)
cond = compareFunction(COMPARE::SGT, AddCond() + swizzle, zero);
else if (src0.exec_if_lt)
cond = compareFunction(COMPARE::SLT, AddCond() + swizzle, zero);
else //if(src0.exec_if_eq)
cond = compareFunction(COMPARE::SEQ, AddCond() + swizzle, zero);
return cond;
}
std::string FragmentProgramDecompiler::GetCond()
{
if (src0.exec_if_gr && src0.exec_if_lt && src0.exec_if_eq)
{
return "true";
}
else if (!src0.exec_if_gr && !src0.exec_if_lt && !src0.exec_if_eq)
{
return "false";
}
return "any(" + GetRawCond() + ")";
}
void FragmentProgramDecompiler::AddCodeCond(const std::string& lhs, const std::string& rhs)
{
if (src0.exec_if_gr && src0.exec_if_lt && src0.exec_if_eq)
{
AddCode(lhs + " = " + rhs + ";");
return;
}
if (!src0.exec_if_gr && !src0.exec_if_lt && !src0.exec_if_eq)
{
AddCode("//" + lhs + " = " + rhs + ";");
return;
}
std::string src_prefix;
if (device_props.has_native_half_support && !this->dst.fp16)
{
// Target is not fp16 but src might be
// Usually vecX a = f16vecX b is fine, but causes operator overload issues when used in a mix/lerp function
// mix(f32, f16, bvec) causes compiler issues
// NOTE: If dst is fp16 the src will already have been cast to match so this is not a problem in that case
bool src_is_fp16 = false;
if ((opflags & (OPFLAGS::texture_ref | OPFLAGS::src_cast_f32)) == 0 &&
rhs.find("$0") != umax)
{
// Texture sample operations are full-width and are exempt
src_is_fp16 = (src0.fp16 && src0.reg_type == RSX_FP_REGISTER_TYPE_TEMP);
if (src_is_fp16 && rhs.find("$1") != umax)
{
// References operand 1
src_is_fp16 = (src1.fp16 && src1.reg_type == RSX_FP_REGISTER_TYPE_TEMP);
if (src_is_fp16 && rhs.find("$2") != umax)
{
// References operand 2
src_is_fp16 = (src2.fp16 && src2.reg_type == RSX_FP_REGISTER_TYPE_TEMP);
}
}
}
if (src_is_fp16)
{
// LHS argument is of native half type, need to cast to proper type!
if (rhs[0] != '(')
{
// Upcast inputs to processing function instead
opflags |= OPFLAGS::src_cast_f32;
}
else
{
// No need to add explicit casts all over the place, just cast the result once
src_prefix = "$Ty";
}
}
}
// NOTE: x = _select(x, y, cond) is equivalent to x = cond? y : x;
const auto dst_var = ShaderVariable(lhs);
const auto raw_cond = dst_var.add_mask(GetRawCond());
const auto cond = dst_var.match_size(raw_cond);
AddCode(lhs + " = _select(" + lhs + ", " + src_prefix + rhs + ", " + cond + ");");
}
template<typename T> std::string FragmentProgramDecompiler::GetSRC(T src)
{
std::string ret;
u32 precision_modifier = 0;
if constexpr (std::is_same_v<T, SRC0>)
{
precision_modifier = src1.src0_prec_mod;
}
else if constexpr (std::is_same_v<T, SRC1>)
{
precision_modifier = src1.src1_prec_mod;
}
else if constexpr (std::is_same_v<T, SRC2>)
{
precision_modifier = src1.src2_prec_mod;
}
switch (src.reg_type)
{
case RSX_FP_REGISTER_TYPE_TEMP:
if (!src.fp16)
{
if (dst.opcode == RSX_FP_OPCODE_UP16 ||
dst.opcode == RSX_FP_OPCODE_UP2 ||
dst.opcode == RSX_FP_OPCODE_UP4 ||
dst.opcode == RSX_FP_OPCODE_UPB ||
dst.opcode == RSX_FP_OPCODE_UPG)
{
auto& reg = temp_registers[src.tmp_reg_index];
if (reg.requires_gather(src.swizzle_x))
{
properties.has_gather_op = true;
AddReg(src.tmp_reg_index, src.fp16);
ret = getFloatTypeName(4) + reg.gather_r();
break;
}
}
}
else if (precision_modifier == RSX_FP_PRECISION_HALF)
{
// clamp16() is not a cheap operation when emulated; avoid at all costs
precision_modifier = RSX_FP_PRECISION_REAL;
}
ret += AddReg(src.tmp_reg_index, src.fp16);
if (opflags & OPFLAGS::src_cast_f32 && src.fp16 && device_props.has_native_half_support)
{
// Upconvert if there is a chance for ambiguity
ret = getFloatTypeName(4) + "(" + ret + ")";
}
break;
case RSX_FP_REGISTER_TYPE_INPUT:
{
// NOTE: Hw testing showed the following:
// 1. Reading from registers 1 and 2 (COL0 and COL1) is clamped to (0, 1)
// 2. Reading from registers 4-12 (inclusive) is not clamped, but..
// 3. If the texcoord control mask is enabled, the last 2 values are always 0 and hpos.w!
// 4. [A0 + N] addressing can be applied to dynamically sample texture coordinates.
// - This is explained in NV_fragment_program2 specification page, Fragment Attributes section.
// - There is no instruction that writes to the address register directly, it is supposed to be the loop counter!
u32 register_id = src2.use_index_reg ? (src2.addr_reg + 4) : dst.src_attr_reg_num;
const std::string reg_var = (register_id < std::size(reg_table))? reg_table[register_id] : "unk";
bool insert = true;
if (reg_var == "unk")
{
m_is_valid_ucode = false;
insert = false;
}
if (src2.use_index_reg && m_loop_count)
{
// Dynamically load the input
register_id = 0xFF;
}
switch (register_id)
{
case 0x00:
{
// WPOS
ret += reg_table[0];
insert = false;
break;
}
case 0x01:
case 0x02:
{
// COL0, COL1
ret += "_saturate(" + reg_var + ")";
precision_modifier = RSX_FP_PRECISION_REAL;
break;
}
case 0x03:
{
// FOGC
ret += reg_var;
break;
}
case 0x4:
case 0x5:
case 0x6:
case 0x7:
case 0x8:
case 0x9:
case 0xA:
case 0xB:
case 0xC:
case 0xD:
{
// TEX0 - TEX9
// Texcoord 2d mask seems to reset the last 2 arguments to 0 and w if set
const u8 texcoord = u8(register_id) - 4;
if (m_prog.texcoord_is_point_coord(texcoord))
{
// Point sprite coord generation. Stacks with the 2D override mask.
if (m_prog.texcoord_is_2d(texcoord))
{
ret += getFloatTypeName(4) + "(gl_PointCoord, 0., in_w)";
properties.has_w_access = true;
}
else
{
ret += getFloatTypeName(4) + "(gl_PointCoord, 1., 0.)";
}
}
else if (src2.perspective_corr)
{
// Perspective correct flag multiplies the result by 1/w
if (m_prog.texcoord_is_2d(texcoord))
{
ret += getFloatTypeName(4) + "(" + reg_var + ".xy * gl_FragCoord.w, 0., 1.)";
}
else
{
ret += "(" + reg_var + " * gl_FragCoord.w)";
}
}
else
{
if (m_prog.texcoord_is_2d(texcoord))
{
ret += getFloatTypeName(4) + "(" + reg_var + ".xy, 0., in_w)";
properties.has_w_access = true;
}
else
{
ret += reg_var;
}
}
break;
}
case 0xFF:
{
if (m_loop_count > 1)
{
// Afaik there is only one address/loop register on NV40
rsx_log.error("Nested loop with indexed load was detected. Report this to developers!");
}
if (m_prog.texcoord_control_mask)
{
// This would require more work if it exists. It cannot be determined at compile time and has to be part of _indexed_load() subroutine.
rsx_log.error("Indexed load with control override mask detected. Report this to developers!");
}
const auto load_cmd = fmt::format("_indexed_load(i%u + %u)", m_loop_count - 1, src2.addr_reg);
properties.has_dynamic_register_load = true;
insert = false;
if (src2.perspective_corr)
{
ret += "(" + load_cmd + " * gl_FragCoord.w)";
}
else
{
ret += load_cmd;
}
break;
}
default:
{
// SSA (winding direction register)
// UNK
if (reg_var == "unk")
{
rsx_log.error("Bad src reg num: %d", u32{ register_id });
}
ret += reg_var;
precision_modifier = RSX_FP_PRECISION_REAL;
break;
}
}
if (insert)
{
m_parr.AddParam(PF_PARAM_IN, getFloatTypeName(4), reg_var);
}
properties.in_register_mask |= (1 << register_id);
}
break;
case RSX_FP_REGISTER_TYPE_CONSTANT:
ret += AddConst();
break;
case RSX_FP_REGISTER_TYPE_UNKNOWN: // ??? Used by a few games, what is it?
rsx_log.error("Src type 3 used, opcode=0x%X, dst=0x%X s0=0x%X s1=0x%X s2=0x%X",
dst.opcode, dst.HEX, src0.HEX, src1.HEX, src2.HEX);
// This is not some special type, it is a bug indicating memory corruption
// Shaders that are even slightly off do not execute on realhw to any meaningful degree
m_is_valid_ucode = false;
ret += "src3";
precision_modifier = RSX_FP_PRECISION_REAL;
break;
default:
rsx_log.fatal("Bad src type %d", u32{ src.reg_type });
break;
}
static constexpr std::string_view f = "xyzw";
std::string swizzle;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[src.swizzle_x];
swizzle += f[src.swizzle_y];
swizzle += f[src.swizzle_z];
swizzle += f[src.swizzle_w];
if (swizzle != ".xyzw"sv)
{
ret += swizzle;
}
// Warning: Modifier order matters. e.g neg should be applied after precision clamping (tested with Naruto UNS)
if (src.abs) ret = "abs(" + ret + ")";
if (precision_modifier) ret = ClampValue(ret, precision_modifier);
if (src.neg) ret = "-" + ret;
return ret;
}
std::string FragmentProgramDecompiler::BuildCode()
{
// Shader validation
// Shader must at least write to one output for the body to be considered valid
const bool fp16_out = !(m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS);
const std::string float4_type = (fp16_out && device_props.has_native_half_support)? getHalfTypeName(4) : getFloatTypeName(4);
const std::string init_value = float4_type + "(0.)";
std::array<std::string, 4> output_register_names;
std::array<u32, 4> output_register_indices = { 0, 2, 3, 4 };
bool shader_is_valid = false;
// Check depth export
if (m_ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
{
// Hw tests show that the depth export register is default-initialized to 0 and not wpos.z!!
m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "r1", init_value);
shader_is_valid = (!!temp_registers[1].h1_writes);
}
// Add the color output registers. They are statically written to and have guaranteed initialization (except r1.z which == wpos.z)
// This can be used instead of an explicit clear pass in some games (Motorstorm)
if (!fp16_out)
{
output_register_names = { "r0", "r2", "r3", "r4" };
}
else
{
output_register_names = { "h0", "h4", "h6", "h8" };
}
for (int n = 0; n < 4; ++n)
{
if (!m_parr.HasParam(PF_PARAM_NONE, float4_type, output_register_names[n]))
{
m_parr.AddParam(PF_PARAM_NONE, float4_type, output_register_names[n], init_value);
continue;
}
const auto block_index = output_register_indices[n];
shader_is_valid |= (!!temp_registers[block_index].h0_writes);
}
if (!shader_is_valid)
{
properties.has_no_output = true;
if (!properties.has_discard_op)
{
// NOTE: Discard operation overrides output
rsx_log.warning("Shader does not write to any output register and will be NOPed");
main = "/*" + main + "*/";
}
}
if (properties.has_dynamic_register_load)
{
// Since the registers will be loaded dynamically, declare all of them
for (int i = 0; i < 10; ++i)
{
m_parr.AddParam(PF_PARAM_IN, getFloatTypeName(4), reg_table[i + 4]);
}
}
std::stringstream OS;
if (!m_is_valid_ucode)
{
// If the code is broken, do not compile. Simply NOP main and write empty outputs
insertHeader(OS);
OS << "\n";
OS << "void main()\n";
OS << "{\n";
OS << "#if 0\n";
OS << main << "\n";
OS << "#endif\n";
OS << " discard;\n";
OS << "}\n";
return OS.str();
}
insertHeader(OS);
OS << "\n";
insertConstants(OS);
OS << "\n";
insertInputs(OS);
OS << "\n";
insertOutputs(OS);
OS << "\n";
// Insert global function definitions
insertGlobalFunctions(OS);
std::string float4 = getFloatTypeName(4);
const bool glsl = float4 == "vec4";
if (properties.has_clamp)
{
std::string precision_func =
"$float4 precision_clamp($float4 x, float _min, float _max)\n"
"{\n"
" // Treat NaNs as 0\n"
" bvec4 nans = isnan(x);\n"
" x = _select(x, $float4(0.), nans);\n"
" return clamp(x, _min, _max);\n"
"}\n\n";
if (device_props.has_native_half_support)
{
precision_func +=
"$half4 precision_clamp($half4 x, float _min, float _max)\n"
"{\n"
" // Treat NaNs as 0\n"
" bvec4 nans = isnan(x);\n"
" x = _select(x, $half4(0.), nans);\n"
" return clamp(x, $half_t(_min), $half_t(_max));\n"
"}\n\n";
}
OS << Format(precision_func);
}
if (!device_props.has_native_half_support)
{
// Accurate float to half clamping (preserves IEEE-754 NaN)
std::string clamp_func;
if (glsl)
{
clamp_func +=
"vec2 clamp16(vec2 val){ return unpackHalf2x16(packHalf2x16(val)); }\n"
"vec4 clamp16(vec4 val){ return vec4(clamp16(val.xy), clamp16(val.zw)); }\n\n";
}
else
{
clamp_func +=
"$float4 clamp16($float4 x)\n"
"{\n"
" if (!isnan(x.x) && !isinf(x.x)) x.x = clamp(x.x, -65504., +65504.);\n"
" if (!isnan(x.x) && !isinf(x.x)) x.x = clamp(x.x, -65504., +65504.);\n"
" if (!isnan(x.x) && !isinf(x.x)) x.x = clamp(x.x, -65504., +65504.);\n"
" if (!isnan(x.x) && !isinf(x.x)) x.x = clamp(x.x, -65504., +65504.);\n"
" return x;\n"
"}\n\n";
}
OS << Format(clamp_func);
}
else
{
// Define raw casts from f32->f16
OS <<
"#define clamp16(x) " << getHalfTypeName(4) << "(x)\n";
}
OS <<
"#define _builtin_lit lit_legacy\n"
"#define _builtin_log2 log2\n"
"#define _builtin_normalize(x) (length(x) > 0? normalize(x) : x)\n" // HACK!! Workaround for some games that generate NaNs unless texture filtering exactly matches PS3 (BFBC)
"#define _builtin_sqrt(x) sqrt(abs(x))\n"
"#define _builtin_rcp(x) (1. / x)\n"
"#define _builtin_rsq(x) (1. / _builtin_sqrt(x))\n"
"#define _builtin_div(x, y) (x / y)\n";
if (device_props.has_low_precision_rounding)
{
// NVIDIA has terrible rounding errors interpolating constant values across vertices with different w
// PS3 games blindly rely on interpolating a constant to not change the values
// Calling floor/equality will fail randomly causing a moire pattern
OS <<
"#define _builtin_floor(x) floor(x + 0.000001)\n\n";
}
else
{
OS <<
"#define _builtin_floor floor\n\n";
}
if (properties.has_pkg)
{
OS <<
"vec4 _builtin_pkg(const in vec4 value)\n"
"{\n"
" vec4 convert = linear_to_srgb(value);\n"
" return uintBitsToFloat(packUnorm4x8(convert)).xxxx;\n"
"}\n\n";
}
if (properties.has_upg)
{
OS <<
"vec4 _builtin_upg(const in float value)\n"
"{\n"
" vec4 raw = unpackUnorm4x8(floatBitsToUint(value));\n"
" return srgb_to_linear(raw);\n"
"}\n\n";
}
if (properties.has_divsq)
{
// Define RSX-compliant DIVSQ
// If the numerator is 0, the result is always 0 even if the denominator is 0
// NOTE: This operation is component-wise and cannot be accelerated with lerp/mix because these always return NaN if any of the choices is NaN
std::string divsq_func =
"$float4 _builtin_divsq($float4 a, float b)\n"
"{\n"
" $float4 tmp = a / _builtin_sqrt(b);\n"
" $float4 choice = abs(a);\n";
if (glsl)
{
divsq_func +=
" return _select(a, tmp, greaterThan(choice, vec4(0.)));\n";
}
else
{
divsq_func +=
" if (choice.x > 0.) a.x = tmp.x;\n"
" if (choice.y > 0.) a.y = tmp.y;\n"
" if (choice.z > 0.) a.z = tmp.z;\n"
" if (choice.w > 0.) a.w = tmp.w;\n"
" return a;\n";
}
divsq_func +=
"}\n\n";
OS << Format(divsq_func);
}
// Declare register gather/merge if needed
if (properties.has_gather_op)
{
std::string float2 = getFloatTypeName(2);
OS << float4 << " gather(" << float4 << " _h0, " << float4 << " _h1)\n";
OS << "{\n";
OS << " float x = uintBitsToFloat(packHalf2x16(_h0.xy));\n";
OS << " float y = uintBitsToFloat(packHalf2x16(_h0.zw));\n";
OS << " float z = uintBitsToFloat(packHalf2x16(_h1.xy));\n";
OS << " float w = uintBitsToFloat(packHalf2x16(_h1.zw));\n";
OS << " return " << float4 << "(x, y, z, w);\n";
OS << "}\n\n";
OS << float2 << " gather(" << float4 << " _h)\n";
OS << "{\n";
OS << " float x = uintBitsToFloat(packHalf2x16(_h.xy));\n";
OS << " float y = uintBitsToFloat(packHalf2x16(_h.zw));\n";
OS << " return " << float2 << "(x, y);\n";
OS << "}\n\n";
}
if (properties.has_dynamic_register_load)
{
OS <<
"vec4 _indexed_load(int index)\n"
"{\n"
" switch (index)\n"
" {\n"
" case 0: return tc0;\n"
" case 1: return tc1;\n"
" case 2: return tc2;\n"
" case 3: return tc3;\n"
" case 4: return tc4;\n"
" case 5: return tc5;\n"
" case 6: return tc6;\n"
" case 7: return tc7;\n"
" case 8: return tc8;\n"
" case 9: return tc9;\n"
" }\n"
" return vec4(0., 0., 0., 1.);\n"
"}\n\n";
}
insertMainStart(OS);
OS << main << std::endl;
insertMainEnd(OS);
return OS.str();
}
bool FragmentProgramDecompiler::handle_sct_scb(u32 opcode)
{
// Compliance notes based on HW tests:
// DIV is IEEE compliant as is MUL, LG2, EX2. LG2 with negative input returns NaN as expected.
// DIVSQ is not compliant. Result is 0 if numerator is 0 regardless of denominator
// RSQ(0) and RCP(0) return INF as expected
// RSQ ignores the sign of the inputs (Metro Last Light, GTA4)
// SAT modifier flushes NaNs to 0
// Some games that rely on broken DIVSQ behaviour include Dark Souls II and Super Puzzle Fighter II Turbo HD Remix
switch (opcode)
{
case RSX_FP_OPCODE_ADD: SetDst("($0 + $1)"); return true;
case RSX_FP_OPCODE_DIV: SetDst("_builtin_div($0, $1.x)"); return true;
case RSX_FP_OPCODE_DIVSQ:
SetDst("_builtin_divsq($0, $1.x)");
properties.has_divsq = true;
return true;
case RSX_FP_OPCODE_DP2: SetDst(getFunction(FUNCTION::DP2), OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_DP3: SetDst(getFunction(FUNCTION::DP3), OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_DP4: SetDst(getFunction(FUNCTION::DP4), OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_DP2A: SetDst(getFunction(FUNCTION::DP2A), OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_MAD: SetDst("fma($0, $1, $2)", OPFLAGS::src_cast_f32); return true;
case RSX_FP_OPCODE_MAX: SetDst("max($0, $1)", OPFLAGS::src_cast_f32); return true;
case RSX_FP_OPCODE_MIN: SetDst("min($0, $1)", OPFLAGS::src_cast_f32); return true;
case RSX_FP_OPCODE_MOV: SetDst("$0"); return true;
case RSX_FP_OPCODE_MUL: SetDst("($0 * $1)"); return true;
case RSX_FP_OPCODE_RCP: SetDst("_builtin_rcp($0.x).xxxx"); return true;
case RSX_FP_OPCODE_RSQ: SetDst("_builtin_rsq($0.x).xxxx"); return true;
case RSX_FP_OPCODE_SEQ: SetDst("$Ty(" + compareFunction(COMPARE::SEQ, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_SFL: SetDst(getFunction(FUNCTION::SFL), OPFLAGS::skip_type_cast); return true;
case RSX_FP_OPCODE_SGE: SetDst("$Ty(" + compareFunction(COMPARE::SGE, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_SGT: SetDst("$Ty(" + compareFunction(COMPARE::SGT, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_SLE: SetDst("$Ty(" + compareFunction(COMPARE::SLE, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_SLT: SetDst("$Ty(" + compareFunction(COMPARE::SLT, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_SNE: SetDst("$Ty(" + compareFunction(COMPARE::SNE, "$0", "$1") + ")", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_STR: SetDst(getFunction(FUNCTION::STR), OPFLAGS::skip_type_cast); return true;
// SCB-only ops
case RSX_FP_OPCODE_COS: SetDst("cos($0.xxxx)"); return true;
case RSX_FP_OPCODE_DST: SetDst("$Ty(1.0, $0.y * $1.y, $0.z, $1.w)", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_REFL: SetDst(getFunction(FUNCTION::REFL), OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_EX2: SetDst("exp2($0.xxxx)"); return true;
case RSX_FP_OPCODE_FLR: SetDst("_builtin_floor($0)"); return true;
case RSX_FP_OPCODE_FRC: SetDst(getFunction(FUNCTION::FRACT)); return true;
case RSX_FP_OPCODE_LIT:
SetDst("_builtin_lit($0)");
properties.has_lit_op = true;
return true;
case RSX_FP_OPCODE_LIF: SetDst("$Ty(1.0, $0.y, ($0.y > 0 ? exp2($0.w) : 0.0), 1.0)", OPFLAGS::op_extern); return true;
case RSX_FP_OPCODE_LRP: SetDst("$Ty($2 * (1 - $0) + $1 * $0)", OPFLAGS::skip_type_cast); return true;
case RSX_FP_OPCODE_LG2: SetDst("_builtin_log2($0.x).xxxx"); return true;
// Pack operations. See https://www.khronos.org/registry/OpenGL/extensions/NV/NV_fragment_program.txt
// PK2 = PK2H (2 16-bit floats)
// PK16 = PK2US (2 unsigned 16-bit scalars)
// PK4 = PK4B (4 signed 8-bit scalars)
// PKB = PK4UB (4 unsigned 8-bit scalars)
// PK16/UP16 behavior confirmed by Saints Row: Gat out of Hell, ARGB8 -> X16Y16 conversion relies on this to render the wings
case RSX_FP_OPCODE_PK2: SetDst(getFloatTypeName(4) + "(uintBitsToFloat(packHalf2x16($0.xy)))"); return true;
case RSX_FP_OPCODE_PK4: SetDst(getFloatTypeName(4) + "(uintBitsToFloat(packSnorm4x8($0)))"); return true;
case RSX_FP_OPCODE_PK16: SetDst(getFloatTypeName(4) + "(uintBitsToFloat(packUnorm2x16($0.xy)))"); return true;
case RSX_FP_OPCODE_PKG:
// Should be similar to PKB but with gamma correction, see description of PK4UBG in khronos page
properties.has_pkg = true;
SetDst("_builtin_pkg($0)");
return true;
case RSX_FP_OPCODE_PKB: SetDst(getFloatTypeName(4) + "(uintBitsToFloat(packUnorm4x8($0)))"); return true;
case RSX_FP_OPCODE_SIN: SetDst("sin($0.xxxx)"); return true;
}
return false;
}
bool FragmentProgramDecompiler::handle_tex_srb(u32 opcode)
{
auto insert_texture_fetch = [this](FUNCTION base_func)
{
const auto type = m_prog.get_texture_dimension(dst.tex_num);
const auto ref_mask = (1 << dst.tex_num);
std::string swz_mask = "";
auto func_id = base_func;
if (m_prog.texture_state.shadow_textures & ref_mask)
{
properties.shadow_sampler_mask |= ref_mask;
swz_mask = ".xxxx";
func_id = (base_func == FUNCTION::TEXTURE_SAMPLE_PROJ_BASE) ? FUNCTION::TEXTURE_SAMPLE_SHADOW_PROJ_BASE : FUNCTION::TEXTURE_SAMPLE_SHADOW_BASE;
}
else
{
properties.common_access_sampler_mask |= ref_mask;
if (m_prog.texture_state.redirected_textures & ref_mask)
{
properties.redirected_sampler_mask |= ref_mask;
func_id = (base_func == FUNCTION::TEXTURE_SAMPLE_PROJ_BASE) ? FUNCTION::TEXTURE_SAMPLE_DEPTH_RGBA_PROJ_BASE : FUNCTION::TEXTURE_SAMPLE_DEPTH_RGBA_BASE;
}
}
ensure(func_id <= FUNCTION::TEXTURE_SAMPLE_MAX_BASE_ENUM && func_id >= FUNCTION::TEXTURE_SAMPLE_BASE);
if (!(m_prog.texture_state.multisampled_textures & ref_mask)) [[ likely ]]
{
// Clamp type to 3 types (1d, 2d, cube+3d) and offset into sampling redirection table
const auto type_offset = (std::min(static_cast<int>(type), 2) + 1) * static_cast<int>(FUNCTION::TEXTURE_SAMPLE_BASE_ENUM_COUNT);
func_id = static_cast<FUNCTION>(static_cast<int>(func_id) + type_offset);
}
else
{
// Map to multisample op
ensure(type <= rsx::texture_dimension_extended::texture_dimension_2d);
properties.multisampled_sampler_mask |= ref_mask;
func_id = static_cast<FUNCTION>(static_cast<int>(func_id) - static_cast<int>(FUNCTION::TEXTURE_SAMPLE_BASE) + static_cast<int>(FUNCTION::TEXTURE_SAMPLE2DMS));
}
if (dst.exp_tex)
{
properties.has_exp_tex_op = true;
AddCode("_enable_texture_expand();");
}
// Shadow proj
switch (func_id)
{
case FUNCTION::TEXTURE_SAMPLE1D_SHADOW_PROJ:
case FUNCTION::TEXTURE_SAMPLE2D_SHADOW_PROJ:
case FUNCTION::TEXTURE_SAMPLE2DMS_SHADOW_PROJ:
case FUNCTION::TEXTURE_SAMPLE3D_SHADOW_PROJ:
properties.has_texShadowProj = true;
break;
default:
break;
}
SetDst(getFunction(func_id) + swz_mask);
if (dst.exp_tex)
{
// Cleanup
AddCode("_disable_texture_expand();");
}
};
switch (opcode)
{
case RSX_FP_OPCODE_DDX: SetDst(getFunction(FUNCTION::DFDX)); return true;
case RSX_FP_OPCODE_DDY: SetDst(getFunction(FUNCTION::DFDY)); return true;
case RSX_FP_OPCODE_NRM: SetDst("_builtin_normalize($0.xyz).xyzz", OPFLAGS::src_cast_f32); return true;
case RSX_FP_OPCODE_BEM: SetDst("$0.xyxy + $1.xxxx * $2.xzxz + $1.yyyy * $2.ywyw"); return true;
case RSX_FP_OPCODE_TEXBEM:
{
//Untested, should be x2d followed by TEX
AddX2d();
AddCode(Format("x2d = $0.xyxy + $1.xxxx * $2.xzxz + $1.yyyy * $2.ywyw;", true));
[[fallthrough]];
}
case RSX_FP_OPCODE_TEX:
{
AddTex();
insert_texture_fetch(FUNCTION::TEXTURE_SAMPLE_BASE);
return true;
}
case RSX_FP_OPCODE_TXPBEM:
{
// Untested, should be x2d followed by TXP
AddX2d();
AddCode(Format("x2d = $0.xyxy + $1.xxxx * $2.xzxz + $1.yyyy * $2.ywyw;", true));
[[fallthrough]];
}
case RSX_FP_OPCODE_TXP:
{
AddTex();
insert_texture_fetch(FUNCTION::TEXTURE_SAMPLE_PROJ_BASE);
return true;
}
case RSX_FP_OPCODE_TXD:
{
AddTex();
insert_texture_fetch(FUNCTION::TEXTURE_SAMPLE_GRAD_BASE);
return true;
}
case RSX_FP_OPCODE_TXB:
{
AddTex();
insert_texture_fetch(FUNCTION::TEXTURE_SAMPLE_BIAS_BASE);
return true;
}
case RSX_FP_OPCODE_TXL:
{
AddTex();
insert_texture_fetch(FUNCTION::TEXTURE_SAMPLE_LOD_BASE);
return true;
}
// Unpack operations. See https://www.khronos.org/registry/OpenGL/extensions/NV/NV_fragment_program.txt
// UP2 = UP2H (2 16-bit floats)
// UP16 = UP2US (2 unsigned 16-bit scalars)
// UP4 = UP4B (4 signed 8-bit scalars)
// UPB = UP4UB (4 unsigned 8-bit scalars)
// PK16/UP16 behavior confirmed by Saints Row: Gat out of Hell, ARGB8 -> X16Y16 conversion relies on this to render the wings
case RSX_FP_OPCODE_UP2: SetDst("unpackHalf2x16(floatBitsToUint($0.x)).xyxy"); return true;
case RSX_FP_OPCODE_UP4: SetDst("unpackSnorm4x8(floatBitsToUint($0.x))"); return true;
case RSX_FP_OPCODE_UP16: SetDst("unpackUnorm2x16(floatBitsToUint($0.x)).xyxy"); return true;
case RSX_FP_OPCODE_UPG:
// Same as UPB with gamma correction
properties.has_upg = true;
SetDst("_builtin_upg($0.x)");
return true;
case RSX_FP_OPCODE_UPB: SetDst("(unpackUnorm4x8(floatBitsToUint($0.x)))"); return true;
}
return false;
}
std::string FragmentProgramDecompiler::Decompile()
{
auto data = static_cast<be_t<u32>*>(m_prog.get_data());
m_size = 0;
m_location = 0;
m_loop_count = 0;
m_code_level = 1;
m_is_valid_ucode = true;
enum
{
FORCE_NONE,
FORCE_SCT,
FORCE_SCB,
};
int forced_unit = FORCE_NONE;
while (true)
{
for (auto found = std::find(m_end_offsets.begin(), m_end_offsets.end(), m_size);
found != m_end_offsets.end();
found = std::find(m_end_offsets.begin(), m_end_offsets.end(), m_size))
{
m_end_offsets.erase(found);
m_code_level--;
AddCode("}");
m_loop_count--;
}
for (auto found = std::find(m_else_offsets.begin(), m_else_offsets.end(), m_size);
found != m_else_offsets.end();
found = std::find(m_else_offsets.begin(), m_else_offsets.end(), m_size))
{
m_else_offsets.erase(found);
m_code_level--;
AddCode("}");
AddCode("else");
AddCode("{");
m_code_level++;
}
dst.HEX = GetData(data[0]);
src0.HEX = GetData(data[1]);
src1.HEX = GetData(data[2]);
src2.HEX = GetData(data[3]);
m_offset = 4 * sizeof(u32);
opflags = 0;
const u32 opcode = dst.opcode | (src1.opcode_is_branch << 6);
auto SIP = [&]()
{
switch (opcode)
{
case RSX_FP_OPCODE_BRK:
if (m_loop_count) AddFlowOp("break");
else rsx_log.error("BRK opcode found outside of a loop");
break;
case RSX_FP_OPCODE_CAL:
rsx_log.error("Unimplemented SIP instruction: CAL");
break;
case RSX_FP_OPCODE_FENCT:
AddCode("//FENCT");
forced_unit = FORCE_SCT;
break;
case RSX_FP_OPCODE_FENCB:
AddCode("//FENCB");
forced_unit = FORCE_SCB;
break;
case RSX_FP_OPCODE_IFE:
AddCode("if($cond)");
if (src2.end_offset != src1.else_offset)
m_else_offsets.push_back(src1.else_offset << 2);
m_end_offsets.push_back(src2.end_offset << 2);
AddCode("{");
m_code_level++;
break;
case RSX_FP_OPCODE_LOOP:
if (!src0.exec_if_eq && !src0.exec_if_gr && !src0.exec_if_lt)
{
AddCode(fmt::format("//$ifcond for(int i%u = %u; i%u < %u; i%u += %u) {} //-> %u //LOOP",
m_loop_count, src1.init_counter, m_loop_count, src1.end_counter, m_loop_count, src1.increment, src2.end_offset));
}
else
{
AddCode(fmt::format("$ifcond for(int i%u = %u; i%u < %u; i%u += %u) //LOOP",
m_loop_count, src1.init_counter, m_loop_count, src1.end_counter, m_loop_count, src1.increment));
m_loop_count++;
m_end_offsets.push_back(src2.end_offset << 2);
AddCode("{");
m_code_level++;
}
break;
case RSX_FP_OPCODE_REP:
if (!src0.exec_if_eq && !src0.exec_if_gr && !src0.exec_if_lt)
{
AddCode(fmt::format("//$ifcond for(int i%u = %u; i%u < %u; i%u += %u) {} //-> %u //REP",
m_loop_count, src1.init_counter, m_loop_count, src1.end_counter, m_loop_count, src1.increment, src2.end_offset));
}
else
{
AddCode(fmt::format("if($cond) for(int i%u = %u; i%u < %u; i%u += %u) //REP",
m_loop_count, src1.init_counter, m_loop_count, src1.end_counter, m_loop_count, src1.increment));
m_loop_count++;
m_end_offsets.push_back(src2.end_offset << 2);
AddCode("{");
m_code_level++;
}
break;
case RSX_FP_OPCODE_RET:
AddFlowOp("return");
break;
default:
return false;
}
return true;
};
switch (opcode)
{
case RSX_FP_OPCODE_NOP: break;
case RSX_FP_OPCODE_KIL:
properties.has_discard_op = true;
AddFlowOp("_kill()");
break;
default:
int prev_force_unit = forced_unit;
// Some instructions do not respect forced unit
// Tested with Tales of Vesperia
if (SIP()) break;
if (handle_tex_srb(opcode)) break;
// FENCT/FENCB do not actually reject instructions if they don't match the forced unit
// Looks like they are optimization hints and not hard-coded forced paths
if (handle_sct_scb(opcode)) break;
forced_unit = FORCE_NONE;
rsx_log.error("Unknown/illegal instruction: 0x%x (forced unit %d)", opcode, prev_force_unit);
break;
}
m_size += m_offset;
if (dst.end) break;
ensure(m_offset % sizeof(u32) == 0);
data += m_offset / sizeof(u32);
}
while (m_code_level > 1)
{
rsx_log.error("Hanging block found at end of shader. Malformed shader?");
m_code_level--;
AddCode("}");
}
// flush m_code_level
m_code_level = 1;
std::string m_shader = BuildCode();
main.clear();
// m_parr.params.clear();
return m_shader;
}
5,476 | program_util.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/program_util.cpp | RPCS3/rpcs3 | GPL-2.0
#include "stdafx.h"
#include "program_util.h"
namespace rsx
{
// Convert a u16 mask to u32 by duplicating each bit: input bit i maps to output bits 2i and 2i+1
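// e.g. duplicate_and_extend(0b101) == 0b110011: slots 0 and 2 of a per-texture mask
// expand to cover both bits of the corresponding 2-bit dimension fields.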
static u32 duplicate_and_extend(u16 bits)
{
u32 x = bits;
x = (x | (x << 8)) & 0x00FF00FF;
x = (x | (x << 4)) & 0x0F0F0F0F;
x = (x | (x << 2)) & 0x33333333;
x = (x | (x << 1)) & 0x55555555;
return x | (x << 1);
}
void fragment_program_texture_config::masked_transfer(void* dst, const void* src, u16 mask)
{
// Try to optimize for the very common case (first 4 slots used)
switch (mask)
{
case 0:
return;
case 1:
std::memcpy(dst, src, sizeof(TIU_slot)); return;
case 3:
std::memcpy(dst, src, sizeof(TIU_slot) * 2); return;
case 7:
std::memcpy(dst, src, sizeof(TIU_slot) * 3); return;
case 15:
std::memcpy(dst, src, sizeof(TIU_slot) * 4); return;
default:
break;
};
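// General case: copy the smallest contiguous span covering every set bit.
// e.g. mask 0b0110 gives start = 1, end = 3, copying slots 1 and 2 only.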
const auto start = std::countr_zero(mask);
const auto end = 16 - std::countl_zero(mask);
const auto mem_offset = (start * sizeof(TIU_slot));
const auto mem_size = (end - start) * sizeof(TIU_slot);
std::memcpy(static_cast<u8*>(dst) + mem_offset, reinterpret_cast<const u8*>(src) + mem_offset, mem_size);
}
void fragment_program_texture_config::write_to(void* dst, u16 mask) const
{
masked_transfer(dst, slots_, mask);
}
void fragment_program_texture_config::load_from(const void* src, u16 mask)
{
masked_transfer(slots_, src, mask);
}
void fragment_program_texture_state::clear(u32 index)
{
const u16 clear_mask = ~(static_cast<u16>(1 << index));
redirected_textures &= clear_mask;
shadow_textures &= clear_mask;
multisampled_textures &= clear_mask;
}
void fragment_program_texture_state::import(const fragment_program_texture_state& other, u16 mask)
{
redirected_textures = other.redirected_textures & mask;
shadow_textures = other.shadow_textures & mask;
multisampled_textures = other.multisampled_textures & mask;
texture_dimensions = other.texture_dimensions & duplicate_and_extend(mask);
}
void fragment_program_texture_state::set_dimension(texture_dimension_extended type, u32 index)
{
const auto offset = (index * 2);
const auto mask = 3 << offset;
texture_dimensions &= ~mask;
texture_dimensions |= static_cast<u32>(type) << offset;
}
bool fragment_program_texture_state::operator == (const fragment_program_texture_state& other) const
{
return texture_dimensions == other.texture_dimensions &&
redirected_textures == other.redirected_textures &&
shadow_textures == other.shadow_textures &&
multisampled_textures == other.multisampled_textures;
}
void vertex_program_texture_state::clear(u32 index)
{
const u16 clear_mask = ~(static_cast<u16>(1 << index));
multisampled_textures &= clear_mask;
}
void vertex_program_texture_state::import(const vertex_program_texture_state& other, u16 mask)
{
multisampled_textures = other.multisampled_textures & mask;
texture_dimensions = other.texture_dimensions & duplicate_and_extend(mask);
}
void vertex_program_texture_state::set_dimension(texture_dimension_extended type, u32 index)
{
const auto offset = (index * 2);
const auto mask = 3 << offset;
texture_dimensions &= ~mask;
texture_dimensions |= static_cast<u32>(type) << offset;
}
bool vertex_program_texture_state::operator == (const vertex_program_texture_state& other) const
{
return texture_dimensions == other.texture_dimensions &&
multisampled_textures == other.multisampled_textures;
}
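// Maps a contiguous range of constant ids to its position in the packed constant_ids array.
// e.g. with constant_ids = {4, 5, 6, 9}: TranslateConstantsRange(4, 3) == 0 (ids 4..6 are
// packed starting at index 0), while TranslateConstantsRange(5, 3) == -1 (ids 5..7 are not all present).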
int VertexProgramBase::TranslateConstantsRange(int first_index, int count) const
{
// The constant ids should be sorted, so just find the first one and check for continuity
int index = -1;
int next = first_index;
int last = first_index + count - 1;
// Early rejection test
if (constant_ids.empty() || first_index > constant_ids.back() || last < first_index)
{
return -1;
}
for (size_t i = 0; i < constant_ids.size(); ++i)
{
if (constant_ids[i] > first_index && index < 0)
{
// No chance of a match
return -1;
}
if (constant_ids[i] == next)
{
// Index matched
if (index < 0)
{
index = static_cast<int>(i);
}
if (last == next++)
{
return index;
}
continue;
}
if (index >= 0)
{
// Previously matched but no more
return -1;
}
}
// OOB or partial match
return -1;
}
}
5,477 | VertexProgramDecompiler.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/VertexProgramDecompiler.cpp | RPCS3/rpcs3 | GPL-2.0
#include "stdafx.h"
#include "Emu/System.h"
#include "VertexProgramDecompiler.h"
#include <sstream>
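// Builds the destination write-mask suffix for the current instruction.
// e.g. a vector op writing x and z yields ".xz"; a full xyzw mask collapses to "".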
std::string VertexProgramDecompiler::GetMask(bool is_sca) const
{
std::string ret;
if (is_sca)
{
if (d3.sca_writemask_x) ret += "x";
if (d3.sca_writemask_y) ret += "y";
if (d3.sca_writemask_z) ret += "z";
if (d3.sca_writemask_w) ret += "w";
}
else
{
if (d3.vec_writemask_x) ret += "x";
if (d3.vec_writemask_y) ret += "y";
if (d3.vec_writemask_z) ret += "z";
if (d3.vec_writemask_w) ret += "w";
}
return ret.empty() || ret == "xyzw" ? "" : ("." + ret);
}
std::string VertexProgramDecompiler::GetVecMask()
{
return GetMask(false);
}
std::string VertexProgramDecompiler::GetScaMask()
{
return GetMask(true);
}
std::string VertexProgramDecompiler::GetDST(bool is_sca)
{
std::string ret;
const std::string mask = GetMask(is_sca);
// ARL writes to special integer registers
const bool is_address_reg = !is_sca && (d1.vec_opcode == RSX_VEC_OPCODE_ARL);
const auto tmp_index = is_sca ? d3.sca_dst_tmp : d0.dst_tmp;
const bool is_result = is_sca ? !d0.vec_result : d0.vec_result;
if (is_result)
{
// Write to output result register
// vec_result can mask out the VEC op from writing to o[] if SCA is writing to o[]
if (d3.dst != 0x1f)
{
if (d3.dst > 15)
{
rsx_log.error("dst index out of range: %u", d3.dst);
}
if (is_address_reg)
{
rsx_log.error("ARL opcode writing to output register!");
}
const auto reg_type = getFloatTypeName(4);
const auto reg_name = std::string("dst_reg") + std::to_string(d3.dst);
const auto default_value = reg_type + "(0.0f, 0.0f, 0.0f, 1.0f)";
ret += m_parr.AddParam(PF_PARAM_OUT, reg_type, reg_name, default_value) + mask;
}
}
if (tmp_index != 0x3f)
{
if (!ret.empty())
{
// Double assignment. Only possible for vector ops
ensure(!is_sca);
ret += " = ";
}
const std::string reg_type = (is_address_reg) ? getIntTypeName(4) : getFloatTypeName(4);
const std::string reg_sel = (is_address_reg) ? "a" : "r";
ret += m_parr.AddParam(PF_PARAM_NONE, reg_type, reg_sel + std::to_string(tmp_index), reg_type + "(0.)") + mask;
}
else if (!is_result)
{
// Not writing to result register, but not writing to a tmp register either
// Write to CC instead (Far Cry 2)
ret = AddCondReg() + mask;
}
return ret;
}
std::string VertexProgramDecompiler::GetSRC(const u32 n)
{
ensure(n < 3);
static const std::string reg_table[] =
{
"in_pos", "in_weight", "in_normal",
"in_diff_color", "in_spec_color",
"in_fog",
"in_point_size", "in_7",
"in_tc0", "in_tc1", "in_tc2", "in_tc3",
"in_tc4", "in_tc5", "in_tc6", "in_tc7"
};
std::string ret;
const auto float4 = getFloatTypeName(4);
switch (src[n].reg_type)
{
case RSX_VP_REGISTER_TYPE_TEMP:
ret += m_parr.AddParam(PF_PARAM_NONE, float4, "r" + std::to_string(src[n].tmp_src), float4 + "(0.)");
break;
case RSX_VP_REGISTER_TYPE_INPUT:
if (d1.input_src < std::size(reg_table))
{
ret += m_parr.AddParam(PF_PARAM_IN, float4, reg_table[d1.input_src], d1.input_src);
}
else
{
rsx_log.error("Bad input src num: %d", u32{ d1.input_src });
ret += m_parr.AddParam(PF_PARAM_IN, float4, "in_unk", d1.input_src);
}
break;
case RSX_VP_REGISTER_TYPE_CONSTANT:
m_parr.AddParam(PF_PARAM_UNIFORM, float4, std::string("vc[468]"));
properties.has_indexed_constants |= !!d3.index_const;
m_constant_ids.insert(static_cast<u16>(d1.const_src));
ret += std::string("vc[") + std::to_string(d1.const_src) + (d3.index_const ? " + " + AddAddrReg() : "") + "]";
break;
default:
rsx_log.fatal("Bad src%u reg type: %d", n, u32{ src[n].reg_type });
break;
}
static const std::string f = "xyzw";
std::string swizzle;
swizzle += f[src[n].swz_x];
swizzle += f[src[n].swz_y];
swizzle += f[src[n].swz_z];
swizzle += f[src[n].swz_w];
if (swizzle != f) ret += '.' + swizzle;
bool abs = false;
switch (n)
{
default:
case 0: abs = d0.src0_abs; break;
case 1: abs = d0.src1_abs; break;
case 2: abs = d0.src2_abs; break;
}
if (abs) ret = "abs(" + ret + ")";
if (src[n].neg) ret = "-" + ret;
return ret;
}
void VertexProgramDecompiler::SetDST(bool is_sca, std::string value)
{
if (d0.cond == 0) return;
if (is_sca)
{
value = getFloatTypeName(4) + "(" + value + ")";
}
std::string mask = GetMask(is_sca);
value += mask;
if (d0.staturate)
{
value = "clamp(" + value + ", 0.0, 1.0)";
}
std::string dest;
if (const auto tmp_reg = is_sca? d3.sca_dst_tmp: d0.dst_tmp;
d3.dst != 0x1f || tmp_reg != 0x3f)
{
dest = GetDST(is_sca);
}
else if (d0.cond_update_enable_0 || d0.cond_update_enable_1)
{
dest = AddCondReg() + mask;
}
else
{
// Broken instruction?
rsx_log.error("Operation has no output defined! (0x%x, 0x%x, 0x%x, 0x%x)", d0.HEX, d1.HEX, d2.HEX, d3.HEX);
dest = " //";
}
AddCodeCond(Format(dest), value);
}
std::string VertexProgramDecompiler::GetTex()
{
std::string sampler;
switch (m_prog.get_texture_dimension(d2.tex_num))
{
case rsx::texture_dimension_extended::texture_dimension_1d:
sampler = "sampler1D";
break;
case rsx::texture_dimension_extended::texture_dimension_2d:
sampler = "sampler2D";
break;
case rsx::texture_dimension_extended::texture_dimension_3d:
sampler = "sampler3D";
break;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
sampler = "samplerCube";
break;
}
return m_parr.AddParam(PF_PARAM_UNIFORM, sampler, std::string("vtex") + std::to_string(d2.tex_num));
}
std::string VertexProgramDecompiler::Format(const std::string& code)
{
const std::pair<std::string_view, std::function<std::string()>> repl_list[] =
{
{ "$$", []() -> std::string { return "$"; } },
{ "$0", std::bind(std::mem_fn(&VertexProgramDecompiler::GetSRC), this, 0) },
{ "$1", std::bind(std::mem_fn(&VertexProgramDecompiler::GetSRC), this, 1) },
{ "$2", std::bind(std::mem_fn(&VertexProgramDecompiler::GetSRC), this, 2) },
{ "$s", std::bind(std::mem_fn(&VertexProgramDecompiler::GetSRC), this, 2) },
{ "$a", std::bind(std::mem_fn(&VertexProgramDecompiler::AddAddrReg), this) },
{ "$t", [this]() -> std::string { return "vtex" + std::to_string(d2.tex_num); } },
{ "$ifcond ", [this]() -> std::string
{
const std::string& cond = GetCond();
if (cond == "true") return "";
return "if(" + cond + ") ";
}
},
{ "$cond", std::bind(std::mem_fn(&VertexProgramDecompiler::GetCond), this) },
{ "$ifbcond", std::bind(std::mem_fn(&VertexProgramDecompiler::GetOptionalBranchCond), this) },
{ "$Ty", [this](){ return getFloatTypeName(4); } }
};
return fmt::replace_all(code, repl_list);
}
std::string VertexProgramDecompiler::GetRawCond()
{
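// Index is the d0.cond bitmask (lt = 1, eq = 2, gt = 4), so e.g. cond == (lt | eq) == 3
// selects SLE and cond == (lt | gt) == 5 selects SNE. Index 0 ("error") is guarded
// against by the callers, which skip or comment out the operation when cond == 0.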
static const COMPARE cond_string_table[(lt | gt | eq) + 1] =
{
COMPARE::SLT, // "error"
COMPARE::SLT,
COMPARE::SEQ,
COMPARE::SLE,
COMPARE::SGT,
COMPARE::SNE,
COMPARE::SGE,
};
static const char f[4] = { 'x', 'y', 'z', 'w' };
std::string swizzle;
swizzle += f[d0.mask_x];
swizzle += f[d0.mask_y];
swizzle += f[d0.mask_z];
swizzle += f[d0.mask_w];
swizzle = swizzle == "xyzw" ? "" : "." + swizzle;
return compareFunction(cond_string_table[d0.cond], AddCondReg() + swizzle, getFloatTypeName(4) + "(0.)" + swizzle);
}
std::string VertexProgramDecompiler::GetCond()
{
if (d0.cond == 0) return "false";
if (d0.cond == (lt | gt | eq)) return "true";
return "any(" + GetRawCond() + ")";
}
std::string VertexProgramDecompiler::GetOptionalBranchCond() const
{
std::string cond_operator = d3.brb_cond_true ? " != " : " == ";
std::string cond = "(transform_branch_bits & (1u << " + std::to_string(d3.branch_index) + "))" + cond_operator + "0";
return "if (" + cond + ")";
}
void VertexProgramDecompiler::AddCodeCond(const std::string& lhs, const std::string& rhs)
{
enum
{
lt = 0x1,
eq = 0x2,
gt = 0x4,
};
if (!d0.cond_test_enable || d0.cond == (lt | gt | eq))
{
AddCode(lhs + " = " + rhs + ";");
return;
}
if (d0.cond == 0)
{
AddCode("//" + lhs + " = " + rhs + ";");
return;
}
// NOTE: x = _select(x, y, cond) is equivalent to x = cond? y : x;
const auto dst_var = ShaderVariable(lhs);
const auto raw_cond = dst_var.add_mask(GetRawCond());
const auto cond = dst_var.match_size(raw_cond);
AddCode(lhs + " = _select(" + lhs + ", " + rhs + ", " + cond + ");");
}
std::string VertexProgramDecompiler::AddAddrReg()
{
static const char f[] = { 'x', 'y', 'z', 'w' };
const auto mask = std::string(".") + f[d0.addr_swz];
return m_parr.AddParam(PF_PARAM_NONE, getIntTypeName(4), "a" + std::to_string(d0.addr_reg_sel_1), getIntTypeName(4) + "(0)") + mask;
}
std::string VertexProgramDecompiler::AddCondReg()
{
return m_parr.AddParam(PF_PARAM_NONE, getFloatTypeName(4), "cc" + std::to_string(d0.cond_reg_sel_1), getFloatTypeName(4) + "(0.)");
}
u32 VertexProgramDecompiler::GetAddr() const
{
return (d0.iaddrh2 << 9) | (d2.iaddrh << 3) | d3.iaddrl;
}
void VertexProgramDecompiler::AddCode(const std::string& code)
{
m_body.push_back(Format(code) + ";");
m_cur_instr->body.push_back(Format(code));
}
void VertexProgramDecompiler::SetDSTVec(const std::string& code)
{
SetDST(false, code);
}
void VertexProgramDecompiler::SetDSTSca(const std::string& code)
{
SetDST(true, code);
}
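// Clamps a value to a tiny positive floor so the LG2/RSQ paths below never hit
// log/sqrt of zero or a negative input, e.g. log2(max($s, 0.0000000001)).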
std::string VertexProgramDecompiler::NotZeroPositive(const std::string& code)
{
return "max(" + code + ", 0.0000000001)";
}
std::string VertexProgramDecompiler::BuildCode()
{
std::string main_body;
for (uint i = 0, lvl = 1; i < m_instr_count; i++)
{
lvl -= m_instructions[i].close_scopes;
if (lvl < 1) lvl = 1;
for (int j = 0; j < m_instructions[i].put_close_scopes; ++j)
{
--lvl;
if (lvl < 1) lvl = 1;
main_body.append(lvl, '\t') += "}\n";
}
for (int j = 0; j < m_instructions[i].do_count; ++j)
{
main_body.append(lvl, '\t') += "do\n";
main_body.append(lvl, '\t') += "{\n";
lvl++;
}
for (const auto& instruction_body : m_instructions[i].body)
{
main_body.append(lvl, '\t') += instruction_body + "\n";
}
lvl += m_instructions[i].open_scopes;
}
if (const auto float4_type = getFloatTypeName(4); !m_parr.HasParam(PF_PARAM_OUT, float4_type, "dst_reg0"))
{
rsx_log.warning("Vertex program has no POS output, shader will be NOPed");
main_body = "/*" + main_body + "*/";
// Initialize vertex output register to all 0, GPU hw does not always clear position register
m_parr.AddParam(PF_PARAM_OUT, float4_type, "dst_reg0", float4_type + "(0., 0., 0., 1.)");
}
if (!properties.has_indexed_constants && !m_constant_ids.empty())
{
// Relocate transform constants
std::vector<std::pair<std::string, std::string>> reloc_table;
reloc_table.reserve(m_constant_ids.size());
// Build the string lookup table
int offset = 0;
for (const auto& index : m_constant_ids)
{
const auto i = offset++;
if (i == index) continue; // Replace with self
reloc_table.emplace_back(fmt::format("vc[%d]", index), fmt::format("vc[%d]", i));
}
// One-time patch
main_body = fmt::replace_all(main_body, reloc_table);
// Rename the array type
auto type_list = ensure(m_parr.SearchParam(PF_PARAM_UNIFORM, getFloatTypeName(4)));
const auto item = ParamItem(fmt::format("vc[%llu]", m_constant_ids.size()), -1);
type_list->ReplaceOrInsert("vc[468]", item);
}
std::stringstream OS;
insertHeader(OS);
insertInputs(OS, m_parr.params[PF_PARAM_IN]);
OS << std::endl;
insertOutputs(OS, m_parr.params[PF_PARAM_NONE]);
OS << std::endl;
insertConstants(OS, m_parr.params[PF_PARAM_UNIFORM]);
OS << std::endl;
insertMainStart(OS);
OS << main_body.c_str() << std::endl;
insertMainEnd(OS);
return OS.str();
}
VertexProgramDecompiler::VertexProgramDecompiler(const RSXVertexProgram& prog) :
m_prog(prog)
{
}
std::string VertexProgramDecompiler::Decompile()
{
const auto& data = m_prog.data;
m_instr_count = data.size() / 4;
bool has_BRA = false;
bool program_end = false;
u32 i = 1;
u32 last_label_addr = 0;
for (auto& param : m_parr.params)
{
param.clear();
}
for (auto& instruction : m_instructions)
{
instruction.reset();
}
if (!m_prog.jump_table.empty())
{
last_label_addr = *m_prog.jump_table.rbegin();
}
auto find_jump_lvl = [this](u32 address) -> u32
{
u32 jump = 1;
for (auto pos : m_prog.jump_table)
{
if (address == pos)
return jump;
++jump;
}
return -1;
};
auto do_function_call = [this, &i](const std::string& condition)
{
// Call function
// NOTE: Addresses are assumed to have been patched
m_call_stack.push(i+1);
AddCode(condition);
AddCode("{");
m_cur_instr->open_scopes++;
i = GetAddr();
};
auto do_function_return = [this, &i]()
{
if (!m_call_stack.empty())
{
//TODO: Conditional returns
i = m_call_stack.top();
m_call_stack.pop();
m_cur_instr->close_scopes++;
AddCode("}");
}
else
{
AddCode("$ifcond return");
}
};
auto do_program_exit = [this, do_function_return, &i](bool abort)
{
if (abort)
{
AddCode("//ABORT");
}
while (!m_call_stack.empty())
{
rsx_log.error("vertex program end in subroutine call!");
do_function_return();
}
if ((i + 1) < m_instr_count)
{
//Forcefully exit
AddCode("return;");
}
};
if (has_BRA || !m_prog.jump_table.empty())
{
m_cur_instr = &m_instructions[0];
u32 jump_position = 0;
if (m_prog.entry != m_prog.base_address)
{
jump_position = find_jump_lvl(m_prog.entry - m_prog.base_address);
ensure(jump_position != umax);
}
AddCode(fmt::format("int jump_position = %u;", jump_position));
AddCode("while (true)");
AddCode("{");
m_cur_instr->open_scopes++;
AddCode("if (jump_position <= 0)");
AddCode("{");
m_cur_instr->open_scopes++;
}
for (i = 0; i < m_instr_count; ++i)
{
if (!m_prog.instruction_mask[i])
{
// Dead code, skip
continue;
}
m_cur_instr = &m_instructions[i];
d0.HEX = data[i * 4 + 0];
d1.HEX = data[i * 4 + 1];
d2.HEX = data[i * 4 + 2];
d3.HEX = data[i * 4 + 3];
src[0].src0l = d2.src0l;
src[0].src0h = d1.src0h;
src[1].src1 = d2.src1;
src[2].src2l = d3.src2l;
src[2].src2h = d2.src2h;
if (m_call_stack.empty() && i)
{
//TODO: Subroutines can also have arbitrary jumps!
u32 jump_position = find_jump_lvl(i);
if (has_BRA || jump_position != umax)
{
m_cur_instr->close_scopes++;
AddCode("}");
AddCode("");
AddCode(fmt::format("if (jump_position <= %u)", jump_position));
AddCode("{");
m_cur_instr->open_scopes++;
}
}
if (!src[0].reg_type || !src[1].reg_type || !src[2].reg_type)
{
AddCode("//Src check failed. Aborting");
program_end = true;
d1.vec_opcode = d1.sca_opcode = 0;
}
switch (d1.vec_opcode)
{
case RSX_VEC_OPCODE_NOP: break;
case RSX_VEC_OPCODE_MOV: SetDSTVec("$0"); break;
case RSX_VEC_OPCODE_MUL: SetDSTVec("($0 * $1)"); break;
case RSX_VEC_OPCODE_ADD: SetDSTVec("($0 + $2)"); break;
case RSX_VEC_OPCODE_MAD: SetDSTVec("fma($0, $1, $2)"); break;
case RSX_VEC_OPCODE_DP3: SetDSTVec(getFunction(FUNCTION::DP3)); break;
case RSX_VEC_OPCODE_DPH: SetDSTVec(getFunction(FUNCTION::DPH)); break;
case RSX_VEC_OPCODE_DP4: SetDSTVec(getFunction(FUNCTION::DP4)); break;
case RSX_VEC_OPCODE_DST: SetDSTVec("vec4(1.0, $0.y * $1.y, $0.z, $1.w)"); break;
case RSX_VEC_OPCODE_MIN: SetDSTVec("min($0, $1)"); break;
case RSX_VEC_OPCODE_MAX: SetDSTVec("max($0, $1)"); break;
case RSX_VEC_OPCODE_SLT: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SLT, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_SGE: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SGE, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_ARL: SetDSTVec(getIntTypeName(4) + "($0)"); break;
case RSX_VEC_OPCODE_FRC: SetDSTVec(getFunction(FUNCTION::FRACT)); break;
case RSX_VEC_OPCODE_FLR: SetDSTVec("floor($0)"); break;
case RSX_VEC_OPCODE_SEQ: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SEQ, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_SFL: SetDSTVec(getFunction(FUNCTION::SFL)); break;
case RSX_VEC_OPCODE_SGT: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SGT, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_SLE: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SLE, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_SNE: SetDSTVec(getFloatTypeName(4) + "(" + compareFunction(COMPARE::SNE, "$0", "$1") + ")"); break;
case RSX_VEC_OPCODE_STR: SetDSTVec(getFunction(FUNCTION::STR)); break;
case RSX_VEC_OPCODE_SSG: SetDSTVec("sign($0)"); break;
case RSX_VEC_OPCODE_TXL:
{
GetTex();
const bool is_multisampled = m_prog.texture_state.multisampled_textures & (1 << d2.tex_num);
switch (m_prog.get_texture_dimension(d2.tex_num))
{
case rsx::texture_dimension_extended::texture_dimension_1d:
SetDSTVec(is_multisampled ? getFunction(FUNCTION::VERTEX_TEXTURE_FETCH2DMS) : getFunction(FUNCTION::VERTEX_TEXTURE_FETCH1D));
break;
case rsx::texture_dimension_extended::texture_dimension_2d:
SetDSTVec(getFunction(is_multisampled ? FUNCTION::VERTEX_TEXTURE_FETCH2DMS : FUNCTION::VERTEX_TEXTURE_FETCH2D));
break;
case rsx::texture_dimension_extended::texture_dimension_3d:
SetDSTVec(getFunction(FUNCTION::VERTEX_TEXTURE_FETCH3D));
break;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
SetDSTVec(getFunction(FUNCTION::VERTEX_TEXTURE_FETCHCUBE));
break;
}
break;
}
default:
AddCode(fmt::format("//Unknown vp opcode 0x%x", u32{ d1.vec_opcode }));
rsx_log.error("Unknown vp opcode 0x%x", u32{ d1.vec_opcode });
program_end = true;
break;
}
//NOTE: Branch instructions have to be decoded last in case there was a dual-issued instruction! (each instruction word can encode a vector and a scalar opcode that execute together)
switch (d1.sca_opcode)
{
case RSX_SCA_OPCODE_NOP: break;
case RSX_SCA_OPCODE_MOV: SetDSTSca("$s"); break;
case RSX_SCA_OPCODE_RCP: SetDSTSca("(1.0 / $s)"); break;
case RSX_SCA_OPCODE_RCC: SetDSTSca("clamp(1.0 / $s, 5.42101e-20, 1.884467e19)"); break;
case RSX_SCA_OPCODE_RSQ: SetDSTSca("1. / sqrt(" + NotZeroPositive("$s.x") + ").xxxx"); break;
case RSX_SCA_OPCODE_EXP: SetDSTSca("exp($s)"); break;
case RSX_SCA_OPCODE_LOG: SetDSTSca("log($s)"); break;
case RSX_SCA_OPCODE_LIT:
SetDSTSca("lit_legacy($s)");
properties.has_lit_op = true;
break;
case RSX_SCA_OPCODE_BRA:
{
if (m_call_stack.empty())
{
AddCode("$ifcond //BRA");
AddCode("{");
m_cur_instr->open_scopes++;
AddCode("jump_position = $a;");
AddCode("continue;");
m_cur_instr->close_scopes++;
AddCode("}");
}
else
{
//TODO
rsx_log.error("BRA opcode found in subroutine!");
}
}
break;
case RSX_SCA_OPCODE_BRI: // works differently (BRI o[1].x(TR) L0;)
{
if (m_call_stack.empty())
{
u32 jump_position = find_jump_lvl(GetAddr());
AddCode("$ifcond //BRI");
AddCode("{");
m_cur_instr->open_scopes++;
AddCode(fmt::format("jump_position = %u;", jump_position));
AddCode("continue;");
m_cur_instr->close_scopes++;
AddCode("}");
}
else
{
//TODO
rsx_log.error("BRI opcode found in subroutine!");
}
}
break;
case RSX_SCA_OPCODE_CAL:
// works the same as BRI
AddCode("//CAL");
do_function_call("$ifcond");
break;
case RSX_SCA_OPCODE_CLI:
// works the same as BRI
rsx_log.error("Unimplemented VP opcode CLI");
AddCode("//CLI");
do_function_call("$ifcond");
break;
case RSX_SCA_OPCODE_RET:
// works like BRI but shorter (RET o[1].x(TR);)
do_function_return();
break;
case RSX_SCA_OPCODE_LG2: SetDSTSca("log2(" + NotZeroPositive("$s") + ")"); break;
case RSX_SCA_OPCODE_EX2: SetDSTSca("exp2($s)"); break;
case RSX_SCA_OPCODE_SIN: SetDSTSca("sin($s)"); break;
case RSX_SCA_OPCODE_COS: SetDSTSca("cos($s)"); break;
case RSX_SCA_OPCODE_BRB:
// works differently (BRB o[1].x !b0, L0;)
{
if (m_call_stack.empty())
{
u32 jump_position = find_jump_lvl(GetAddr());
AddCode("$ifbcond //BRB");
AddCode("{");
m_cur_instr->open_scopes++;
AddCode(fmt::format("jump_position = %u;", jump_position));
AddCode("continue;");
m_cur_instr->close_scopes++;
AddCode("}");
AddCode("");
}
else
{
//TODO
rsx_log.error("BRA opcode found in subroutine!");
}
break;
}
case RSX_SCA_OPCODE_CLB:
// works the same as BRB
AddCode("//CLB");
do_function_call("$ifbcond");
break;
case RSX_SCA_OPCODE_PSH:
// works differently (PSH o[1].x A0;)
rsx_log.error("Unimplemented sca_opcode PSH");
break;
case RSX_SCA_OPCODE_POP:
// works differently (POP o[1].x;)
rsx_log.error("Unimplemented sca_opcode POP");
break;
default:
AddCode(fmt::format("//Unknown vp sca_opcode 0x%x", u32{ d1.sca_opcode }));
rsx_log.error("Unknown vp sca_opcode 0x%x", u32{ d1.sca_opcode });
program_end = true;
break;
}
if (program_end || !!d3.end)
{
do_program_exit(!d3.end);
if (i >= last_label_addr)
{
if ((i + 1) < m_instr_count)
{
// In rare cases, this might be harmless (large coalesced program blocks controlled via branches aka ubershaders)
rsx_log.error("Vertex program block aborts prematurely. Expect glitches");
}
break;
}
}
}
if (has_BRA || !m_prog.jump_table.empty())
{
m_cur_instr = &m_instructions[m_instr_count - 1];
m_cur_instr->close_scopes++;
AddCode("}");
AddCode("break;");
m_cur_instr->close_scopes++;
AddCode("}");
}
std::string result = BuildCode();
m_body.clear();
return result;
}
| 21,428 | C++ | .cpp | 689 | 28.079826 | 133 | 0.646563 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,478 | CgBinaryFragmentProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/CgBinaryFragmentProgram.cpp |
#include "stdafx.h"
#include "CgBinaryProgram.h"
#include "RSXFragmentProgram.h"
#include <algorithm>
void CgBinaryDisasm::AddCodeAsm(const std::string& code)
{
ensure((m_opcode < 70));
std::string op_name;
if (dst.dest_reg == 63)
{
m_dst_reg_name = fmt::format("RC%s, ", GetMask());
op_name = rsx_fp_op_names[m_opcode] + "XC";
}
else
{
m_dst_reg_name = fmt::format("%s%d%s, ", dst.fp16 ? "H" : "R", dst.dest_reg, GetMask());
op_name = rsx_fp_op_names[m_opcode] + std::string(dst.fp16 ? "H" : "R");
}
switch (m_opcode)
{
case RSX_FP_OPCODE_BRK:
case RSX_FP_OPCODE_CAL:
case RSX_FP_OPCODE_FENCT:
case RSX_FP_OPCODE_FENCB:
case RSX_FP_OPCODE_IFE:
case RSX_FP_OPCODE_KIL:
case RSX_FP_OPCODE_LOOP:
case RSX_FP_OPCODE_NOP:
case RSX_FP_OPCODE_REP:
case RSX_FP_OPCODE_RET:
m_dst_reg_name.clear();
op_name = rsx_fp_op_names[m_opcode] + std::string(dst.fp16 ? "H" : "R");
break;
default: break;
}
m_arb_shader += (op_name + " " + m_dst_reg_name + FormatDisAsm(code) + ";" + "\n");
}
std::string CgBinaryDisasm::GetMask() const
{
std::string ret;
ret.reserve(5);
static constexpr std::string_view dst_mask = "xyzw";
ret += '.';
if (dst.mask_x) ret += dst_mask[0];
if (dst.mask_y) ret += dst_mask[1];
if (dst.mask_z) ret += dst_mask[2];
if (dst.mask_w) ret += dst_mask[3];
return ret == "."sv || ret == ".xyzw"sv ? "" : (ret);
}
std::string CgBinaryDisasm::AddRegDisAsm(u32 index, int fp16) const
{
return (fp16 ? 'H' : 'R') + std::to_string(index);
}
std::string CgBinaryDisasm::AddConstDisAsm()
{
u32* data = reinterpret_cast<u32*>(&m_buffer[m_offset + m_size + 4 * sizeof(u32)]);
m_step = 2 * 4 * sizeof(u32);
const u32 x = GetData(data[0]);
const u32 y = GetData(data[1]);
const u32 z = GetData(data[2]);
const u32 w = GetData(data[3]);
return fmt::format("{0x%08x(%g), 0x%08x(%g), 0x%08x(%g), 0x%08x(%g)}", x, std::bit_cast<f32>(x), y, std::bit_cast<f32>(y), z, std::bit_cast<f32>(z), w, std::bit_cast<f32>(w));
}
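// Worked example: an embedded constant of (1.0, 0.0, 0.0, 1.0) disassembles to
// {0x3f800000(1), 0x00000000(0), 0x00000000(0), 0x3f800000(1)}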
std::string CgBinaryDisasm::AddTexDisAsm() const
{
return (std::string("TEX") + std::to_string(dst.tex_num));
}
std::string CgBinaryDisasm::GetCondDisAsm() const
{
static constexpr std::string_view f = "xyzw";
std::string swizzle, cond;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[src0.cond_swizzle_x];
swizzle += f[src0.cond_swizzle_y];
swizzle += f[src0.cond_swizzle_z];
swizzle += f[src0.cond_swizzle_w];
if (swizzle == ".xxxx"sv) swizzle = ".x";
else if (swizzle == ".yyyy"sv) swizzle = ".y";
else if (swizzle == ".zzzz"sv) swizzle = ".z";
else if (swizzle == ".wwww"sv) swizzle = ".w";
if (swizzle == ".xyzw"sv)
{
swizzle.clear();
}
if (src0.exec_if_gr && src0.exec_if_eq)
{
cond = "GE";
}
else if (src0.exec_if_lt && src0.exec_if_eq)
{
cond = "LE";
}
else if (src0.exec_if_gr && src0.exec_if_lt)
{
cond = "NE";
}
else if (src0.exec_if_gr)
{
cond = "GT";
}
else if (src0.exec_if_lt)
{
cond = "LT";
}
else if (src0.exec_if_eq)
{
cond = "FL";
}
else
{
cond = "TR";
}
return cond + swizzle;
}
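// Worked example: exec_if_gr + exec_if_eq with a .xxxx condition swizzle
// disassembles to "GE.x"; with no exec_if flag set the result is "TR".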
std::string CgBinaryDisasm::FormatDisAsm(const std::string& code)
{
const std::pair<std::string_view, std::function<std::string()>> repl_list[] =
{
{ "$$", []() -> std::string { return "$"; } },
{ "$0", [this]{ return GetSrcDisAsm<SRC0>(src0); } },
{ "$1", [this]{ return GetSrcDisAsm<SRC1>(src1); } },
{ "$2", [this]{ return GetSrcDisAsm<SRC2>(src2); } },
{ "$t", [this]{ return AddTexDisAsm(); } },
{ "$m", [this]{ return GetMask(); } },
{ "$cond", [this]{ return GetCondDisAsm(); } },
{ "$c", [this]{ return AddConstDisAsm(); } },
};
return fmt::replace_all(code, repl_list);
}
template<typename T> std::string CgBinaryDisasm::GetSrcDisAsm(T src)
{
std::string ret;
switch (src.reg_type)
{
case 0: //tmp
ret += AddRegDisAsm(src.tmp_reg_index, src.fp16);
break;
case 1: //input
{
static const std::string reg_table[] =
{
"WPOS", "COL0", "COL1", "FOGC", "TEX0",
"TEX1", "TEX2", "TEX3", "TEX4", "TEX5",
"TEX6", "TEX7", "TEX8", "TEX9", "SSA"
};
switch (dst.src_attr_reg_num)
{
case 0x00: ret += reg_table[0]; break;
default:
if (dst.src_attr_reg_num < std::size(reg_table))
{
const std::string perspective_correction = src2.perspective_corr ? "g" : "f";
const std::string input_attr_reg = reg_table[dst.src_attr_reg_num];
fmt::append(ret, "%s[%s]", perspective_correction, input_attr_reg);
}
else
{
rsx_log.error("Bad src reg num: %d", u32{ dst.src_attr_reg_num });
}
break;
}
break;
}
case 2: //const
ret += AddConstDisAsm();
break;
default:
rsx_log.error("Bad src type %d", u32{ src.reg_type });
break;
}
static constexpr std::string_view f = "xyzw";
std::string swizzle;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[src.swizzle_x];
swizzle += f[src.swizzle_y];
swizzle += f[src.swizzle_z];
swizzle += f[src.swizzle_w];
if (swizzle == ".xxxx"sv) swizzle = ".x";
else if (swizzle == ".yyyy"sv) swizzle = ".y";
else if (swizzle == ".zzzz"sv) swizzle = ".z";
else if (swizzle == ".wwww"sv) swizzle = ".w";
if (swizzle != ".xyzw"sv)
{
ret += swizzle;
}
if (src.neg) ret = "-" + ret;
if (src.abs) ret = "|" + ret + "|";
return ret;
}
void CgBinaryDisasm::TaskFP()
{
m_size = 0;
u32* data = reinterpret_cast<u32*>(&m_buffer[m_offset]);
ensure((m_buffer_size - m_offset) % sizeof(u32) == 0);
enum
{
FORCE_NONE,
FORCE_SCT,
FORCE_SCB
};
int forced_unit = FORCE_NONE;
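// Each IFE/LOOP/REP records the byte offsets of its ELSE/END targets in the
// m_*_offsets lists; the scans below close those constructs by emitting
// ELSE/ENDIF/ENDLOOP whenever the decode cursor (m_size) reaches a recorded
// offset.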
while (true)
{
for (auto found = std::find(m_end_offsets.begin(), m_end_offsets.end(), m_size);
found != m_end_offsets.end();
found = std::find(m_end_offsets.begin(), m_end_offsets.end(), m_size))
{
m_end_offsets.erase(found);
m_arb_shader += "ENDIF;\n";
}
for (auto found = std::find(m_loop_end_offsets.begin(), m_loop_end_offsets.end(), m_size);
found != m_loop_end_offsets.end();
found = std::find(m_loop_end_offsets.begin(), m_loop_end_offsets.end(), m_size))
{
m_loop_end_offsets.erase(found);
m_arb_shader += "ENDLOOP;\n";
}
for (auto found = std::find(m_else_offsets.begin(), m_else_offsets.end(), m_size);
found != m_else_offsets.end();
found = std::find(m_else_offsets.begin(), m_else_offsets.end(), m_size))
{
m_else_offsets.erase(found);
m_arb_shader += "ELSE;\n";
}
dst.HEX = GetData(data[0]);
src0.HEX = GetData(data[1]);
src1.HEX = GetData(data[2]);
src2.HEX = GetData(data[3]);
m_step = 4 * sizeof(u32);
m_opcode = dst.opcode | (src1.opcode_is_branch << 6);
auto SCT = [&]()
{
switch (m_opcode)
{
case RSX_FP_OPCODE_ADD: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DIV: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DIVSQ: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP2: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP3: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP4: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP2A: AddCodeAsm("$0, $1, $2"); break;
case RSX_FP_OPCODE_MAD: AddCodeAsm("$0, $1, $2"); break;
case RSX_FP_OPCODE_MAX: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_MIN: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_MOV: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_MUL: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_RCP: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_RSQ: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_SEQ: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SFL: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SGE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SGT: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SLE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SLT: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SNE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_STR: AddCodeAsm("$0, $1"); break;
default:
return false;
}
return true;
};
auto SCB = [&]()
{
switch (m_opcode)
{
case RSX_FP_OPCODE_ADD: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_COS: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_DP2: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP3: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP4: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_DP2A: AddCodeAsm("$0, $1, $2"); break;
case RSX_FP_OPCODE_DST: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_REFL: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_EX2: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_FLR: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_FRC: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_LIT: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_LIF: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_LRP: AddCodeAsm("# WARNING"); break;
case RSX_FP_OPCODE_LG2: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_MAD: AddCodeAsm("$0, $1, $2"); break;
case RSX_FP_OPCODE_MAX: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_MIN: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_MOV: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_MUL: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_PK2: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_PK4: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_PK16: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_PKB: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_PKG: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_SEQ: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SFL: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SGE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SGT: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SIN: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_SLE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SLT: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_SNE: AddCodeAsm("$0, $1"); break;
case RSX_FP_OPCODE_STR: AddCodeAsm("$0, $1"); break;
default:
return false;
}
return true;
};
auto TEX_SRB = [&]()
{
switch (m_opcode)
{
case RSX_FP_OPCODE_DDX: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_DDY: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_NRM: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_BEM: AddCodeAsm("# WARNING"); break;
case RSX_FP_OPCODE_TEX: AddCodeAsm("$0, $t"); break;
case RSX_FP_OPCODE_TEXBEM: AddCodeAsm("# WARNING"); break;
case RSX_FP_OPCODE_TXP: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_TXPBEM: AddCodeAsm("# WARNING"); break;
case RSX_FP_OPCODE_TXD: AddCodeAsm("$0, $1, $t"); break;
case RSX_FP_OPCODE_TXB: AddCodeAsm("$0, $t"); break;
case RSX_FP_OPCODE_TXL: AddCodeAsm("$0, $t"); break;
case RSX_FP_OPCODE_UP2: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_UP4: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_UP16: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_UPB: AddCodeAsm("$0"); break;
case RSX_FP_OPCODE_UPG: AddCodeAsm("$0"); break;
default:
return false;
}
return true;
};
auto SIP = [&]()
{
switch (m_opcode)
{
case RSX_FP_OPCODE_BRK: AddCodeAsm("$cond"); break;
case RSX_FP_OPCODE_CAL: AddCodeAsm("$cond"); break;
case RSX_FP_OPCODE_FENCT: AddCodeAsm(""); break;
case RSX_FP_OPCODE_FENCB: AddCodeAsm(""); break;
case RSX_FP_OPCODE_IFE:
{
m_else_offsets.push_back(src1.else_offset << 2);
m_end_offsets.push_back(src2.end_offset << 2);
AddCodeAsm("($cond)");
break;
}
case RSX_FP_OPCODE_LOOP:
{
if (!src0.exec_if_eq && !src0.exec_if_gr && !src0.exec_if_lt)
{
AddCodeAsm(fmt::format("{ %u, %u, %u }", src1.end_counter, src1.init_counter, src1.increment));
}
else
{
m_loop_end_offsets.push_back(src2.end_offset << 2);
AddCodeAsm(fmt::format("{ %u, %u, %u }", src1.end_counter, src1.init_counter, src1.increment));
}
break;
}
case RSX_FP_OPCODE_REP:
{
if (!src0.exec_if_eq && !src0.exec_if_gr && !src0.exec_if_lt)
{
m_arb_shader += "# RSX_FP_OPCODE_REP_1\n";
}
else
{
m_end_offsets.push_back(src2.end_offset << 2);
m_arb_shader += "# RSX_FP_OPCODE_REP_2\n";
}
break;
}
case RSX_FP_OPCODE_RET: AddCodeAsm("$cond"); break;
default:
return false;
}
return true;
};
switch (m_opcode)
{
case RSX_FP_OPCODE_NOP: AddCodeAsm(""); break;
case RSX_FP_OPCODE_KIL: AddCodeAsm("$cond"); break;
default:
if (forced_unit == FORCE_NONE)
{
if (SIP()) break;
if (SCT()) break;
if (TEX_SRB()) break;
if (SCB()) break;
}
else if (forced_unit == FORCE_SCT)
{
forced_unit = FORCE_NONE;
if (SCT()) break;
}
else if (forced_unit == FORCE_SCB)
{
forced_unit = FORCE_NONE;
if (SCB()) break;
}
rsx_log.error("Unknown/illegal instruction: 0x%x (forced unit %d)", m_opcode, forced_unit);
break;
}
m_size += m_step;
if (dst.end)
{
m_arb_shader.pop_back();
m_arb_shader += " # last instruction\nEND\n";
break;
}
ensure(m_step % sizeof(u32) == 0);
data += m_step / sizeof(u32);
}
}
| 12,885 | C++ | .cpp | 414 | 27.859903 | 176 | 0.622663 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,479 | GLSLCommon.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/GLSLCommon.cpp |
#include "stdafx.h"
#include "GLSLCommon.h"
#include "RSXFragmentProgram.h"
#include "Emu/system_config.h"
#include "Emu/RSX/gcm_enums.h"
#include "Utilities/StrFmt.h"
namespace program_common
{
template <typename T>
void define_glsl_constants(std::ostream& OS, std::initializer_list<std::pair<const char*, T>> enums)
{
for (const auto& e : enums)
{
if constexpr (std::is_enum_v<T> || std::is_integral_v<T>)
{
OS << "#define " << e.first << " " << static_cast<int>(e.second) << "\n";
}
else
{
OS << "#define " << e.first << " " << e.second << "\n";
}
}
OS << "\n";
}
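// For example, define_glsl_constants<int>(OS, {{ "FOO", 2 }}) emits
// "#define FOO 2" followed by a blank line.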
void define_glsl_switches(std::ostream& OS, std::vector<std::string_view>& enums)
{
for (const auto& e : enums)
{
OS << "#define " << e << "\n";
}
OS << "\n";
}
}
namespace glsl
{
std::string getFloatTypeNameImpl(usz elementCount)
{
switch (elementCount)
{
default:
abort();
case 1:
return "float";
case 2:
return "vec2";
case 3:
return "vec3";
case 4:
return "vec4";
}
}
std::string getHalfTypeNameImpl(usz elementCount)
{
switch (elementCount)
{
default:
abort();
case 1:
return "float16_t";
case 2:
return "f16vec2";
case 3:
return "f16vec3";
case 4:
return "f16vec4";
}
}
std::string compareFunctionImpl(COMPARE f, const std::string &Op0, const std::string &Op1, bool scalar)
{
if (scalar)
{
switch (f)
{
case COMPARE::SEQ:
return fmt::format("CMP_FIXUP(%s) == CMP_FIXUP(%s)", Op0, Op1);
case COMPARE::SGE:
return fmt::format("CMP_FIXUP(%s) >= CMP_FIXUP(%s)", Op0, Op1);
case COMPARE::SGT:
return fmt::format("CMP_FIXUP(%s) > CMP_FIXUP(%s)", Op0, Op1);
case COMPARE::SLE:
return fmt::format("CMP_FIXUP(%s) <= CMP_FIXUP(%s)", Op0, Op1);
case COMPARE::SLT:
return fmt::format("CMP_FIXUP(%s) < CMP_FIXUP(%s)", Op0, Op1);
case COMPARE::SNE:
return fmt::format("CMP_FIXUP(%s) != CMP_FIXUP(%s)", Op0, Op1);
}
}
else
{
switch (f)
{
case COMPARE::SEQ:
return fmt::format("equal(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
case COMPARE::SGE:
return fmt::format("greaterThanEqual(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
case COMPARE::SGT:
return fmt::format("greaterThan(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
case COMPARE::SLE:
return fmt::format("lessThanEqual(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
case COMPARE::SLT:
return fmt::format("lessThan(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
case COMPARE::SNE:
return fmt::format("notEqual(CMP_FIXUP(%s), CMP_FIXUP(%s))", Op0, Op1);
}
}
fmt::throw_exception("Unknown compare function");
}
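// For example, compareFunctionImpl(COMPARE::SGT, "a", "b", true) yields
// "CMP_FIXUP(a) > CMP_FIXUP(b)"; the vector (scalar == false) variants map to
// the GLSL component-wise builtins (greaterThan, lessThanEqual, ...).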
void insert_vertex_input_fetch(std::stringstream& OS, glsl_rules rules, bool glsl4_compliant)
{
std::string vertex_id_name = (rules != glsl_rules_vulkan) ? "gl_VertexID" : "gl_VertexIndex";
// Actually decode a vertex attribute from a raw byte stream
program_common::define_glsl_constants<int>(OS,
{
{ "VTX_FMT_SNORM16", RSX_VERTEX_BASE_TYPE_SNORM16 },
{ "VTX_FMT_FLOAT32", RSX_VERTEX_BASE_TYPE_FLOAT },
{ "VTX_FMT_FLOAT16", RSX_VERTEX_BASE_TYPE_HALF_FLOAT },
{ "VTX_FMT_UNORM8 ", RSX_VERTEX_BASE_TYPE_UNORM8 },
{ "VTX_FMT_SINT16 ", RSX_VERTEX_BASE_TYPE_SINT16 },
{ "VTX_FMT_COMP32 ", RSX_VERTEX_BASE_TYPE_CMP32 },
{ "VTX_FMT_UINT8 ", RSX_VERTEX_BASE_TYPE_UINT8 }
});
// For Intel GPUs which cannot access vectors in indexed mode (driver bug? or GLSL version too low?)
// Note: tested on Mesa Iris with HD 530 and the compliant path works fine; this may be a bug in the Windows proprietary drivers
if (!glsl4_compliant)
{
OS << "#define _INTEL_GLSL\n";
}
OS <<
#include "GLSLSnippets/RSXProg/RSXVertexFetch.glsl"
;
}
void insert_blend_prologue(std::ostream& OS)
{
OS <<
#include "GLSLSnippets/RSXProg/RSXProgrammableBlendPrologue.glsl"
;
}
void insert_rop_init(std::ostream& OS)
{
OS <<
#include "GLSLSnippets/RSXProg/RSXROPPrologue.glsl"
;
}
void insert_rop(std::ostream& OS, const shader_properties& /*props*/)
{
OS <<
#include "GLSLSnippets/RSXProg/RSXROPEpilogue.glsl"
;
}
void insert_glsl_legacy_function(std::ostream& OS, const shader_properties& props)
{
std::vector<std::string_view> enabled_options;
if (props.low_precision_tests)
{
enabled_options.push_back("_GPU_LOW_PRECISION_COMPARE");
}
if (props.require_lit_emulation)
{
enabled_options.push_back("_ENABLE_LIT_EMULATION");
}
OS << "#define _select mix\n";
OS << "#define _saturate(x) clamp(x, 0., 1.)\n";
OS << "#define _get_bits(x, off, count) bitfieldExtract(x, off, count)\n";
OS << "#define _set_bits(x, y, off, count) bitfieldInsert(x, y, off, count)\n";
OS << "#define _test_bit(x, y) (_get_bits(x, y, 1) != 0)\n";
OS << "#define _rand(seed) fract(sin(dot(seed.xy, vec2(12.9898f, 78.233f))) * 43758.5453f)\n\n";
if (props.domain == glsl::program_domain::glsl_fragment_program)
{
OS << "// ROP control\n";
program_common::define_glsl_constants<rsx::ROP_control_bits>(OS,
{
{ "ALPHA_TEST_ENABLE_BIT ", rsx::ROP_control_bits::ALPHA_TEST_ENABLE_BIT },
{ "SRGB_FRAMEBUFFER_BIT ", rsx::ROP_control_bits::SRGB_FRAMEBUFFER_BIT },
{ "ALPHA_TO_COVERAGE_ENABLE_BIT", rsx::ROP_control_bits::ALPHA_TO_COVERAGE_ENABLE_BIT },
{ "MSAA_WRITE_ENABLE_BIT ", rsx::ROP_control_bits::MSAA_WRITE_ENABLE_BIT },
{ "INT_FRAMEBUFFER_BIT ", rsx::ROP_control_bits::INT_FRAMEBUFFER_BIT },
{ "POLYGON_STIPPLE_ENABLE_BIT ", rsx::ROP_control_bits::POLYGON_STIPPLE_ENABLE_BIT },
{ "ALPHA_TEST_FUNC_OFFSET ", rsx::ROP_control_bits::ALPHA_FUNC_OFFSET },
{ "ALPHA_TEST_FUNC_LENGTH ", rsx::ROP_control_bits::ALPHA_FUNC_NUM_BITS },
{ "MSAA_SAMPLE_CTRL_OFFSET ", rsx::ROP_control_bits::MSAA_SAMPLE_CTRL_OFFSET },
{ "MSAA_SAMPLE_CTRL_LENGTH ", rsx::ROP_control_bits::MSAA_SAMPLE_CTRL_NUM_BITS },
{ "ROP_CMD_MASK ", rsx::ROP_control_bits::ROP_CMD_MASK }
});
program_common::define_glsl_constants<const char*>(OS,
{
{ "col0", props.fp32_outputs ? "r0" : "h0" },
{ "col1", props.fp32_outputs ? "r2" : "h4" },
{ "col2", props.fp32_outputs ? "r3" : "h6" },
{ "col3", props.fp32_outputs ? "r4" : "h8" }
});
if (props.fp32_outputs || !props.supports_native_fp16)
{
enabled_options.push_back("_32_BIT_OUTPUT");
}
if (!props.fp32_outputs)
{
enabled_options.push_back("_ENABLE_FRAMEBUFFER_SRGB");
}
if (props.disable_early_discard)
{
enabled_options.push_back("_DISABLE_EARLY_DISCARD");
}
if (props.ROP_output_rounding)
{
enabled_options.push_back("_ENABLE_ROP_OUTPUT_ROUNDING");
}
enabled_options.push_back("_ENABLE_POLYGON_STIPPLE");
}
// Import common header
program_common::define_glsl_switches(OS, enabled_options);
enabled_options.clear();
OS <<
#include "GLSLSnippets/RSXProg/RSXProgramCommon.glsl"
;
if (props.domain == glsl::program_domain::glsl_vertex_program)
{
if (props.require_explicit_invariance)
{
enabled_options.push_back("_FORCE_POSITION_INVARIANCE");
}
if (props.emulate_zclip_transform)
{
if (props.emulate_depth_clip_only)
{
enabled_options.push_back("_EMULATE_ZCLIP_XFORM_STANDARD");
}
else
{
enabled_options.push_back("_EMULATE_ZCLIP_XFORM_FALLBACK");
}
}
// Import vertex header
program_common::define_glsl_switches(OS, enabled_options);
OS <<
#include "GLSLSnippets/RSXProg/RSXVertexPrologue.glsl"
;
return;
}
if (props.emulate_coverage_tests)
{
enabled_options.push_back("_EMULATE_COVERAGE_TEST");
}
if (!props.fp32_outputs || props.require_linear_to_srgb)
{
enabled_options.push_back("_ENABLE_LINEAR_TO_SRGB");
}
if (props.require_texture_ops || props.require_srgb_to_linear)
{
enabled_options.push_back("_ENABLE_SRGB_TO_LINEAR");
}
if (props.require_wpos)
{
enabled_options.push_back("_ENABLE_WPOS");
}
if (props.require_fog_read)
{
program_common::define_glsl_constants<rsx::fog_mode>(OS,
{
{ "FOG_LINEAR ", rsx::fog_mode::linear },
{ "FOG_EXP ", rsx::fog_mode::exponential },
{ "FOG_EXP2 ", rsx::fog_mode::exponential2 },
{ "FOG_LINEAR_ABS", rsx::fog_mode::linear_abs },
{ "FOG_EXP_ABS ", rsx::fog_mode::exponential_abs },
{ "FOG_EXP2_ABS ", rsx::fog_mode::exponential2_abs },
});
enabled_options.push_back("_ENABLE_FOG_READ");
}
// Import fragment header
program_common::define_glsl_switches(OS, enabled_options);
enabled_options.clear();
OS <<
#include "GLSLSnippets/RSXProg/RSXFragmentPrologue.glsl"
;
if (props.require_texture_ops)
{
// Declare special texture control flags
program_common::define_glsl_constants<rsx::texture_control_bits>(OS,
{
{ "GAMMA_R_BIT " , rsx::texture_control_bits::GAMMA_R },
{ "GAMMA_G_BIT " , rsx::texture_control_bits::GAMMA_G },
{ "GAMMA_B_BIT " , rsx::texture_control_bits::GAMMA_B },
{ "GAMMA_A_BIT " , rsx::texture_control_bits::GAMMA_A },
{ "EXPAND_R_BIT" , rsx::texture_control_bits::EXPAND_R },
{ "EXPAND_G_BIT" , rsx::texture_control_bits::EXPAND_G },
{ "EXPAND_B_BIT" , rsx::texture_control_bits::EXPAND_B },
{ "EXPAND_A_BIT" , rsx::texture_control_bits::EXPAND_A },
{ "SEXT_R_BIT" , rsx::texture_control_bits::SEXT_R },
{ "SEXT_G_BIT" , rsx::texture_control_bits::SEXT_G },
{ "SEXT_B_BIT" , rsx::texture_control_bits::SEXT_B },
{ "SEXT_A_BIT" , rsx::texture_control_bits::SEXT_A },
{ "WRAP_S_BIT", rsx::texture_control_bits::WRAP_S },
{ "WRAP_T_BIT", rsx::texture_control_bits::WRAP_T },
{ "WRAP_R_BIT", rsx::texture_control_bits::WRAP_R },
{ "ALPHAKILL ", rsx::texture_control_bits::ALPHAKILL },
{ "RENORMALIZE ", rsx::texture_control_bits::RENORMALIZE },
{ "DEPTH_FLOAT ", rsx::texture_control_bits::DEPTH_FLOAT },
{ "DEPTH_COMPARE", rsx::texture_control_bits::DEPTH_COMPARE_OP },
{ "FILTERED_MAG_BIT", rsx::texture_control_bits::FILTERED_MAG },
{ "FILTERED_MIN_BIT", rsx::texture_control_bits::FILTERED_MIN },
{ "INT_COORDS_BIT ", rsx::texture_control_bits::UNNORMALIZED_COORDS },
{ "CLAMP_COORDS_BIT", rsx::texture_control_bits::CLAMP_TEXCOORDS_BIT }
});
if (props.require_texture_expand)
{
enabled_options.push_back("_ENABLE_TEXTURE_EXPAND");
}
if (props.emulate_shadow_compare)
{
enabled_options.push_back("_EMULATED_TEXSHADOW");
}
if (props.require_tex_shadow_ops)
{
enabled_options.push_back("_ENABLE_SHADOW");
}
if (props.require_tex1D_ops)
{
enabled_options.push_back("_ENABLE_TEX1D");
}
if (props.require_tex2D_ops)
{
enabled_options.push_back("_ENABLE_TEX2D");
}
if (props.require_tex3D_ops)
{
enabled_options.push_back("_ENABLE_TEX3D");
}
if (props.require_shadowProj_ops)
{
enabled_options.push_back("_ENABLE_SHADOWPROJ");
}
program_common::define_glsl_switches(OS, enabled_options);
enabled_options.clear();
OS <<
#include "GLSLSnippets/RSXProg/RSXFragmentTextureOps.glsl"
;
if (props.require_depth_conversion)
{
OS <<
#include "GLSLSnippets/RSXProg/RSXFragmentTextureDepthConversion.glsl"
;
}
if (props.require_msaa_ops)
{
OS <<
#include "GLSLSnippets/RSXProg/RSXFragmentTextureMSAAOps.glsl"
;
// Generate multiple versions of the actual sampler code.
// We could use defines to generate these, but I don't trust some OpenGL compilers to do the right thing.
const std::string_view msaa_sampling_impl =
#include "GLSLSnippets/RSXProg/RSXFragmentTextureMSAAOpsInternal.glsl"
;
OS << fmt::replace_all(msaa_sampling_impl, "_MSAA_SAMPLER_TYPE_", "sampler2DMS");
if (props.require_depth_conversion)
{
OS << fmt::replace_all(msaa_sampling_impl, "_MSAA_SAMPLER_TYPE_", "usampler2DMS");
}
}
}
}
std::string getFunctionImpl(FUNCTION f)
{
switch (f)
{
default:
abort();
case FUNCTION::DP2:
return "$Ty(dot($0.xy, $1.xy))";
case FUNCTION::DP2A:
return "$Ty(dot($0.xy, $1.xy) + $2.x)";
case FUNCTION::DP3:
return "$Ty(dot($0.xyz, $1.xyz))";
case FUNCTION::DP4:
return "$Ty(dot($0, $1))";
case FUNCTION::DPH:
return "$Ty(dot(vec4($0.xyz, 1.0), $1))";
case FUNCTION::SFL:
return "$Ty(0.)";
case FUNCTION::STR:
return "$Ty(1.)";
case FUNCTION::FRACT:
return "fract($0)";
case FUNCTION::REFL:
return "reflect($0, $1)";
case FUNCTION::TEXTURE_SAMPLE1D:
return "TEX1D($_i, $0.x)";
case FUNCTION::TEXTURE_SAMPLE1D_BIAS:
return "TEX1D_BIAS($_i, $0.x, $1.x)";
case FUNCTION::TEXTURE_SAMPLE1D_PROJ:
return "TEX1D_PROJ($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE1D_LOD:
return "TEX1D_LOD($_i, $0.x, $1.x)";
case FUNCTION::TEXTURE_SAMPLE1D_GRAD:
return "TEX1D_GRAD($_i, $0.x, $1.x, $2.x)";
case FUNCTION::TEXTURE_SAMPLE1D_SHADOW:
case FUNCTION::TEXTURE_SAMPLE1D_SHADOW_PROJ:
// Unimplemented
break;
case FUNCTION::TEXTURE_SAMPLE1D_DEPTH_RGBA:
return "TEX1D_Z24X8_RGBA8($_i, $0.x)";
case FUNCTION::TEXTURE_SAMPLE1D_DEPTH_RGBA_PROJ:
return "TEX1D_Z24X8_RGBA8($_i, ($0.x / $0.w))";
case FUNCTION::TEXTURE_SAMPLE2D:
return "TEX2D($_i, $0.xy)";
case FUNCTION::TEXTURE_SAMPLE2D_BIAS:
return "TEX2D_BIAS($_i, $0.xy, $1.x)";
case FUNCTION::TEXTURE_SAMPLE2D_PROJ:
return "TEX2D_PROJ($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE2D_LOD:
return "TEX2D_LOD($_i, $0.xy, $1.x)";
case FUNCTION::TEXTURE_SAMPLE2D_GRAD:
return "TEX2D_GRAD($_i, $0.xy, $1.xy, $2.xy)";
case FUNCTION::TEXTURE_SAMPLE2D_SHADOW:
return "TEX2D_SHADOW($_i, $0.xyz)";
case FUNCTION::TEXTURE_SAMPLE2D_SHADOW_PROJ:
return "TEX2D_SHADOWPROJ($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE2D_DEPTH_RGBA:
return "TEX2D_Z24X8_RGBA8($_i, $0.xy)";
case FUNCTION::TEXTURE_SAMPLE2D_DEPTH_RGBA_PROJ:
return "TEX2D_Z24X8_RGBA8($_i, ($0.xy / $0.w))";
case FUNCTION::TEXTURE_SAMPLE3D:
return "TEX3D($_i, $0.xyz)";
case FUNCTION::TEXTURE_SAMPLE3D_BIAS:
return "TEX3D_BIAS($_i, $0.xyz, $1.x)";
case FUNCTION::TEXTURE_SAMPLE3D_PROJ:
return "TEX3D_PROJ($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE3D_LOD:
return "TEX3D_LOD($_i, $0.xyz, $1.x)";
case FUNCTION::TEXTURE_SAMPLE3D_GRAD:
return "TEX3D_GRAD($_i, $0.xyz, $1.xyz, $2.xyz)";
case FUNCTION::TEXTURE_SAMPLE3D_SHADOW:
return "TEX3D_SHADOW($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE3D_SHADOW_PROJ:
// Impossible
break;
case FUNCTION::TEXTURE_SAMPLE3D_DEPTH_RGBA:
return "TEX3D_Z24X8_RGBA8($_i, $0.xyz)";
case FUNCTION::TEXTURE_SAMPLE3D_DEPTH_RGBA_PROJ:
return "TEX3D_Z24X8_RGBA8($_i, ($0.xyz / $0.w))";
case FUNCTION::TEXTURE_SAMPLE2DMS:
case FUNCTION::TEXTURE_SAMPLE2DMS_BIAS:
return "TEX2D_MS($_i, $0.xy)";
case FUNCTION::TEXTURE_SAMPLE2DMS_PROJ:
return "TEX2D_MS($_i, $0.xy / $0.w)";
case FUNCTION::TEXTURE_SAMPLE2DMS_LOD:
case FUNCTION::TEXTURE_SAMPLE2DMS_GRAD:
return "TEX2D_MS($_i, $0.xy)";
case FUNCTION::TEXTURE_SAMPLE2DMS_SHADOW:
return "TEX2D_SHADOW_MS($_i, $0.xyz)";
case FUNCTION::TEXTURE_SAMPLE2DMS_SHADOW_PROJ:
return "TEX2D_SHADOWPROJ_MS($_i, $0)";
case FUNCTION::TEXTURE_SAMPLE2DMS_DEPTH_RGBA:
return "TEX2D_Z24X8_RGBA8_MS($_i, $0.xy)";
case FUNCTION::TEXTURE_SAMPLE2DMS_DEPTH_RGBA_PROJ:
return "TEX2D_Z24X8_RGBA8_MS($_i, ($0.xy / $0.w))";
case FUNCTION::DFDX:
return "dFdx($0)";
case FUNCTION::DFDY:
return "dFdy($0)";
case FUNCTION::VERTEX_TEXTURE_FETCH1D:
return "textureLod($t, $0.x, 0)";
case FUNCTION::VERTEX_TEXTURE_FETCH2D:
return "textureLod($t, $0.xy, 0)";
case FUNCTION::VERTEX_TEXTURE_FETCH3D:
case FUNCTION::VERTEX_TEXTURE_FETCHCUBE:
return "textureLod($t, $0.xyz, 0)";
case FUNCTION::VERTEX_TEXTURE_FETCH2DMS:
return "texelFetch($t, ivec2($0.xy * textureSize($t)), 0)";
}
rsx_log.error("Unexpected function request: %d", static_cast<int>(f));
return "$Ty(0.)";
}
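// The returned strings are templates: the decompiler later substitutes the
// placeholders ($Ty = result type, $0..$2 = operands, $t / $_i = sampler
// references). E.g. FUNCTION::DP3 expands to "$Ty(dot($0.xyz, $1.xyz))".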
void insert_subheader_block(std::ostream& OS)
{
// Global types and stuff
// Must be compatible with std140 packing rules
OS <<
#include "GLSLSnippets/RSXProg/RSXDefines2.glsl"
;
}
void insert_fragment_shader_inputs_block(
std::stringstream& OS,
const std::string_view ext_flavour,
const RSXFragmentProgram& prog,
const std::vector<ParamType>& params,
const two_sided_lighting_config& _2sided_lighting,
std::function<int(std::string_view)> varying_location)
{
struct _varying_register_config
{
int location;
std::string name;
std::string type;
};
std::vector<_varying_register_config> varying_list;
for (const ParamType& PT : params)
{
for (const ParamItem& PI : PT.items)
{
// ssa is defined in the program body and is not a varying type
if (PI.name == "ssa") continue;
const auto reg_location = varying_location(PI.name);
std::string var_name = PI.name;
if (var_name == "fogc")
{
var_name = "fog_c";
}
else if (prog.two_sided_lighting)
{
if (var_name == "diff_color")
{
var_name = "diff_color0";
}
else if (var_name == "spec_color")
{
var_name = "spec_color0";
}
}
varying_list.push_back({ reg_location, var_name, PT.type });
}
}
if (prog.two_sided_lighting)
{
if (_2sided_lighting.two_sided_color)
{
varying_list.push_back({ varying_location("diff_color1"), "diff_color1", "vec4" });
}
if (_2sided_lighting.two_sided_specular)
{
varying_list.push_back({ varying_location("spec_color1"), "spec_color1", "vec4" });
}
}
if (varying_list.empty())
{
return;
}
// Make the output a little nicer
std::sort(varying_list.begin(), varying_list.end(), FN(x.location < y.location));
if (!(prog.ctrl & RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION))
{
for (const auto& reg : varying_list)
{
OS << "layout(location=" << reg.location << ") in " << reg.type << " " << reg.name << ";\n";
}
OS << "\n";
return;
}
for (const auto& reg : varying_list)
{
OS << "layout(location=" << reg.location << ") pervertex" << ext_flavour << " in " << reg.type << " " << reg.name << "_raw[3];\n";
}
// Interpolate the input attributes manually.
// Matches AMD behavior where gl_BaryCoordSmoothAMD only provides x and y with z being autogenerated.
std::string interpolate_function_block =
"\n"
"vec4 _interpolate_varying3(const in vec4[3] v)\n"
"{\n"
// In the corner case where v[0] == v[1] == v[2], this algorithm generates a perfect result vs alternatives that use weighted multiply + add.
// Due to the finite precision of floating-point arithmetic, adding together the results of different multiplies yields a slightly inaccurate result which breaks things.
" const vec4 p10 = v[1] - v[0];\n"
" const vec4 p20 = v[2] - v[0];\n"
" return v[0] + p10 * $gl_BaryCoord.y + p20 * $gl_BaryCoord.z;\n"
"}\n\n";
OS << fmt::replace_all(interpolate_function_block, {{ "$gl_BaryCoord", "gl_BaryCoord"s + std::string(ext_flavour) }});
for (const auto& reg : varying_list)
{
OS << "vec4 " << reg.name << " = _interpolate_varying3(" << reg.name << "_raw);\n";
}
OS << "\n";
}
}
| 19,186 | C++ | .cpp | 570 | 29.822807 | 172 | 0.654674 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,480 | SPIRVCommon.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/SPIRVCommon.cpp |
#include "stdafx.h"
#ifdef _MSC_VER
#pragma warning(push, 0)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wsuggest-override"
#ifdef __clang__
#pragma clang diagnostic ignored "-Winconsistent-missing-override"
#endif
#endif
#include "3rdparty/glslang/glslang/SPIRV/GlslangToSpv.h"
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
#include "SPIRVCommon.h"
#include "GLSLCommon.h"
namespace spirv
{
static TBuiltInResource g_default_config;
void init_default_resources(TBuiltInResource& rsc)
{
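// These limits appear to mirror glslang's stock default resource table
// (DefaultTBuiltInResource).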
rsc.maxLights = 32;
rsc.maxClipPlanes = 6;
rsc.maxTextureUnits = 32;
rsc.maxTextureCoords = 32;
rsc.maxVertexAttribs = 64;
rsc.maxVertexUniformComponents = 4096;
rsc.maxVaryingFloats = 64;
rsc.maxVertexTextureImageUnits = 32;
rsc.maxCombinedTextureImageUnits = 80;
rsc.maxTextureImageUnits = 32;
rsc.maxFragmentUniformComponents = 4096;
rsc.maxDrawBuffers = 32;
rsc.maxVertexUniformVectors = 128;
rsc.maxVaryingVectors = 8;
rsc.maxFragmentUniformVectors = 16;
rsc.maxVertexOutputVectors = 16;
rsc.maxFragmentInputVectors = 15;
rsc.minProgramTexelOffset = -8;
rsc.maxProgramTexelOffset = 7;
rsc.maxClipDistances = 8;
rsc.maxComputeWorkGroupCountX = 65535;
rsc.maxComputeWorkGroupCountY = 65535;
rsc.maxComputeWorkGroupCountZ = 65535;
rsc.maxComputeWorkGroupSizeX = 1024;
rsc.maxComputeWorkGroupSizeY = 1024;
rsc.maxComputeWorkGroupSizeZ = 64;
rsc.maxComputeUniformComponents = 1024;
rsc.maxComputeTextureImageUnits = 16;
rsc.maxComputeImageUniforms = 8;
rsc.maxComputeAtomicCounters = 8;
rsc.maxComputeAtomicCounterBuffers = 1;
rsc.maxVaryingComponents = 60;
rsc.maxVertexOutputComponents = 64;
rsc.maxGeometryInputComponents = 64;
rsc.maxGeometryOutputComponents = 128;
rsc.maxFragmentInputComponents = 128;
rsc.maxImageUnits = 8;
rsc.maxCombinedImageUnitsAndFragmentOutputs = 8;
rsc.maxCombinedShaderOutputResources = 8;
rsc.maxImageSamples = 0;
rsc.maxVertexImageUniforms = 0;
rsc.maxTessControlImageUniforms = 0;
rsc.maxTessEvaluationImageUniforms = 0;
rsc.maxGeometryImageUniforms = 0;
rsc.maxFragmentImageUniforms = 8;
rsc.maxCombinedImageUniforms = 8;
rsc.maxGeometryTextureImageUnits = 16;
rsc.maxGeometryOutputVertices = 256;
rsc.maxGeometryTotalOutputComponents = 1024;
rsc.maxGeometryUniformComponents = 1024;
rsc.maxGeometryVaryingComponents = 64;
rsc.maxTessControlInputComponents = 128;
rsc.maxTessControlOutputComponents = 128;
rsc.maxTessControlTextureImageUnits = 16;
rsc.maxTessControlUniformComponents = 1024;
rsc.maxTessControlTotalOutputComponents = 4096;
rsc.maxTessEvaluationInputComponents = 128;
rsc.maxTessEvaluationOutputComponents = 128;
rsc.maxTessEvaluationTextureImageUnits = 16;
rsc.maxTessEvaluationUniformComponents = 1024;
rsc.maxTessPatchComponents = 120;
rsc.maxPatchVertices = 32;
rsc.maxTessGenLevel = 64;
rsc.maxViewports = 16;
rsc.maxVertexAtomicCounters = 0;
rsc.maxTessControlAtomicCounters = 0;
rsc.maxTessEvaluationAtomicCounters = 0;
rsc.maxGeometryAtomicCounters = 0;
rsc.maxFragmentAtomicCounters = 8;
rsc.maxCombinedAtomicCounters = 8;
rsc.maxAtomicCounterBindings = 1;
rsc.maxVertexAtomicCounterBuffers = 0;
rsc.maxTessControlAtomicCounterBuffers = 0;
rsc.maxTessEvaluationAtomicCounterBuffers = 0;
rsc.maxGeometryAtomicCounterBuffers = 0;
rsc.maxFragmentAtomicCounterBuffers = 1;
rsc.maxCombinedAtomicCounterBuffers = 1;
rsc.maxAtomicCounterBufferSize = 16384;
rsc.maxTransformFeedbackBuffers = 4;
rsc.maxTransformFeedbackInterleavedComponents = 64;
rsc.maxCullDistances = 8;
rsc.maxCombinedClipAndCullDistances = 8;
rsc.maxSamples = 4;
rsc.limits.nonInductiveForLoops = true;
rsc.limits.whileLoops = true;
rsc.limits.doWhileLoops = true;
rsc.limits.generalUniformIndexing = true;
rsc.limits.generalAttributeMatrixVectorIndexing = true;
rsc.limits.generalVaryingIndexing = true;
rsc.limits.generalSamplerIndexing = true;
rsc.limits.generalVariableIndexing = true;
rsc.limits.generalConstantMatrixVectorIndexing = true;
}
bool compile_glsl_to_spv(std::vector<u32>& spv, std::string& shader, ::glsl::program_domain domain, ::glsl::glsl_rules rules)
{
EShLanguage lang = (domain == ::glsl::glsl_fragment_program)
? EShLangFragment
: (domain == ::glsl::glsl_vertex_program)
? EShLangVertex
: EShLangCompute;
glslang::EShClient client;
glslang::EShTargetClientVersion target_version;
EShMessages msg;
if (rules == ::glsl::glsl_rules_vulkan)
{
client = glslang::EShClientVulkan;
target_version = glslang::EShTargetClientVersion::EShTargetVulkan_1_0;
msg = static_cast<EShMessages>(EShMsgVulkanRules | EShMsgSpvRules | EShMsgEnhanced);
}
else
{
client = glslang::EShClientOpenGL;
target_version = glslang::EShTargetClientVersion::EShTargetOpenGL_450;
msg = static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgEnhanced);
}
glslang::TProgram program;
glslang::TShader shader_object(lang);
shader_object.setEnvInput(glslang::EShSourceGlsl, lang, client, 100);
shader_object.setEnvClient(client, target_version);
shader_object.setEnvTarget(glslang::EshTargetSpv, glslang::EShTargetLanguageVersion::EShTargetSpv_1_0);
bool success = false;
const char* shader_text = shader.data();
shader_object.setStrings(&shader_text, 1);
if (shader_object.parse(&g_default_config, 430, EProfile::ECoreProfile, false, true, msg))
{
program.addShader(&shader_object);
success = program.link(msg);
if (success)
{
glslang::SpvOptions options;
options.disableOptimizer = true;
options.optimizeSize = true;
glslang::GlslangToSpv(*program.getIntermediate(lang), spv, &options);
// Now we optimize
//spvtools::Optimizer optimizer(SPV_ENV_VULKAN_1_0);
//optimizer.RegisterPass(spvtools::CreateUnifyConstantPass()); // Remove duplicate constants
//optimizer.RegisterPass(spvtools::CreateMergeReturnPass()); // Huge savings in vertex interpreter and likely normal vertex shaders
//optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); // Remove dead code
//optimizer.Run(spv.data(), spv.size(), &spv);
}
}
else
{
rsx_log.error("%s", shader_object.getInfoLog());
rsx_log.error("%s", shader_object.getInfoDebugLog());
}
return success;
}
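// Minimal usage sketch (illustrative, not part of the original file):
//
//   spirv::initialize_compiler_context();            // once, at startup
//   std::vector<u32> spv;
//   std::string source = ...;                        // GLSL from the emitters above
//   if (spirv::compile_glsl_to_spv(spv, source, ::glsl::glsl_fragment_program, ::glsl::glsl_rules_vulkan))
//   {
//       // hand 'spv' to vkCreateShaderModule
//   }
//   spirv::finalize_compiler_context();              // once, at shutdown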
void initialize_compiler_context()
{
glslang::InitializeProcess();
init_default_resources(g_default_config);
}
void finalize_compiler_context()
{
glslang::FinalizeProcess();
}
}
| 6,896 | C++ | .cpp | 184 | 33.423913 | 143 | 0.760778 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,481 | CgBinaryVertexProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Program/CgBinaryVertexProgram.cpp |
#include "stdafx.h"
#include "CgBinaryProgram.h"
#include "Emu/System.h"
#include "RSXVertexProgram.h"
void CgBinaryDisasm::AddScaCodeDisasm(const std::string& code)
{
ensure((m_sca_opcode < 21));
m_arb_shader += rsx_vp_sca_op_names[m_sca_opcode] + code + " ";
}
void CgBinaryDisasm::AddVecCodeDisasm(const std::string& code)
{
ensure((m_vec_opcode < 26));
m_arb_shader += rsx_vp_vec_op_names[m_vec_opcode] + code + " ";
}
std::string CgBinaryDisasm::GetMaskDisasm(bool is_sca) const
{
std::string ret;
ret.reserve(5);
ret += '.';
if (is_sca)
{
if (d3.sca_writemask_x) ret += "x";
if (d3.sca_writemask_y) ret += "y";
if (d3.sca_writemask_z) ret += "z";
if (d3.sca_writemask_w) ret += "w";
}
else
{
if (d3.vec_writemask_x) ret += "x";
if (d3.vec_writemask_y) ret += "y";
if (d3.vec_writemask_z) ret += "z";
if (d3.vec_writemask_w) ret += "w";
}
return ret == "."sv || ret == ".xyzw"sv ? "" : (ret);
}
std::string CgBinaryDisasm::GetVecMaskDisasm() const
{
return GetMaskDisasm(false);
}
std::string CgBinaryDisasm::GetScaMaskDisasm() const
{
return GetMaskDisasm(true);
}
std::string CgBinaryDisasm::GetDSTDisasm(bool is_sca) const
{
std::string ret;
std::string mask = GetMaskDisasm(is_sca);
static constexpr std::array<std::string_view, 22> output_names =
{
"out_diffuse_color",
"out_specular_color",
"out_back_diffuse_color",
"out_back_specular_color",
"out_fog",
"out_point_size",
"out_clip_distance[0]",
"out_clip_distance[1]",
"out_clip_distance[2]",
"out_clip_distance[3]",
"out_clip_distance[4]",
"out_clip_distance[5]",
"out_tc8",
"out_tc9",
"out_tc0",
"out_tc1",
"out_tc2",
"out_tc3",
"out_tc4",
"out_tc5",
"out_tc6",
"out_tc7"
};
switch ((is_sca && d3.sca_dst_tmp != 0x3f) ? 0x1f : d3.dst)
{
case 0x1f:
ret += (is_sca ? fmt::format("R%d", d3.sca_dst_tmp) : fmt::format("R%d", d0.dst_tmp)) + mask;
break;
default:
if (d3.dst < output_names.size())
{
fmt::append(ret, "%s%s", output_names[d3.dst], mask);
}
else
{
rsx_log.error("dst index out of range: %u", d3.dst);
fmt::append(ret, "(bad out index) o[%d]", d3.dst);
}
// Vertex Program supports double destinations, notably in MOV
if (d0.dst_tmp != 0x3f)
fmt::append(ret, " R%d%s", d0.dst_tmp, mask);
break;
}
return ret;
}
std::string CgBinaryDisasm::GetSRCDisasm(const u32 n) const
{
ensure(n < 3);
std::string ret;
static constexpr std::array<std::string_view, 16> reg_table =
{
"in_pos", "in_weight", "in_normal",
"in_diff_color", "in_spec_color",
"in_fog",
"in_point_size", "in_7",
"in_tc0", "in_tc1", "in_tc2", "in_tc3",
"in_tc4", "in_tc5", "in_tc6", "in_tc7"
};
switch (src[n].reg_type)
{
case 1: //temp
ret += 'R';
ret += std::to_string(src[n].tmp_src);
break;
case 2: //input
if (d1.input_src < reg_table.size())
{
fmt::append(ret, "%s", reg_table[d1.input_src]);
}
else
{
rsx_log.error("Bad input src num: %d", u32{ d1.input_src });
fmt::append(ret, "v[%d] # bad src", d1.input_src);
}
break;
case 3: //const
ret += std::string("c[" + (d3.index_const ? AddAddrRegDisasm() + " + " : "") + std::to_string(d1.const_src) + "]");
break;
default:
rsx_log.fatal("Bad src%u reg type: %d", n, u32{ src[n].reg_type });
break;
}
static constexpr std::string_view f = "xyzw";
std::string swizzle;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[src[n].swz_x];
swizzle += f[src[n].swz_y];
swizzle += f[src[n].swz_z];
swizzle += f[src[n].swz_w];
if (swizzle == ".xxxx") swizzle = ".x";
else if (swizzle == ".yyyy") swizzle = ".y";
else if (swizzle == ".zzzz") swizzle = ".z";
else if (swizzle == ".wwww") swizzle = ".w";
if (swizzle != ".xyzw"sv)
{
ret += swizzle;
}
bool abs = false;
switch (n)
{
default:
case 0: abs = d0.src0_abs; break;
case 1: abs = d0.src1_abs; break;
case 2: abs = d0.src2_abs; break;
}
if (abs) ret = "|" + ret + "|";
if (src[n].neg) ret = "-" + ret;
return ret;
}
void CgBinaryDisasm::SetDSTDisasm(bool is_sca, const std::string& value)
{
is_sca ? AddScaCodeDisasm() : AddVecCodeDisasm();
if (d0.cond == 0) return;
enum
{
lt = 0x1,
eq = 0x2,
gt = 0x4
};
if (d0.staturate)
{
m_arb_shader.pop_back();
m_arb_shader += "_sat ";
}
std::string dest;
if (d0.cond_update_enable_0 && d0.cond_update_enable_1)
{
m_arb_shader.pop_back();
m_arb_shader += "C ";
dest = fmt::format("RC%s", GetMaskDisasm(is_sca).c_str());
}
else if (d3.dst != 0x1f || (is_sca ? d3.sca_dst_tmp != 0x3f : d0.dst_tmp != 0x3f))
{
dest = GetDSTDisasm(is_sca);
}
AddCodeCondDisasm(FormatDisasm(dest), value);
}
std::string CgBinaryDisasm::GetTexDisasm()
{
return fmt::format("TEX%d", 0);
}
std::string CgBinaryDisasm::FormatDisasm(const std::string& code) const
{
const std::pair<std::string_view, std::function<std::string()>> repl_list[] =
{
{ "$$", []() -> std::string { return "$"; } },
{ "$0", [this]{ return GetSRCDisasm(0); } },
{ "$1", [this]{ return GetSRCDisasm(1); } },
{ "$2", [this]{ return GetSRCDisasm(2); } },
{ "$s", [this]{ return GetSRCDisasm(2); } },
{ "$am", [this]{ return AddAddrMaskDisasm(); } },
{ "$a", [this]{ return AddAddrRegDisasm(); } },
{ "$t", [this]{ return GetTexDisasm(); } },
{ "$fa", [this]{ return std::to_string(GetAddrDisasm()); } },
{ "$ifcond ", [this]
{
std::string cond = GetCondDisasm();
if (cond == "true") cond.clear();
return cond;
}
},
{ "$cond", [this]{ return GetCondDisasm(); } },
};
return fmt::replace_all(code, repl_list);
}
std::string CgBinaryDisasm::GetCondDisasm() const
{
enum
{
lt = 0x1,
eq = 0x2,
gt = 0x4
};
if (d0.cond == 0) return "false";
if (d0.cond == (lt | gt | eq)) return "true";
static const char* cond_string_table[(lt | gt | eq) + 1] =
{
"ERROR",
"LT", "EQ", "LE",
"GT", "NE", "GE",
"ERROR"
};
static constexpr std::string_view f = "xyzw";
std::string swizzle;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[d0.mask_x];
swizzle += f[d0.mask_y];
swizzle += f[d0.mask_z];
swizzle += f[d0.mask_w];
if (swizzle == ".xxxx") swizzle = ".x";
else if (swizzle == ".yyyy") swizzle = ".y";
else if (swizzle == ".zzzz") swizzle = ".z";
else if (swizzle == ".wwww") swizzle = ".w";
if (swizzle == ".xyzw"sv)
{
swizzle.clear();
}
return fmt::format("(%s%s)", cond_string_table[d0.cond], swizzle.c_str());
}
void CgBinaryDisasm::AddCodeCondDisasm(const std::string& dst, const std::string& src)
{
enum
{
lt = 0x1,
eq = 0x2,
gt = 0x4
};
if (!d0.cond_test_enable || d0.cond == (lt | gt | eq))
{
AddCodeDisasm(dst + ", " + src + ";");
return;
}
if (d0.cond == 0)
{
AddCodeDisasm("# " + dst + ", " + src + ";");
return;
}
static const char* cond_string_table[(lt | gt | eq) + 1] =
{
"ERROR",
"LT", "EQ", "LE",
"GT", "NE", "GE",
"ERROR"
};
static constexpr std::string_view f = "xyzw";
std::string swizzle;
swizzle.reserve(5);
swizzle += '.';
swizzle += f[d0.mask_x];
swizzle += f[d0.mask_y];
swizzle += f[d0.mask_z];
swizzle += f[d0.mask_w];
if (swizzle == ".xxxx") swizzle = ".x";
else if (swizzle == ".yyyy") swizzle = ".y";
else if (swizzle == ".zzzz") swizzle = ".z";
else if (swizzle == ".wwww") swizzle = ".w";
if (swizzle == ".xyzw"sv)
{
swizzle.clear();
}
std::string cond = fmt::format("%s%s", cond_string_table[d0.cond], swizzle.c_str());
AddCodeDisasm(dst + "(" + cond + ") " + ", " + src + ";");
}
std::string CgBinaryDisasm::AddAddrMaskDisasm() const
{
static constexpr std::string_view f = "xyzw";
return std::string(".") + f[d0.addr_swz];
}
std::string CgBinaryDisasm::AddAddrRegDisasm() const
{
return fmt::format("A%d", d0.addr_reg_sel_1) + AddAddrMaskDisasm();
}
u32 CgBinaryDisasm::GetAddrDisasm() const
{
return (d2.iaddrh << 3) | d3.iaddrl;
}
void CgBinaryDisasm::AddCodeDisasm(const std::string& code)
{
m_arb_shader += FormatDisasm(code) + "\n";
}
void CgBinaryDisasm::SetDSTVecDisasm(const std::string& code)
{
SetDSTDisasm(false, code);
}
void CgBinaryDisasm::SetDSTScaDisasm(const std::string& code)
{
SetDSTDisasm(true, code);
}
void CgBinaryDisasm::TaskVP()
{
m_instr_count = 0;
bool is_has_BRA = false;
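// First pass: walk the raw instruction stream to count instructions and find
// the terminating d3.end flag. BRA/BRI use a different word layout, hence the
// special-cased stepping below.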
for (u32 i = 1; m_instr_count < m_max_instr_count; m_instr_count++)
{
if (is_has_BRA)
{
d3.HEX = m_data[i];
i += 4;
}
else
{
d1.HEX = m_data[i++];
m_sca_opcode = d1.sca_opcode;
switch (d1.sca_opcode)
{
case 0x08: //BRA
is_has_BRA = true;
d3.HEX = m_data[++i];
i += 4;
AddScaCodeDisasm("# WARNING");
break;
case 0x09: //BRI
d2.HEX = m_data[i++];
d3.HEX = m_data[i];
i += 2;
AddScaCodeDisasm("$ifcond # WARNING");
break;
default:
d3.HEX = m_data[++i];
i += 2;
break;
}
}
if (d3.end)
{
m_instr_count++;
if (i < m_data.size())
{
rsx_log.error("Program end before buffer end.");
}
break;
}
}
for (u32 i = 0; i < m_instr_count; ++i)
{
d0.HEX = m_data[i * 4 + 0];
d1.HEX = m_data[i * 4 + 1];
d2.HEX = m_data[i * 4 + 2];
d3.HEX = m_data[i * 4 + 3];
src[0].src0l = d2.src0l;
src[0].src0h = d1.src0h;
src[1].src1 = d2.src1;
src[2].src2l = d3.src2l;
src[2].src2h = d2.src2h;
m_sca_opcode = d1.sca_opcode;
switch (d1.sca_opcode)
{
case RSX_SCA_OPCODE_NOP: break;
case RSX_SCA_OPCODE_MOV: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_RCP: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_RCC: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_RSQ: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_EXP: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_LOG: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_LIT: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_BRA: AddScaCodeDisasm("BRA # WARNING"); break;
case RSX_SCA_OPCODE_BRI: AddCodeDisasm("$ifcond # WARNING"); break;
case RSX_SCA_OPCODE_CAL: AddCodeDisasm("$ifcond $f# WARNING"); break;
case RSX_SCA_OPCODE_CLI: AddCodeDisasm("$ifcond $f # WARNING"); break;
case RSX_SCA_OPCODE_RET: AddCodeDisasm("$ifcond # WARNING"); break;
case RSX_SCA_OPCODE_LG2: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_EX2: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_SIN: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_COS: SetDSTScaDisasm("$s"); break;
case RSX_SCA_OPCODE_BRB: SetDSTScaDisasm("# WARNING Boolean constant"); break;
case RSX_SCA_OPCODE_CLB: SetDSTScaDisasm("# WARNING Boolean constant"); break;
case RSX_SCA_OPCODE_PSH: SetDSTScaDisasm(""); break;
case RSX_SCA_OPCODE_POP: SetDSTScaDisasm(""); break;
default:
rsx_log.error("Unknown vp sca_opcode 0x%x", u32{ d1.sca_opcode });
break;
}
m_vec_opcode = d1.vec_opcode;
switch (d1.vec_opcode)
{
case RSX_VEC_OPCODE_NOP: break;
case RSX_VEC_OPCODE_MOV: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_MUL: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_ADD: SetDSTVecDisasm("$0, $2"); break;
case RSX_VEC_OPCODE_MAD: SetDSTVecDisasm("$0, $1, $2"); break;
case RSX_VEC_OPCODE_DP3: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_DPH: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_DP4: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_DST: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_MIN: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_MAX: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_SLT: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_SGE: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_ARL: AddCodeDisasm("ARL, $a, $0"); break;
case RSX_VEC_OPCODE_FRC: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_FLR: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_SEQ: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_SFL: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_SGT: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_SLE: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_SNE: SetDSTVecDisasm("$0, $1"); break;
case RSX_VEC_OPCODE_STR: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_SSG: SetDSTVecDisasm("$0"); break;
case RSX_VEC_OPCODE_TXL: SetDSTVecDisasm("$t, $0"); break;
default:
rsx_log.error("Unknown vp opcode 0x%x", u32{ d1.vec_opcode });
break;
}
}
m_arb_shader += "END\n";
}
| 12,311 | C++ | .cpp | 438 | 25.43379 | 117 | 0.631708 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,482 | GLPipelineCompiler.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLPipelineCompiler.cpp |
#include "stdafx.h"
#include "GLPipelineCompiler.h"
#include "Utilities/Thread.h"
#include <thread>
#include "util/sysinfo.hpp"
namespace gl
{
// Global list of worker threads
std::unique_ptr<named_thread_group<pipe_compiler>> g_pipe_compilers;
int g_num_pipe_compilers = 0;
atomic_t<int> g_compiler_index{};
pipe_compiler::pipe_compiler()
{
}
pipe_compiler::~pipe_compiler()
{
if (m_context_destroy_func)
{
m_context_destroy_func(m_context);
}
}
void pipe_compiler::initialize(
std::function<draw_context_t()> context_create_func,
std::function<void(draw_context_t)> context_bind_func,
std::function<void(draw_context_t)> context_destroy_func)
{
m_context_bind_func = context_bind_func;
m_context_destroy_func = context_destroy_func;
m_context = context_create_func();
}
void pipe_compiler::operator()()
{
while (thread_ctrl::state() != thread_state::aborting)
{
for (auto&& job : m_work_queue.pop_all())
{
if (!m_context_ready.test_and_set())
{
// Bind context on first use
m_context_bind_func(m_context);
}
auto result = int_compile_graphics_pipe(
job.post_create_func,
job.post_link_func);
job.completion_callback(result);
}
thread_ctrl::wait_on(m_work_queue);
}
}
std::unique_ptr<glsl::program> pipe_compiler::compile(
op_flags flags,
build_callback_t post_create_func,
build_callback_t post_link_func,
storage_callback_t completion_callback_func)
{
if (flags == COMPILE_INLINE)
{
return int_compile_graphics_pipe(post_create_func, post_link_func);
}
m_work_queue.push(post_create_func, post_link_func, completion_callback_func);
return {};
}
std::unique_ptr<glsl::program> pipe_compiler::int_compile_graphics_pipe(
build_callback_t post_create_func,
build_callback_t post_link_func)
{
auto result = std::make_unique<glsl::program>();
result->create();
if (post_create_func)
{
post_create_func(result.get());
}
result->link(post_link_func);
return result;
}
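// Usage sketch (illustrative; assumes op_flags is exposed on the class as
// used with COMPILE_INLINE above): synchronous compilation runs on the
// calling thread, while any other flag value enqueues the job for the worker
// threads and returns an empty pointer immediately.
//
//   auto* compiler = gl::get_pipe_compiler();
//   auto prog = compiler->compile(pipe_compiler::COMPILE_INLINE, post_create, post_link, {});
//   // 'prog' is ready on return when compiled inline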
void initialize_pipe_compiler(
std::function<draw_context_t()> context_create_func,
std::function<void(draw_context_t)> context_bind_func,
std::function<void(draw_context_t)> context_destroy_func,
int num_worker_threads)
{
if (num_worker_threads == 0)
{
// Select optimal number of compiler threads
const auto hw_threads = utils::get_thread_count();
if (hw_threads > 12)
{
num_worker_threads = 6;
}
else if (hw_threads > 8)
{
num_worker_threads = 4;
}
else if (hw_threads == 8)
{
num_worker_threads = 2;
}
else
{
num_worker_threads = 1;
}
}
ensure(num_worker_threads >= 1);
// Create the thread pool
g_pipe_compilers = std::make_unique<named_thread_group<pipe_compiler>>("RSX.W", num_worker_threads);
g_num_pipe_compilers = num_worker_threads;
// Initialize the workers. At least one inline compiler shall exist (doesn't actually run)
for (pipe_compiler& compiler : *g_pipe_compilers.get())
{
compiler.initialize(context_create_func, context_bind_func, context_destroy_func);
}
}
void destroy_pipe_compiler()
{
g_pipe_compilers.reset();
}
pipe_compiler* get_pipe_compiler()
{
ensure(g_pipe_compilers);
int thread_index = g_compiler_index++;
return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers);
}
}
| 3,508 | C++ | .cpp | 123 | 24.105691 | 103 | 0.674154 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,483 | GLFragmentProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLFragmentProgram.cpp |
#include "stdafx.h"
#include "GLFragmentProgram.h"
#include "Emu/system_config.h"
#include "GLCommonDecompiler.h"
#include "../GCM.h"
#include "../Program/GLSLCommon.h"
std::string GLFragmentDecompilerThread::getFloatTypeName(usz elementCount)
{
return glsl::getFloatTypeNameImpl(elementCount);
}
std::string GLFragmentDecompilerThread::getHalfTypeName(usz elementCount)
{
return glsl::getHalfTypeNameImpl(elementCount);
}
std::string GLFragmentDecompilerThread::getFunction(FUNCTION f)
{
return glsl::getFunctionImpl(f);
}
std::string GLFragmentDecompilerThread::compareFunction(COMPARE f, const std::string &Op0, const std::string &Op1)
{
return glsl::compareFunctionImpl(f, Op0, Op1);
}
void GLFragmentDecompilerThread::insertHeader(std::stringstream & OS)
{
int gl_version = 430;
std::vector<std::string> required_extensions;
if (device_props.has_native_half_support)
{
const auto driver_caps = gl::get_driver_caps();
if (driver_caps.NV_gpu_shader5_supported)
{
required_extensions.push_back("GL_NV_gpu_shader5");
}
else if (driver_caps.AMD_gpu_shader_half_float_supported)
{
required_extensions.push_back("GL_AMD_gpu_shader_half_float");
}
}
if (m_prog.ctrl & RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION)
{
gl_version = std::max(gl_version, 450);
required_extensions.push_back("GL_NV_fragment_shader_barycentric");
}
OS << "#version " << gl_version << "\n";
for (const auto& ext : required_extensions)
{
OS << "#extension " << ext << ": require\n";
}
glsl::insert_subheader_block(OS);
}
void GLFragmentDecompilerThread::insertInputs(std::stringstream & OS)
{
glsl::insert_fragment_shader_inputs_block(
OS,
glsl::extension_flavour::NV,
m_prog,
m_parr.params[PF_PARAM_IN],
{
.two_sided_color = !!(properties.in_register_mask & in_diff_color),
.two_sided_specular = !!(properties.in_register_mask & in_spec_color)
},
gl::get_varying_register_location
);
}
void GLFragmentDecompilerThread::insertOutputs(std::stringstream & OS)
{
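// RSX fragment programs export color through r0/r2/r3/r4 (fp32) or
// h0/h4/h6/h8 (fp16); map whichever set is live to the ocol0-3 outputs.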
const std::pair<std::string, std::string> table[] =
{
{ "ocol0", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r0" : "h0" },
{ "ocol1", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r2" : "h4" },
{ "ocol2", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r3" : "h6" },
{ "ocol3", m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS ? "r4" : "h8" },
};
const bool float_type = (m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS) || !device_props.has_native_half_support;
const auto reg_type = float_type ? "vec4" : getHalfTypeName(4);
for (uint i = 0; i < std::size(table); ++i)
{
if (m_parr.HasParam(PF_PARAM_NONE, reg_type, table[i].second))
OS << "layout(location=" << i << ") out vec4 " << table[i].first << ";\n";
}
}
void GLFragmentDecompilerThread::insertConstants(std::stringstream & OS)
{
for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM])
{
if (PT.type != "sampler1D" &&
PT.type != "sampler2D" &&
PT.type != "sampler3D" &&
PT.type != "samplerCube")
continue;
for (const ParamItem& PI : PT.items)
{
std::string samplerType = PT.type;
int index = atoi(&PI.name[3]);
const auto mask = (1 << index);
if (properties.redirected_sampler_mask & mask)
{
// Provide a stencil view of the main resource for the S channel
OS << "uniform u" << samplerType << " " << PI.name << "_stencil;\n";
}
else if (properties.shadow_sampler_mask & mask)
{
if (properties.common_access_sampler_mask & mask)
{
rsx_log.error("Texture unit %d is sampled as both a shadow texture and a depth texture", index);
}
else
{
samplerType += "Shadow";
}
}
OS << "uniform " << samplerType << " " << PI.name << ";\n";
}
}
OS << "\n";
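// Second pass: everything that is not a sampler goes into a single std140
// uniform block holding the program's constant registers.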
std::string constants_block;
for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM])
{
if (PT.type == "sampler1D" ||
PT.type == "sampler2D" ||
PT.type == "sampler3D" ||
PT.type == "samplerCube")
continue;
for (const ParamItem& PI : PT.items)
{
constants_block += " " + PT.type + " " + PI.name + ";\n";
}
}
if (!constants_block.empty())
{
OS << "layout(std140, binding = " << GL_FRAGMENT_CONSTANT_BUFFERS_BIND_SLOT << ") uniform FragmentConstantsBuffer\n";
OS << "{\n";
OS << constants_block;
OS << "};\n\n";
}
OS << "layout(std140, binding = " << GL_FRAGMENT_STATE_BIND_SLOT << ") uniform FragmentStateBuffer\n";
OS << "{\n";
OS << " float fog_param0;\n";
OS << " float fog_param1;\n";
OS << " uint rop_control;\n";
OS << " float alpha_ref;\n";
OS << " uint reserved;\n";
OS << " uint fog_mode;\n";
OS << " float wpos_scale;\n";
OS << " float wpos_bias;\n";
OS << "};\n\n";
OS << "layout(std140, binding = " << GL_FRAGMENT_TEXTURE_PARAMS_BIND_SLOT << ") uniform TextureParametersBuffer\n";
OS << "{\n";
OS << " sampler_info texture_parameters[16];\n";
OS << "};\n\n";
OS << "layout(std140, binding = " << GL_RASTERIZER_STATE_BIND_SLOT << ") uniform RasterizerHeap\n";
OS << "{\n";
OS << " uvec4 stipple_pattern[8];\n";
OS << "};\n\n";
}
void GLFragmentDecompilerThread::insertGlobalFunctions(std::stringstream &OS)
{
m_shader_props.domain = glsl::glsl_fragment_program;
m_shader_props.require_lit_emulation = properties.has_lit_op;
m_shader_props.fp32_outputs = !!(m_prog.ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS);
m_shader_props.require_depth_conversion = properties.redirected_sampler_mask != 0;
m_shader_props.require_wpos = !!(properties.in_register_mask & in_wpos);
m_shader_props.require_texture_ops = properties.has_tex_op;
m_shader_props.require_tex_shadow_ops = properties.shadow_sampler_mask != 0;
m_shader_props.require_texture_expand = properties.has_exp_tex_op;
m_shader_props.require_srgb_to_linear = properties.has_upg;
m_shader_props.require_linear_to_srgb = properties.has_pkg;
m_shader_props.require_fog_read = properties.in_register_mask & in_fogc;
m_shader_props.emulate_coverage_tests = true; // g_cfg.video.antialiasing_level == msaa_level::none;
m_shader_props.emulate_shadow_compare = device_props.emulate_depth_compare;
m_shader_props.low_precision_tests = ::gl::get_driver_caps().vendor_NVIDIA && !(m_prog.ctrl & RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION);
m_shader_props.disable_early_discard = !::gl::get_driver_caps().vendor_NVIDIA;
m_shader_props.supports_native_fp16 = device_props.has_native_half_support;
m_shader_props.ROP_output_rounding = g_cfg.video.shader_precision != gpu_preset_level::low;
m_shader_props.require_tex1D_ops = properties.has_tex1D;
m_shader_props.require_tex2D_ops = properties.has_tex2D;
m_shader_props.require_tex3D_ops = properties.has_tex3D;
m_shader_props.require_shadowProj_ops = properties.shadow_sampler_mask != 0 && properties.has_texShadowProj;
glsl::insert_glsl_legacy_function(OS, m_shader_props);
}
void GLFragmentDecompilerThread::insertMainStart(std::stringstream & OS)
{
std::set<std::string> output_registers;
if (m_ctrl & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS)
{
output_registers = { "r0", "r2", "r3", "r4" };
}
else
{
output_registers = { "h0", "h4", "h6", "h8" };
}
if (m_ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
{
output_registers.insert("r1");
}
std::string registers;
std::string reg_type;
const auto half4 = getHalfTypeName(4);
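// Group consecutive registers of the same type into one declaration
// statement to keep the emitted GLSL compact.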
for (auto ®_name : output_registers)
{
const auto type = (reg_name[0] == 'r' || !device_props.has_native_half_support) ? "vec4" : half4;
if (reg_type == type) [[likely]]
{
registers += ", " + reg_name + " = " + type + "(0.)";
}
else
{
if (!registers.empty())
registers += ";\n";
registers += type + " " + reg_name + " = " + type + "(0.)";
}
reg_type = type;
}
if (!registers.empty())
{
OS << registers << ";\n";
}
OS << "void fs_main()\n";
OS << "{\n";
for (const ParamType& PT : m_parr.params[PF_PARAM_NONE])
{
for (const auto& PI : PT.items)
{
if (output_registers.find(PI.name) != output_registers.end())
continue;
OS << " " << PT.type << " " << PI.name;
if (!PI.value.empty())
OS << " = " << PI.value;
OS << ";\n";
}
}
if (properties.has_w_access)
OS << " float in_w = (1. / gl_FragCoord.w);\n";
if (properties.in_register_mask & in_ssa)
OS << " vec4 ssa = gl_FrontFacing ? vec4(1.) : vec4(-1.);\n";
if (properties.in_register_mask & in_wpos)
OS << " vec4 wpos = get_wpos();\n";
if (properties.in_register_mask & in_fogc)
OS << " vec4 fogc = fetch_fog_value(fog_mode);\n";
if (m_prog.two_sided_lighting)
{
if (properties.in_register_mask & in_diff_color)
OS << " vec4 diff_color = gl_FrontFacing ? diff_color1 : diff_color0;\n";
if (properties.in_register_mask & in_spec_color)
OS << " vec4 spec_color = gl_FrontFacing ? spec_color1 : spec_color0;\n";
}
}
void GLFragmentDecompilerThread::insertMainEnd(std::stringstream & OS)
{
OS << "}\n\n";
OS << "void main()\n";
OS << "{\n";
::glsl::insert_rop_init(OS);
OS << "\n" << " fs_main();\n\n";
glsl::insert_rop(OS, m_shader_props);
if (m_ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
{
if (m_parr.HasParam(PF_PARAM_NONE, "vec4", "r1"))
{
//Depth writes are always from a fp32 register. See issues section on nvidia's NV_fragment_program spec
//https://www.khronos.org/registry/OpenGL/extensions/NV/NV_fragment_program.txt
OS << " gl_FragDepth = r1.z;\n";
}
else
{
//Input not declared. Leave commented to assist in debugging the shader
OS << " //gl_FragDepth = r1.z;\n";
}
}
OS << "}\n";
}
void GLFragmentDecompilerThread::Task()
{
m_shader = Decompile();
}
GLFragmentProgram::GLFragmentProgram() = default;
GLFragmentProgram::~GLFragmentProgram()
{
Delete();
}
void GLFragmentProgram::Decompile(const RSXFragmentProgram& prog)
{
u32 size;
std::string source;
GLFragmentDecompilerThread decompiler(source, parr, prog, size);
if (g_cfg.video.shader_precision == gpu_preset_level::low)
{
const auto driver_caps = gl::get_driver_caps();
decompiler.device_props.has_native_half_support = driver_caps.NV_gpu_shader5_supported || driver_caps.AMD_gpu_shader_half_float_supported;
decompiler.device_props.has_low_precision_rounding = driver_caps.vendor_NVIDIA;
}
decompiler.Task();
for (const ParamType& PT : decompiler.m_parr.params[PF_PARAM_UNIFORM])
{
for (const ParamItem& PI : PT.items)
{
if (PT.type == "sampler1D" ||
PT.type == "sampler2D" ||
PT.type == "sampler3D" ||
PT.type == "samplerCube")
continue;
usz offset = atoi(PI.name.c_str() + 2);
FragmentConstantOffsetCache.push_back(offset);
}
}
shader.create(::glsl::program_domain::glsl_fragment_program, source);
id = shader.id();
}
void GLFragmentProgram::Delete()
{
shader.remove();
id = 0;
}
| 10,701 | C++ | .cpp | 313 | 31.517572 | 140 | 0.679899 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
5,484 | GLDMA.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLDMA.cpp |
#include "stdafx.h"
#include "GLDMA.h"
#include "Emu/Memory/vm.h"
namespace gl
{
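// DMA mappings are managed in fixed 64KB blocks (0x10000 bytes); addresses
// are rounded down to a block boundary when looking up or creating entries.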
static constexpr u32 s_dma_block_size = 0x10000;
static constexpr u32 s_dma_block_mask = ~(s_dma_block_size - 1);
std::unordered_map<u32, std::unique_ptr<dma_block>> g_dma_pool;
void dma_block::allocate(u32 base_address, u32 block_size)
{
// Since this is a userptr block, we don't need to move data around on resize. Just "claim" a different chunk and move on.
if (m_data)
{
m_data->remove();
}
void* userptr = vm::get_super_ptr(base_address);
m_data = std::make_unique<gl::buffer>();
m_data->create(buffer::target::array, block_size, userptr, buffer::memory_type::userptr, 0);
m_base_address = base_address;
// Some drivers may reject userptr input for whatever reason. Check that the state is still valid.
gl::check_state();
}
void* dma_block::map(const utils::address_range& range) const
{
ensure(range.inside(this->range()));
return vm::get_super_ptr(range.start);
}
void dma_block::resize(u32 new_length)
{
if (new_length <= length())
{
return;
}
allocate(m_base_address, new_length);
}
void dma_block::set_parent(const dma_block* other)
{
ensure(this->range().inside(other->range()));
ensure(other != this);
m_parent = other;
if (m_data)
{
m_data->remove();
m_data.reset();
}
}
bool dma_block::can_map(const utils::address_range& range) const
{
if (m_parent)
{
return m_parent->can_map(range);
}
return range.inside(this->range());
}
void clear_dma_resources()
{
g_dma_pool.clear();
}
utils::address_range to_dma_block_range(u32 start, u32 length)
{
const auto start_block_address = start & s_dma_block_mask;
const auto end_block_address = (start + length + s_dma_block_size - 1) & s_dma_block_mask;
return utils::address_range::start_end(start_block_address, end_block_address);
}
const dma_block& get_block(u32 start, u32 length)
{
const auto block_range = to_dma_block_range(start, length);
auto& block = g_dma_pool[block_range.start];
if (!block)
{
block = std::make_unique<dma_block>();
block->allocate(block_range.start, block_range.length());
return *block;
}
const auto range = utils::address_range::start_length(start, length);
if (block->can_map(range)) [[likely]]
{
return *block;
}
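// The requested range straddles existing blocks: grow the head block to
// cover the union, then reparent every block it swallowed.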
const auto owner = block->head();
const auto new_length = (block_range.end + 1) - owner->base_addr();
const auto search_end = (block_range.end + 1);
// 1. Resize to new length
ensure((new_length & ~s_dma_block_mask) == 0);
auto new_owner = std::make_unique<dma_block>();
new_owner->allocate(owner->base_addr(), new_length);
// 2. Acquire all the extras
for (u32 id = owner->base_addr() + s_dma_block_size;
id < search_end;
id += s_dma_block_size)
{
ensure((id % s_dma_block_size) == 0);
g_dma_pool[id]->set_parent(new_owner.get());
}
block = std::move(new_owner);
return *block;
}
dma_mapping_handle map_dma(u32 guest_address, u32 length)
{
auto& block = get_block(guest_address, length);
return { guest_address - block.base_addr(), block.get() };
}
}
| 3,264 | C++ | .cpp | 103 | 27.504854 | 125 | 0.656501 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,485 | GLHelpers.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLHelpers.cpp |
#include "stdafx.h"
#include "GLHelpers.h"
#include "GLTexture.h"
#include "GLCompute.h"
#include "util/logs.hpp"
#include "../Common/simple_array.hpp"
#include <unordered_map>
namespace gl
{
std::unordered_map<u32, std::unique_ptr<gl::compute_task>> g_compute_tasks;
capabilities g_driver_caps;
void flush_command_queue(fence& fence_obj)
{
fence_obj.check_signaled();
}
GLenum draw_mode(rsx::primitive_type in)
{
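// Map RSX primitive types to their GL equivalents. Quads and polygons have
// no native GL analogue; they are returned as triangle modes here and
// expanded elsewhere (see is_primitive_native below).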
switch (in)
{
case rsx::primitive_type::points: return GL_POINTS;
case rsx::primitive_type::lines: return GL_LINES;
case rsx::primitive_type::line_loop: return GL_LINE_LOOP;
case rsx::primitive_type::line_strip: return GL_LINE_STRIP;
case rsx::primitive_type::triangles: return GL_TRIANGLES;
case rsx::primitive_type::triangle_strip: return GL_TRIANGLE_STRIP;
case rsx::primitive_type::triangle_fan: return GL_TRIANGLE_FAN;
case rsx::primitive_type::quads: return GL_TRIANGLES;
case rsx::primitive_type::quad_strip: return GL_TRIANGLE_STRIP;
case rsx::primitive_type::polygon: return GL_TRIANGLES;
default:
fmt::throw_exception("unknown primitive type");
}
}
void destroy_compute_tasks()
{
for (auto& [key, prog] : g_compute_tasks)
{
prog->destroy();
}
g_compute_tasks.clear();
}
// https://www.khronos.org/opengl/wiki/Debug_Output
void APIENTRY log_debug(GLenum source, GLenum type, GLuint id,
GLenum severity, GLsizei /*length*/, const GLchar* message,
const void* /*user_param*/)
{
// Message source
std::string str_source;
switch (source)
{
// Calls to the OpenGL API
case GL_DEBUG_SOURCE_API:
str_source = "API";
break;
// Calls to a window-system API
case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
str_source = "WINDOW_SYSTEM";
break;
// A compiler for a shading language
case GL_DEBUG_SOURCE_SHADER_COMPILER:
str_source = "SHADER_COMPILER";
break;
// An application associated with OpenGL
case GL_DEBUG_SOURCE_THIRD_PARTY:
str_source = "THIRD_PARTY";
break;
// Generated by the user of this application
case GL_DEBUG_SOURCE_APPLICATION:
str_source = "APPLICATION";
break;
// Some source that isn't one of these
case GL_DEBUG_SOURCE_OTHER:
str_source = "OTHER";
break;
// Not in the documentation
default:
str_source = "UNKNOWN";
rsx_log.error("log_debug(source=%d): Unknown message source", source);
}
// Message type
std::string str_type;
switch (type)
{
// An error, typically from the API
case GL_DEBUG_TYPE_ERROR:
str_type = "ERROR";
break;
// Some behavior marked deprecated has been used
case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR:
str_type = "DEPRECATED";
break;
// Something has invoked undefined behavior
case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:
str_type = "UNDEFINED";
break;
// Some functionality the user relies upon is not portable
case GL_DEBUG_TYPE_PORTABILITY:
str_type = "PORTABILITY";
break;
// Code has triggered possible performance issues
case GL_DEBUG_TYPE_PERFORMANCE:
str_type = "PERFORMANCE";
break;
// Command stream annotation
case GL_DEBUG_TYPE_MARKER:
str_type = "MARKER";
break;
// Group pushing
case GL_DEBUG_TYPE_PUSH_GROUP:
str_type = "PUSH_GROUP";
break;
// Group popping
case GL_DEBUG_TYPE_POP_GROUP:
str_type = "POP_GROUP";
break;
// Some type that isn't one of these
case GL_DEBUG_TYPE_OTHER:
str_type = "OTHER";
break;
// Not in the documentation
default:
str_type = "UNKNOWN";
rsx_log.error("log_debug(type=%d): Unknown message type", type);
}
switch (severity)
{
// All OpenGL Errors, shader compilation/linking errors, or highly-dangerous undefined behavior
case GL_DEBUG_SEVERITY_HIGH:
// Major performance warnings, shader compilation/linking warnings, or the use of deprecated functionality
case GL_DEBUG_SEVERITY_MEDIUM:
rsx_log.error("[DEBUG_OUTPUT] [%s] [%s] [%d]: %s", str_source, str_type, id, message);
return;
// Redundant state change performance warning, or unimportant undefined behavior
case GL_DEBUG_SEVERITY_LOW:
rsx_log.warning("[DEBUG_OUTPUT] [%s] [%s] [%d]: %s", str_source, str_type, id, message);
return;
// Anything that isn't an error or performance issue
case GL_DEBUG_SEVERITY_NOTIFICATION:
rsx_log.notice("[DEBUG_OUTPUT] [%s] [%s] [%d]: %s", str_source, str_type, id, message);
return;
// Not in the documentation
default:
rsx_log.error("log_debug(severity=%d): Unknown severity level", severity);
rsx_log.error("[DEBUG_OUTPUT] [%s] [%s] [%d]: %s", str_source, str_type, id, message);
return;
}
}
void enable_debugging()
{
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
glDebugMessageCallback(log_debug, nullptr);
}
const capabilities& get_driver_caps()
{
if (!g_driver_caps.initialized)
g_driver_caps.initialize();
return g_driver_caps;
}
bool is_primitive_native(rsx::primitive_type in)
{
switch (in)
{
case rsx::primitive_type::points:
case rsx::primitive_type::lines:
case rsx::primitive_type::line_loop:
case rsx::primitive_type::line_strip:
case rsx::primitive_type::triangles:
case rsx::primitive_type::triangle_strip:
case rsx::primitive_type::triangle_fan:
case rsx::primitive_type::quad_strip:
return true;
case rsx::primitive_type::quads:
case rsx::primitive_type::polygon:
return false;
default:
fmt::throw_exception("unknown primitive type");
}
}
}
| 5,386 | C++ | .cpp | 179 | 26.949721 | 108 | 0.712553 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,486 | GLGSRender.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLGSRender.cpp |
#include "stdafx.h"
#include "../Overlays/overlay_compile_notification.h"
#include "../Overlays/Shaders/shader_loading_dialog_native.h"
#include "GLGSRender.h"
#include "GLCompute.h"
#include "GLDMA.h"
#include "Emu/Memory/vm_locking.h"
#include "Emu/RSX/rsx_methods.h"
#include "Emu/RSX/Host/RSXDMAWriter.h"
#include "Emu/RSX/NV47/HW/context_accessors.define.h"
[[noreturn]] extern void report_fatal_error(std::string_view _text, bool is_html = false, bool include_help_text = true);
namespace
{
void throw_fatal(const std::vector<std::string>& reasons)
{
const std::string delimiter = "\n- ";
const std::string reasons_list = fmt::merge(reasons, delimiter);
const std::string message = fmt::format(
"OpenGL could not be initialized on this system for the following reason(s):\n"
"\n"
"- %s",
reasons_list);
Emu.BlockingCallFromMainThread([message]()
{
report_fatal_error(message, false, false);
});
}
}
u64 GLGSRender::get_cycles()
{
return thread_ctrl::get_cycles(static_cast<named_thread<GLGSRender>&>(*this));
}
GLGSRender::GLGSRender(utils::serial* ar) noexcept : GSRender(ar)
{
m_shaders_cache = std::make_unique<gl::shader_cache>(m_prog_buffer, "opengl", "v1.94");
if (g_cfg.video.disable_vertex_cache)
m_vertex_cache = std::make_unique<gl::null_vertex_cache>();
else
m_vertex_cache = std::make_unique<gl::weak_vertex_cache>();
backend_config.supports_hw_a2c = false;
backend_config.supports_hw_a2one = false;
backend_config.supports_multidraw = true;
backend_config.supports_normalized_barycentrics = true;
}
extern CellGcmContextData current_context;
void GLGSRender::set_viewport()
{
// NOTE: scale offset matrix already contains the viewport transformation
const auto [clip_width, clip_height] = rsx::apply_resolution_scale<true>(
rsx::method_registers.surface_clip_width(), rsx::method_registers.surface_clip_height());
glViewport(0, 0, clip_width, clip_height);
}
void GLGSRender::set_scissor(bool clip_viewport)
{
areau scissor;
if (get_scissor(scissor, clip_viewport))
{
// NOTE: window origin does not affect scissor region (probably only affects viewport matrix; already applied)
// See LIMBO [NPUB-30373] which uses shader window origin = top
glScissor(scissor.x1, scissor.y1, scissor.width(), scissor.height());
gl_state.enable(GL_TRUE, GL_SCISSOR_TEST);
}
}
void GLGSRender::on_init_thread()
{
ensure(m_frame);
// NOTES: All contexts have to be created before any is bound to a thread
// This allows context sharing to work (both GLRCs passed to wglShareLists have to be idle or you get ERROR_BUSY)
m_context = m_frame->make_context();
const auto shadermode = g_cfg.video.shadermode.get();
if (shadermode != shader_mode::recompiler)
{
auto context_create_func = [m_frame = m_frame]()
{
return m_frame->make_context();
};
auto context_bind_func = [m_frame = m_frame](draw_context_t ctx)
{
m_frame->set_current(ctx);
};
auto context_destroy_func = [m_frame = m_frame](draw_context_t ctx)
{
m_frame->delete_context(ctx);
};
gl::initialize_pipe_compiler(context_create_func, context_bind_func, context_destroy_func, g_cfg.video.shader_compiler_threads_count);
}
else
{
auto null_context_create_func = []() -> draw_context_t
{
return nullptr;
};
gl::initialize_pipe_compiler(null_context_create_func, {}, {}, 1);
}
// Bind primary context to main RSX thread
m_frame->set_current(m_context);
gl::set_primary_context_thread();
zcull_ctrl.reset(static_cast<::rsx::reports::ZCULL_control*>(this));
m_occlusion_type = g_cfg.video.precise_zpass_count ? GL_SAMPLES_PASSED : GL_ANY_SAMPLES_PASSED;
gl::init();
gl::set_command_context(gl_state);
// Enable adaptive vsync if vsync is requested
gl::set_swapinterval(g_cfg.video.vsync ? -1 : 0);
if (g_cfg.video.debug_output)
gl::enable_debugging();
rsx_log.success("GL RENDERER: %s (%s)", reinterpret_cast<const char*>(glGetString(GL_RENDERER)), reinterpret_cast<const char*>(glGetString(GL_VENDOR)));
rsx_log.success("GL VERSION: %s", reinterpret_cast<const char*>(glGetString(GL_VERSION)));
rsx_log.success("GLSL VERSION: %s", reinterpret_cast<const char*>(glGetString(GL_SHADING_LANGUAGE_VERSION)));
auto& gl_caps = gl::get_driver_caps();
std::vector<std::string> exception_reasons;
if (!gl_caps.ARB_texture_buffer_supported)
{
exception_reasons.push_back("GL_ARB_texture_buffer_object is required but not supported by your GPU");
}
if (!gl_caps.ARB_dsa_supported && !gl_caps.EXT_dsa_supported)
{
exception_reasons.push_back("GL_ARB_direct_state_access or GL_EXT_direct_state_access is required but not supported by your GPU");
}
if (!exception_reasons.empty())
{
throw_fatal(exception_reasons);
}
if (!gl_caps.ARB_depth_buffer_float_supported && g_cfg.video.force_high_precision_z_buffer)
{
rsx_log.warning("High precision Z buffer requested but your GPU does not support GL_ARB_depth_buffer_float. Option ignored.");
}
if (!gl_caps.ARB_texture_barrier_supported && !gl_caps.NV_texture_barrier_supported && !g_cfg.video.strict_rendering_mode)
{
rsx_log.warning("Texture barriers are not supported by your GPU. Feedback loops will have undefined results.");
}
if (!gl_caps.ARB_bindless_texture_supported)
{
switch (shadermode)
{
case shader_mode::async_with_interpreter:
case shader_mode::interpreter_only:
rsx_log.error("Bindless texture extension required for shader interpreter is not supported on your GPU. Will use async recompiler as a fallback.");
g_cfg.video.shadermode.set(shader_mode::async_recompiler);
break;
default:
break;
}
}
if (gl_caps.NV_fragment_shader_barycentric_supported &&
gl_caps.vendor_NVIDIA &&
g_cfg.video.shader_precision != gpu_preset_level::low)
{
// NVIDIA's attribute interpolation requires some workarounds
backend_config.supports_normalized_barycentrics = false;
}
if (gl_caps.AMD_pinned_memory && g_cfg.video.host_label_synchronization)
{
backend_config.supports_host_gpu_labels = true;
m_host_gpu_context_data = std::make_unique<gl::buffer>();
m_host_gpu_context_data->create(gl::buffer::target::array, 4096, nullptr, gl::buffer::memory_type::host_visible,
gl::buffer::usage::host_read | gl::buffer::usage::host_write | gl::buffer::usage::persistent_map);
auto host_context_ptr = reinterpret_cast<rsx::host_gpu_context_t*>(m_host_gpu_context_data->map(0, 4096, gl::buffer::access::persistent_rw));
m_host_dma_ctrl = std::make_unique<rsx::RSXDMAWriter>(host_context_ptr);
m_enqueued_host_write_buffer = std::make_unique<gl::scratch_ring_buffer>();
m_enqueued_host_write_buffer->create(gl::buffer::target::array, 64 * 0x100000, gl::buffer::usage::dynamic_update);
}
// Use industry standard resource alignment values as defaults
m_uniform_buffer_offset_align = 256;
m_min_texbuffer_alignment = 256;
m_max_texbuffer_size = 0;
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &m_uniform_buffer_offset_align);
glGetIntegerv(GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT, &m_min_texbuffer_alignment);
glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &m_max_texbuffer_size);
m_vao.create();
// Set min alignment to 16-bytes for SSE optimizations with aligned addresses to work
m_min_texbuffer_alignment = std::max(m_min_texbuffer_alignment, 16);
m_uniform_buffer_offset_align = std::max(m_uniform_buffer_offset_align, 16);
rsx_log.notice("Supported texel buffer size reported: %d bytes", m_max_texbuffer_size);
if (m_max_texbuffer_size < (16 * 0x100000))
{
rsx_log.error("Max texture buffer size supported is less than 16M which is useless. Expect undefined behaviour.");
m_max_texbuffer_size = (16 * 0x100000);
}
// Array stream buffer
{
m_gl_persistent_stream_buffer = std::make_unique<gl::texture>(GL_TEXTURE_BUFFER, 0, 0, 0, 0, GL_R8UI);
gl_state.bind_texture(GL_STREAM_BUFFER_START + 0, GL_TEXTURE_BUFFER, m_gl_persistent_stream_buffer->id());
}
// Register stream buffer
{
m_gl_volatile_stream_buffer = std::make_unique<gl::texture>(GL_TEXTURE_BUFFER, 0, 0, 0, 0, GL_R8UI);
gl_state.bind_texture(GL_STREAM_BUFFER_START + 1, GL_TEXTURE_BUFFER, m_gl_volatile_stream_buffer->id());
}
// Fallback null texture instead of relying on texture0
{
std::array<u32, 8> pixeldata = { 0, 0, 0, 0, 0, 0, 0, 0 };
// 1D
auto tex1D = std::make_unique<gl::texture>(GL_TEXTURE_1D, 1, 1, 1, 1, GL_RGBA8);
tex1D->copy_from(pixeldata.data(), gl::texture::format::rgba, gl::texture::type::uint_8_8_8_8, {});
// 2D
auto tex2D = std::make_unique<gl::texture>(GL_TEXTURE_2D, 1, 1, 1, 1, GL_RGBA8);
tex2D->copy_from(pixeldata.data(), gl::texture::format::rgba, gl::texture::type::uint_8_8_8_8, {});
// 3D
auto tex3D = std::make_unique<gl::texture>(GL_TEXTURE_3D, 1, 1, 1, 1, GL_RGBA8);
tex3D->copy_from(pixeldata.data(), gl::texture::format::rgba, gl::texture::type::uint_8_8_8_8, {});
// CUBE
auto texCUBE = std::make_unique<gl::texture>(GL_TEXTURE_CUBE_MAP, 1, 1, 1, 1, GL_RGBA8);
texCUBE->copy_from(pixeldata.data(), gl::texture::format::rgba, gl::texture::type::uint_8_8_8_8, {});
m_null_textures[GL_TEXTURE_1D] = std::move(tex1D);
m_null_textures[GL_TEXTURE_2D] = std::move(tex2D);
m_null_textures[GL_TEXTURE_3D] = std::move(tex3D);
m_null_textures[GL_TEXTURE_CUBE_MAP] = std::move(texCUBE);
}
if (!gl_caps.ARB_buffer_storage_supported)
{
rsx_log.warning("Forcing use of legacy OpenGL buffers because ARB_buffer_storage is not supported");
// TODO: do not modify config options
g_cfg.video.renderdoc_compatiblity.from_string("true");
}
if (g_cfg.video.renderdoc_compatiblity)
{
rsx_log.warning("Using legacy openGL buffers.");
manually_flush_ring_buffers = true;
m_attrib_ring_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_transform_constants_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_fragment_constants_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_fragment_env_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_vertex_env_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_texture_parameters_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_vertex_layout_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_index_ring_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_vertex_instructions_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_fragment_instructions_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_raster_env_ring_buffer = std::make_unique<gl::legacy_ring_buffer>();
m_scratch_ring_buffer = std::make_unique<gl::legacy_ring_buffer>();
}
else
{
m_attrib_ring_buffer = std::make_unique<gl::ring_buffer>();
m_transform_constants_buffer = std::make_unique<gl::ring_buffer>();
m_fragment_constants_buffer = std::make_unique<gl::ring_buffer>();
m_fragment_env_buffer = std::make_unique<gl::ring_buffer>();
m_vertex_env_buffer = std::make_unique<gl::ring_buffer>();
m_texture_parameters_buffer = std::make_unique<gl::ring_buffer>();
m_vertex_layout_buffer = std::make_unique<gl::ring_buffer>();
m_index_ring_buffer = gl_caps.vendor_AMD ? std::make_unique<gl::transient_ring_buffer>() : std::make_unique<gl::ring_buffer>();
m_vertex_instructions_buffer = std::make_unique<gl::ring_buffer>();
m_fragment_instructions_buffer = std::make_unique<gl::ring_buffer>();
m_raster_env_ring_buffer = std::make_unique<gl::ring_buffer>();
m_scratch_ring_buffer = std::make_unique<gl::ring_buffer>();
}
m_attrib_ring_buffer->create(gl::buffer::target::texture, 256 * 0x100000);
m_index_ring_buffer->create(gl::buffer::target::element_array, 16 * 0x100000);
m_transform_constants_buffer->create(gl::buffer::target::uniform, 64 * 0x100000);
m_fragment_constants_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_fragment_env_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_vertex_env_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_texture_parameters_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_vertex_layout_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_raster_env_ring_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
m_scratch_ring_buffer->create(gl::buffer::target::uniform, 16 * 0x100000);
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
{
m_vertex_instructions_buffer->create(gl::buffer::target::ssbo, 16 * 0x100000);
m_fragment_instructions_buffer->create(gl::buffer::target::ssbo, 16 * 0x100000);
m_shader_interpreter.create();
}
if (gl_caps.vendor_AMD)
{
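// AMD-only: pre-build an identity (n -> n) index buffer for draw paths
// that need an explicit index source on this vendor.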
// Initialize with 256k identity entries
std::vector<u32> dst(256 * 1024);
for (u32 n = 0; n < (0x100000 >> 2); ++n)
{
dst[n] = n;
}
m_identity_index_buffer = std::make_unique<gl::buffer>();
m_identity_index_buffer->create(gl::buffer::target::element_array, dst.size() * sizeof(u32), dst.data(), gl::buffer::memory_type::local);
}
else if (gl_caps.vendor_NVIDIA)
{
// NOTE: On NVIDIA cards going back decades (including the PS3) there is a slight normalization inaccuracy in compressed formats.
// Confirmed in BLES01916 (The Evil Within) which uses RGB565 for some virtual texturing data.
backend_config.supports_hw_renormalization = true;
}
m_persistent_stream_view.update(m_attrib_ring_buffer.get(), 0, std::min<u32>(static_cast<u32>(m_attrib_ring_buffer->size()), m_max_texbuffer_size));
m_volatile_stream_view.update(m_attrib_ring_buffer.get(), 0, std::min<u32>(static_cast<u32>(m_attrib_ring_buffer->size()), m_max_texbuffer_size));
m_gl_persistent_stream_buffer->copy_from(m_persistent_stream_view);
m_gl_volatile_stream_buffer->copy_from(m_volatile_stream_view);
m_vao.element_array_buffer = *m_index_ring_buffer;
int image_unit = 0;
for (auto &sampler : m_fs_sampler_states)
{
sampler.create();
sampler.bind(image_unit++);
}
for (auto &sampler : m_vs_sampler_states)
{
sampler.create();
sampler.bind(image_unit++);
}
for (auto& sampler : m_fs_sampler_mirror_states)
{
sampler.create();
sampler.apply_defaults();
sampler.bind(image_unit++);
}
//Occlusion query
for (u32 i = 0; i < rsx::reports::occlusion_query_count; ++i)
{
GLuint handle = 0;
auto &query = m_occlusion_query_data[i];
glGenQueries(1, &handle);
query.driver_handle = handle;
query.pending = false;
query.active = false;
query.result = 0;
}
m_ui_renderer.create();
m_video_output_pass.create();
m_gl_texture_cache.initialize();
m_prog_buffer.initialize
(
[this](void* const& props, const RSXVertexProgram& vp, const RSXFragmentProgram& fp)
{
// Program was linked or queued for linking
m_shaders_cache->store(props, vp, fp);
}
);
if (!m_overlay_manager)
{
m_frame->hide();
m_shaders_cache->load(nullptr);
m_frame->show();
}
else
{
rsx::shader_loading_dialog_native dlg(this);
m_shaders_cache->load(&dlg);
}
}
void GLGSRender::on_exit()
{
// Destroy internal RSX state, may call upon this->do_local_task
GSRender::on_exit();
// Globals
// TODO: Move these
gl::destroy_compute_tasks();
gl::destroy_overlay_passes();
gl::clear_dma_resources();
gl::destroy_global_texture_resources();
gl::debug::g_vis_texture.reset(); // TODO
gl::destroy_pipe_compiler();
m_prog_buffer.clear();
m_rtts.destroy();
m_host_dma_ctrl.reset();
m_host_gpu_context_data.reset();
m_enqueued_host_write_buffer.reset();
for (auto &fbo : m_framebuffer_cache)
{
fbo.remove();
}
m_framebuffer_cache.clear();
m_upscaler.reset();
for (auto& flip_tex_image : m_flip_tex_color)
{
flip_tex_image.reset();
}
if (m_vao)
{
m_vao.remove();
}
m_gl_persistent_stream_buffer.reset();
m_gl_volatile_stream_buffer.reset();
for (auto &sampler : m_fs_sampler_states)
{
sampler.remove();
}
for (auto &sampler : m_fs_sampler_mirror_states)
{
sampler.remove();
}
for (auto &sampler : m_vs_sampler_states)
{
sampler.remove();
}
if (m_attrib_ring_buffer)
{
m_attrib_ring_buffer->remove();
}
if (m_transform_constants_buffer)
{
m_transform_constants_buffer->remove();
}
if (m_fragment_constants_buffer)
{
m_fragment_constants_buffer->remove();
}
if (m_fragment_env_buffer)
{
m_fragment_env_buffer->remove();
}
if (m_vertex_env_buffer)
{
m_vertex_env_buffer->remove();
}
if (m_texture_parameters_buffer)
{
m_texture_parameters_buffer->remove();
}
if (m_vertex_layout_buffer)
{
m_vertex_layout_buffer->remove();
}
if (m_index_ring_buffer)
{
m_index_ring_buffer->remove();
}
if (m_identity_index_buffer)
{
m_identity_index_buffer->remove();
}
if (m_vertex_instructions_buffer)
{
m_vertex_instructions_buffer->remove();
}
if (m_fragment_instructions_buffer)
{
m_fragment_instructions_buffer->remove();
}
if (m_raster_env_ring_buffer)
{
m_raster_env_ring_buffer->remove();
}
if (m_scratch_ring_buffer)
{
m_scratch_ring_buffer->remove();
}
m_null_textures.clear();
m_gl_texture_cache.destroy();
m_ui_renderer.destroy();
m_video_output_pass.destroy();
m_shader_interpreter.destroy();
for (u32 i = 0; i < rsx::reports::occlusion_query_count; ++i)
{
auto &query = m_occlusion_query_data[i];
query.active = false;
query.pending = false;
GLuint handle = query.driver_handle;
glDeleteQueries(1, &handle);
query.driver_handle = 0;
}
zcull_ctrl.release();
gl::set_primary_context_thread(false);
}
void GLGSRender::clear_surface(u32 arg)
{
if (skip_current_frame) return;
// If stencil write mask is disabled, remove clear_stencil bit
if (!rsx::method_registers.stencil_mask()) arg &= ~RSX_GCM_CLEAR_STENCIL_BIT;
// Ignore invalid clear flags
if ((arg & RSX_GCM_CLEAR_ANY_MASK) == 0) return;
u8 ctx = rsx::framebuffer_creation_context::context_draw;
if (arg & RSX_GCM_CLEAR_COLOR_RGBA_MASK) ctx |= rsx::framebuffer_creation_context::context_clear_color;
if (arg & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK) ctx |= rsx::framebuffer_creation_context::context_clear_depth;
init_buffers(static_cast<rsx::framebuffer_creation_context>(ctx), true);
if (!m_graphics_state.test(rsx::rtt_config_valid)) return;
gl::clear_cmd_info clear_cmd{};
gl::command_context cmd{ gl_state };
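// A clear only counts as "full frame" if the scissor covers the entire
// clip surface; partial clears must preserve existing contents, which may
// require a write barrier on the affected surfaces first.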
const bool full_frame =
rsx::method_registers.scissor_origin_x() == 0 &&
rsx::method_registers.scissor_origin_y() == 0 &&
rsx::method_registers.scissor_width() >= rsx::method_registers.surface_clip_width() &&
rsx::method_registers.scissor_height() >= rsx::method_registers.surface_clip_height();
bool update_color = false, update_z = false;
rsx::surface_depth_format2 surface_depth_format = rsx::method_registers.surface_depth_fmt();
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil); arg & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK)
{
if (arg & RSX_GCM_CLEAR_DEPTH_BIT)
{
u32 max_depth_value = get_max_depth_value(surface_depth_format);
u32 clear_depth = rsx::method_registers.z_clear_value(is_depth_stencil_format(surface_depth_format));
clear_cmd.clear_depth.value = f32(clear_depth) / max_depth_value;
clear_cmd.aspect_mask |= gl::image_aspect::depth;
}
if (is_depth_stencil_format(surface_depth_format))
{
if (arg & RSX_GCM_CLEAR_STENCIL_BIT)
{
clear_cmd.clear_stencil.mask = rsx::method_registers.stencil_mask();
clear_cmd.clear_stencil.value = rsx::method_registers.stencil_clear_value();
clear_cmd.aspect_mask |= gl::image_aspect::stencil;
}
if (const auto ds_mask = (arg & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK);
ds_mask != RSX_GCM_CLEAR_DEPTH_STENCIL_MASK || !full_frame)
{
ensure(clear_cmd.aspect_mask);
if (ds->state_flags & rsx::surface_state_flags::erase_bkgnd && // Needs initialization
ds->old_contents.empty() && !g_cfg.video.read_depth_buffer) // No way to load data from memory, so no initialization given
{
// Only one aspect was cleared. Make sure to memory initialize the other before removing dirty flag
if (ds_mask == RSX_GCM_CLEAR_DEPTH_BIT)
{
// Depth was cleared, initialize stencil
clear_cmd.clear_stencil.mask = 0xff;
clear_cmd.clear_stencil.value = 0xff;
clear_cmd.aspect_mask |= gl::image_aspect::stencil;
}
else if (ds_mask == RSX_GCM_CLEAR_STENCIL_BIT)
{
// Stencil was cleared, initialize depth
clear_cmd.clear_depth.value = 1.f;
clear_cmd.aspect_mask |= gl::image_aspect::depth;
}
}
else
{
ds->write_barrier(cmd);
}
}
}
if (clear_cmd.aspect_mask)
{
// Memory has been initialized
update_z = true;
}
}
if (auto colormask = (arg & 0xf0))
{
u8 clear_a = rsx::method_registers.clear_color_a();
u8 clear_r = rsx::method_registers.clear_color_r();
u8 clear_g = rsx::method_registers.clear_color_g();
u8 clear_b = rsx::method_registers.clear_color_b();
switch (rsx::method_registers.surface_color())
{
case rsx::surface_color_format::x32:
case rsx::surface_color_format::w16z16y16x16:
case rsx::surface_color_format::w32z32y32x32:
{
// Nop
colormask = 0;
break;
}
case rsx::surface_color_format::b8:
{
rsx::get_b8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_b8_clearmask(colormask);
break;
}
case rsx::surface_color_format::g8b8:
{
rsx::get_g8b8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_g8b8_r8g8_clearmask(colormask);
break;
}
case rsx::surface_color_format::r5g6b5:
{
rsx::get_rgb565_clear_color(clear_r, clear_g, clear_b, clear_a);
break;
}
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
{
rsx::get_a1rgb555_clear_color(clear_r, clear_g, clear_b, clear_a, 255);
break;
}
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
{
rsx::get_a1rgb555_clear_color(clear_r, clear_g, clear_b, clear_a, 0);
break;
}
case rsx::surface_color_format::a8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
{
rsx::get_abgr8_clear_color(clear_r, clear_g, clear_b, clear_a);
colormask = rsx::get_abgr8_clearmask(colormask);
break;
}
default:
{
break;
}
}
if (colormask)
{
clear_cmd.clear_color.mask = colormask;
clear_cmd.clear_color.attachment_count = static_cast<u8>(m_rtts.m_bound_render_target_ids.size());
clear_cmd.clear_color.r = clear_r;
clear_cmd.clear_color.g = clear_g;
clear_cmd.clear_color.b = clear_b;
clear_cmd.clear_color.a = clear_a;
clear_cmd.aspect_mask |= gl::image_aspect::color;
if (!full_frame)
{
for (const auto& index : m_rtts.m_bound_render_target_ids)
{
m_rtts.m_bound_render_targets[index].second->write_barrier(cmd);
}
}
update_color = true;
}
}
if (update_color || update_z)
{
m_rtts.on_write({ update_color, update_color, update_color, update_color }, update_z);
}
if (!full_frame)
{
gl_state.enable(GL_SCISSOR_TEST);
}
gl::clear_attachments(cmd, clear_cmd);
}
bool GLGSRender::load_program()
{
const auto shadermode = g_cfg.video.shadermode.get();
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
get_current_fragment_program(fs_sampler_state);
ensure(current_fragment_program.valid);
get_current_vertex_program(vs_sampler_state);
}
else if (m_program)
{
if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
{
return true;
}
if (shadermode == shader_mode::interpreter_only)
{
m_program = m_shader_interpreter.get(current_fp_metadata);
return true;
}
}
const bool was_interpreter = m_shader_interpreter.is_interpreter(m_program);
m_vertex_prog = nullptr;
m_fragment_prog = nullptr;
if (shadermode != shader_mode::interpreter_only) [[likely]]
{
void* pipeline_properties = nullptr;
std::tie(m_program, m_vertex_prog, m_fragment_prog) = m_prog_buffer.get_graphics_pipeline(current_vertex_program, current_fragment_program, pipeline_properties,
shadermode != shader_mode::recompiler, true);
if (m_prog_buffer.check_cache_missed())
{
// Notify the user with HUD notification
if (g_cfg.misc.show_shader_compilation_hint)
{
rsx::overlays::show_shader_compile_notification();
}
}
else
{
ensure(m_program);
m_program->sync();
}
}
else
{
m_program = nullptr;
}
if (!m_program && (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only))
{
// Fall back to interpreter
m_program = m_shader_interpreter.get(current_fp_metadata);
if (was_interpreter != m_shader_interpreter.is_interpreter(m_program))
{
// Program has changed, reupload
m_interpreter_state = rsx::invalidate_pipeline_bits;
}
}
return m_program != nullptr;
}
void GLGSRender::load_program_env()
{
if (!m_program)
{
fmt::throw_exception("Unreachable right now");
}
const u32 fragment_constants_size = current_fp_metadata.program_constants_buffer_length;
const bool update_transform_constants = m_graphics_state & rsx::pipeline_state::transform_constants_dirty;
const bool update_fragment_constants = (m_graphics_state & rsx::pipeline_state::fragment_constants_dirty) && fragment_constants_size;
const bool update_vertex_env = m_graphics_state & rsx::pipeline_state::vertex_state_dirty;
const bool update_fragment_env = m_graphics_state & rsx::pipeline_state::fragment_state_dirty;
const bool update_fragment_texture_env = m_graphics_state & rsx::pipeline_state::fragment_texture_state_dirty;
const bool update_instruction_buffers = !!m_interpreter_state && m_shader_interpreter.is_interpreter(m_program);
const bool update_raster_env = rsx::method_registers.polygon_stipple_enabled() && (m_graphics_state & rsx::pipeline_state::polygon_stipple_pattern_dirty);
if (manually_flush_ring_buffers)
{
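// Legacy buffer path: persistent mapping is unavailable, so reserve heap
// storage up front for every block that will be written this draw.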
if (update_fragment_env) m_fragment_env_buffer->reserve_storage_on_heap(128);
if (update_vertex_env) m_vertex_env_buffer->reserve_storage_on_heap(256);
if (update_fragment_texture_env) m_texture_parameters_buffer->reserve_storage_on_heap(256);
if (update_fragment_constants) m_fragment_constants_buffer->reserve_storage_on_heap(utils::align(fragment_constants_size, 256));
if (update_transform_constants) m_transform_constants_buffer->reserve_storage_on_heap(8192);
if (update_raster_env) m_raster_env_ring_buffer->reserve_storage_on_heap(128);
if (update_instruction_buffers)
{
m_vertex_instructions_buffer->reserve_storage_on_heap(513 * 16);
m_fragment_instructions_buffer->reserve_storage_on_heap(current_fp_metadata.program_ucode_length);
}
}
if (update_vertex_env)
{
// Vertex state
auto mapping = m_vertex_env_buffer->alloc_from_heap(144, m_uniform_buffer_offset_align);
auto buf = static_cast<u8*>(mapping.first);
fill_scale_offset_data(buf, false);
fill_user_clip_data(buf + 64);
*(reinterpret_cast<u32*>(buf + 128)) = rsx::method_registers.transform_branch_bits();
*(reinterpret_cast<f32*>(buf + 132)) = rsx::method_registers.point_size() * rsx::get_resolution_scale();
*(reinterpret_cast<f32*>(buf + 136)) = rsx::method_registers.clip_min();
*(reinterpret_cast<f32*>(buf + 140)) = rsx::method_registers.clip_max();
m_vertex_env_buffer->bind_range(GL_VERTEX_PARAMS_BIND_SLOT, mapping.second, 144);
}
if (update_transform_constants)
{
// Vertex constants
u32 mem_offset = 0;
auto mem_alloc = [&](usz size) -> std::pair<void*, usz>
{
const auto mapping = m_transform_constants_buffer->alloc_from_heap(static_cast<u32>(size), m_uniform_buffer_offset_align);
mem_offset = mapping.second;
return { mapping.first, size };
};
rsx::io_buffer io_buf(mem_alloc);
upload_transform_constants(io_buf);
if (!io_buf.empty())
{
m_transform_constants_buffer->bind_range(GL_VERTEX_CONSTANT_BUFFERS_BIND_SLOT, mem_offset, ::size32(io_buf));
}
}
if (update_fragment_constants && !update_instruction_buffers)
{
// Fragment constants
auto mapping = m_fragment_constants_buffer->alloc_from_heap(fragment_constants_size, m_uniform_buffer_offset_align);
auto buf = static_cast<u8*>(mapping.first);
m_prog_buffer.fill_fragment_constants_buffer({ reinterpret_cast<float*>(buf), fragment_constants_size },
*ensure(m_fragment_prog), current_fragment_program, true);
m_fragment_constants_buffer->bind_range(GL_FRAGMENT_CONSTANT_BUFFERS_BIND_SLOT, mapping.second, fragment_constants_size);
}
if (update_fragment_env)
{
// Fragment state
auto mapping = m_fragment_env_buffer->alloc_from_heap(32, m_uniform_buffer_offset_align);
auto buf = static_cast<u8*>(mapping.first);
fill_fragment_state_buffer(buf, current_fragment_program);
m_fragment_env_buffer->bind_range(GL_FRAGMENT_STATE_BIND_SLOT, mapping.second, 32);
}
if (update_fragment_texture_env)
{
// Fragment texture parameters
auto mapping = m_texture_parameters_buffer->alloc_from_heap(768, m_uniform_buffer_offset_align);
current_fragment_program.texture_params.write_to(mapping.first, current_fp_metadata.referenced_textures_mask);
m_texture_parameters_buffer->bind_range(GL_FRAGMENT_TEXTURE_PARAMS_BIND_SLOT, mapping.second, 768);
}
if (update_raster_env)
{
auto mapping = m_raster_env_ring_buffer->alloc_from_heap(128, m_uniform_buffer_offset_align);
std::memcpy(mapping.first, rsx::method_registers.polygon_stipple_pattern(), 128);
m_raster_env_ring_buffer->bind_range(GL_RASTERIZER_STATE_BIND_SLOT, mapping.second, 128);
m_graphics_state.clear(rsx::pipeline_state::polygon_stipple_pattern_dirty);
}
if (update_instruction_buffers)
{
if (m_interpreter_state & rsx::vertex_program_dirty)
{
// Attach vertex buffer data
const auto vp_block_length = current_vp_metadata.ucode_length + 16;
auto vp_mapping = m_vertex_instructions_buffer->alloc_from_heap(vp_block_length, 16);
auto vp_buf = static_cast<u8*>(vp_mapping.first);
auto vp_config = reinterpret_cast<u32*>(vp_buf);
vp_config[0] = current_vertex_program.base_address;
vp_config[1] = current_vertex_program.entry;
vp_config[2] = current_vertex_program.output_mask;
vp_config[3] = rsx::method_registers.two_side_light_en() ? 1u : 0u;
std::memcpy(vp_buf + 16, current_vertex_program.data.data(), current_vp_metadata.ucode_length);
m_vertex_instructions_buffer->bind_range(GL_INTERPRETER_VERTEX_BLOCK, vp_mapping.second, vp_block_length);
m_vertex_instructions_buffer->notify();
}
if (m_interpreter_state & rsx::fragment_program_dirty)
{
// Attach fragment buffer data
const auto fp_block_length = current_fp_metadata.program_ucode_length + 80;
auto fp_mapping = m_fragment_instructions_buffer->alloc_from_heap(fp_block_length, 16);
auto fp_buf = static_cast<u8*>(fp_mapping.first);
// Control mask
const auto control_masks = reinterpret_cast<u32*>(fp_buf);
control_masks[0] = rsx::method_registers.shader_control();
control_masks[1] = current_fragment_program.texture_state.texture_dimensions;
// Bind textures
m_shader_interpreter.update_fragment_textures(fs_sampler_state, current_fp_metadata.referenced_textures_mask, reinterpret_cast<u32*>(fp_buf + 16));
std::memcpy(fp_buf + 80, current_fragment_program.get_data(), current_fragment_program.ucode_length);
m_fragment_instructions_buffer->bind_range(GL_INTERPRETER_FRAGMENT_BLOCK, fp_mapping.second, fp_block_length);
m_fragment_instructions_buffer->notify();
}
}
if (manually_flush_ring_buffers)
{
if (update_fragment_env) m_fragment_env_buffer->unmap();
if (update_vertex_env) m_vertex_env_buffer->unmap();
if (update_fragment_texture_env) m_texture_parameters_buffer->unmap();
if (update_fragment_constants) m_fragment_constants_buffer->unmap();
if (update_transform_constants) m_transform_constants_buffer->unmap();
if (update_raster_env) m_raster_env_ring_buffer->unmap();
if (update_instruction_buffers)
{
m_vertex_instructions_buffer->unmap();
m_fragment_instructions_buffer->unmap();
}
}
m_graphics_state.clear(
rsx::pipeline_state::fragment_state_dirty |
rsx::pipeline_state::vertex_state_dirty |
rsx::pipeline_state::transform_constants_dirty |
rsx::pipeline_state::fragment_constants_dirty |
rsx::pipeline_state::fragment_texture_state_dirty);
}
void GLGSRender::upload_transform_constants(const rsx::io_buffer& buffer)
{
const usz transform_constants_size = (!m_vertex_prog || m_vertex_prog->has_indexed_constants) ? 8192 : m_vertex_prog->constant_ids.size() * 16;
if (transform_constants_size)
{
const auto constant_ids = (transform_constants_size == 8192)
? std::span<const u16>{}
: std::span<const u16>(m_vertex_prog->constant_ids);
buffer.reserve(transform_constants_size);
fill_vertex_program_constants_data(buffer.data(), constant_ids);
}
}
void GLGSRender::update_vertex_env(const gl::vertex_upload_info& upload_info)
{
if (manually_flush_ring_buffers)
{
m_vertex_layout_buffer->reserve_storage_on_heap(128 + 16);
}
// Vertex layout state
auto mapping = m_vertex_layout_buffer->alloc_from_heap(128 + 16, m_uniform_buffer_offset_align);
auto buf = static_cast<u32*>(mapping.first);
buf[0] = upload_info.vertex_index_base;
buf[1] = upload_info.vertex_index_offset;
buf += 4;
fill_vertex_layout_state(m_vertex_layout, upload_info.first_vertex, upload_info.allocated_vertex_count, reinterpret_cast<s32*>(buf), upload_info.persistent_mapping_offset, upload_info.volatile_mapping_offset);
m_vertex_layout_buffer->bind_range(GL_VERTEX_LAYOUT_BIND_SLOT, mapping.second, 128 + 16);
if (manually_flush_ring_buffers)
{
m_vertex_layout_buffer->unmap();
}
}
void GLGSRender::patch_transform_constants(rsx::context* ctx, u32 index, u32 count)
{
if (!m_vertex_prog)
{
// Shouldn't be reachable, but handle it correctly anyway
m_graphics_state |= rsx::pipeline_state::transform_constants_dirty;
return;
}
std::pair<u32, u32> data_range {};
void* data_source = nullptr;
const auto bound_range = m_transform_constants_buffer->bound_range();
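// Three cases follow: a fully indexed layout permits a direct in-place
// patch; a compacted layout may still translate the dirty range; otherwise
// the whole constant buffer is rebuilt and re-uploaded via scratch memory.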
if (m_vertex_prog->has_indexed_constants)
{
// We're working with a full range. We can do a direct patch in this case since no index translation is required.
const auto byte_count = count * 16;
const auto byte_offset = index * 16;
data_range = { bound_range.first + byte_offset, byte_count};
data_source = ®S(ctx)->transform_constants[index];
}
else if (auto xform_id = m_vertex_prog->TranslateConstantsRange(index, count); xform_id >= 0)
{
const auto write_offset = xform_id * 16;
const auto byte_count = count * 16;
data_range = { bound_range.first + write_offset, byte_count };
data_source = ®S(ctx)->transform_constants[index];
}
else
{
auto allocate_mem = [&](usz size) -> std::pair<void*, usz>
{
m_scratch_buffer.resize(size);
return { m_scratch_buffer.data(), size };
};
rsx::io_buffer iobuf(allocate_mem);
upload_transform_constants(iobuf);
data_range = { bound_range.first, ::size32(iobuf) };
data_source = iobuf.data();
}
// Move data to memory that the GPU can work with
if (manually_flush_ring_buffers)
{
m_scratch_ring_buffer->reserve_storage_on_heap(data_range.second);
}
auto mapping = m_scratch_ring_buffer->alloc_from_heap(data_range.second, 16);
std::memcpy(mapping.first, data_source, data_range.second);
if (manually_flush_ring_buffers)
{
m_scratch_ring_buffer->unmap();
}
m_scratch_ring_buffer->notify();
// Do the transfer to patch the constants on the host device
m_scratch_ring_buffer->copy_to(m_transform_constants_buffer.get(), mapping.second, data_range.first, data_range.second);
}
bool GLGSRender::on_access_violation(u32 address, bool is_writing)
{
const bool can_flush = is_current_thread();
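// Only the thread owning the GL context can flush immediately; faults
// raised from other threads defer the invalidation instead.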
const rsx::invalidation_cause cause = is_writing
? (can_flush ? rsx::invalidation_cause::write : rsx::invalidation_cause::deferred_write)
: (can_flush ? rsx::invalidation_cause::read : rsx::invalidation_cause::deferred_read);
auto cmd = can_flush ? gl::command_context{ gl_state } : gl::command_context{};
auto result = m_gl_texture_cache.invalidate_address(cmd, address, cause);
if (result.invalidate_samplers)
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
if (!result.violation_handled)
{
return zcull_ctrl->on_access_violation(address);
}
if (result.num_flushable > 0)
{
auto &task = post_flush_request(address, result);
m_eng_interrupt_mask |= rsx::backend_interrupt;
vm::temporary_unlock();
task.producer_wait();
}
return true;
}
void GLGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
{
gl::command_context cmd{ gl_state };
auto data = m_gl_texture_cache.invalidate_range(cmd, range, cause);
AUDIT(data.empty());
if (cause == rsx::invalidation_cause::unmap && data.violation_handled)
{
m_gl_texture_cache.purge_unreleased_sections();
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
}
}
void GLGSRender::on_semaphore_acquire_wait()
{
if (!work_queue.empty() ||
(async_flip_requested & flip_request::emu_requested))
{
do_local_task(rsx::FIFO::state::lock_wait);
}
}
void GLGSRender::do_local_task(rsx::FIFO::state state)
{
if (!work_queue.empty())
{
std::lock_guard lock(queue_guard);
work_queue.remove_if([](auto &q) { return q.received; });
for (auto& q : work_queue)
{
if (q.processed) continue;
gl::command_context cmd{ gl_state };
q.result = m_gl_texture_cache.flush_all(cmd, q.section_data);
q.processed = true;
}
}
else if (!in_begin_end && state != rsx::FIFO::state::lock_wait)
{
if (m_graphics_state & rsx::pipeline_state::framebuffer_reads_dirty)
{
//This will re-engage locks and break the texture cache if another thread is waiting in access violation handler!
//Only call when there are no waiters
m_gl_texture_cache.do_update();
m_graphics_state.clear(rsx::pipeline_state::framebuffer_reads_dirty);
}
}
rsx::thread::do_local_task(state);
if (state == rsx::FIFO::state::lock_wait)
{
// Critical check finished
return;
}
if (m_overlay_manager)
{
const auto should_ignore = in_begin_end && state != rsx::FIFO::state::empty;
if ((async_flip_requested & flip_request::native_ui) && !should_ignore && !is_stopped())
{
rsx::display_flip_info_t info{};
info.buffer = current_display_buffer;
flip(info);
}
}
}
gl::work_item& GLGSRender::post_flush_request(u32 address, gl::texture_cache::thrashed_set& flush_data)
{
std::lock_guard lock(queue_guard);
auto &result = work_queue.emplace_back();
result.address_to_flush = address;
result.section_data = std::move(flush_data);
return result;
}
bool GLGSRender::scaled_image_from_memory(const rsx::blit_src_info& src, const rsx::blit_dst_info& dst, bool interpolate)
{
gl::command_context cmd{ gl_state };
if (m_gl_texture_cache.blit(cmd, src, dst, interpolate, m_rtts))
{
m_samplers_dirty.store(true);
return true;
}
return false;
}
void GLGSRender::notify_tile_unbound(u32 tile)
{
// TODO: Handle texture writeback
if (false)
{
u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
m_rtts.invalidate_surface_address(addr, false);
}
{
std::lock_guard lock(m_sampler_mutex);
m_samplers_dirty.store(true);
}
}
bool GLGSRender::release_GCM_label(u32 address, u32 args)
{
if (!backend_config.supports_host_gpu_labels)
{
return false;
}
auto host_ctx = ensure(m_host_dma_ctrl->host_ctx());
if (host_ctx->texture_loads_completed())
{
// We're about to poll waiting for GPU state, ensure the context is still valid.
gl::check_state();
// All texture loads already seen by the host GPU
// Wait for all previously submitted labels to be flushed
m_host_dma_ctrl->drain_label_queue();
return false;
}
const auto mapping = gl::map_dma(address, 4);
const auto write_data = std::bit_cast<u32, be_t<u32>>(args);
const auto release_event_id = host_ctx->on_label_acquire();
// We don't have async texture loads yet, so just release both the label and the commands complete
u64 write_buf[2] = { write_data, release_event_id };
const auto host_read_offset = m_enqueued_host_write_buffer->alloc(16, 16);
m_enqueued_host_write_buffer->get().sub_data(host_read_offset, 16, write_buf);
// Now write to DMA and then to host context
m_enqueued_host_write_buffer->get().copy_to(mapping.second, host_read_offset, mapping.first, 4);
m_enqueued_host_write_buffer->get().copy_to(m_host_gpu_context_data.get(), host_read_offset + 8, ::offset32(&rsx::host_gpu_context_t::commands_complete_event), 8);
m_enqueued_host_write_buffer->push_barrier(host_read_offset, 16);
host_ctx->on_label_release();
return true;
}
void GLGSRender::enqueue_host_context_write(u32 offset, u32 size, const void* data)
{
ensure(size <= 8);
const u32 host_read_offset = m_enqueued_host_write_buffer->alloc(8, 16);
m_enqueued_host_write_buffer->get().sub_data(host_read_offset, size, data);
m_enqueued_host_write_buffer->get().copy_to(m_host_gpu_context_data.get(), host_read_offset, offset, size);
m_enqueued_host_write_buffer->push_barrier(host_read_offset, 16);
}
void GLGSRender::on_guest_texture_read()
{
if (!backend_config.supports_host_gpu_labels)
{
return;
}
// Tag the read as being in progress
u64 event_id = m_host_dma_ctrl->host_ctx()->inc_counter();
m_host_dma_ctrl->host_ctx()->texture_load_request_event = event_id;
enqueue_host_context_write(::offset32(&rsx::host_gpu_context_t::texture_load_complete_event), 8, &event_id);
}
void GLGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query)
{
query->result = 0;
glBeginQuery(m_occlusion_type, query->driver_handle);
}
void GLGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query)
{
ensure(query->active);
glEndQuery(m_occlusion_type);
}
bool GLGSRender::check_occlusion_query_status(rsx::reports::occlusion_query_info* query)
{
if (!query->num_draws)
return true;
GLint status = GL_TRUE;
glGetQueryObjectiv(query->driver_handle, GL_QUERY_RESULT_AVAILABLE, &status);
return status != GL_FALSE;
}
void GLGSRender::get_occlusion_query_result(rsx::reports::occlusion_query_info* query)
{
if (query->num_draws)
{
GLint result = 0;
glGetQueryObjectiv(query->driver_handle, GL_QUERY_RESULT, &result);
query->result += result;
}
}
void GLGSRender::discard_occlusion_query(rsx::reports::occlusion_query_info* query)
{
if (query->active)
{
//Discard is being called on an active query, close it
glEndQuery(m_occlusion_type);
}
}
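// Illustrative sketch (not part of the original file): how the occlusion query
// hooks above fit together when driven end-to-end. The driver loop shown here
// is an assumption for illustration; only the GL calls and the query_info
// fields mirror the real implementation.
static void occlusion_query_example(GLenum occlusion_type, rsx::reports::occlusion_query_info* query)
{
// begin_occlusion_query(): reset the accumulator and open the query scope
query->result = 0;
glBeginQuery(occlusion_type, query->driver_handle);
// ... draw calls are submitted here, bumping query->num_draws per draw ...
// end_occlusion_query(): close the query scope
glEndQuery(occlusion_type);
// check_occlusion_query_status(): non-blocking availability poll
GLint available = GL_FALSE;
while (query->num_draws && available == GL_FALSE)
{
glGetQueryObjectiv(query->driver_handle, GL_QUERY_RESULT_AVAILABLE, &available);
}
// get_occlusion_query_result(): fetch and accumulate the sample count
if (query->num_draws)
{
GLint samples = 0;
glGetQueryObjectiv(query->driver_handle, GL_QUERY_RESULT, &samples);
query->result += samples;
}
}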
| 42,435 | C++ | .cpp | 1,099 | 35.813467 | 210 | 0.722821 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

| 5,487 | GLTexture.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLTexture.cpp |
#include "stdafx.h"
#include "GLTexture.h"
#include "GLCompute.h"
#include "GLRenderTargets.h"
#include "GLOverlays.h"
#include "GLGSRender.h"
#include "glutils/blitter.h"
#include "glutils/ring_buffer.h"
#include "../GCM.h"
#include "../RSXThread.h"
#include "../RSXTexture.h"
#include "util/asm.hpp"
namespace gl
{
namespace debug
{
extern void set_vis_texture(texture*);
}
scratch_ring_buffer g_typeless_transfer_buffer;
legacy_ring_buffer g_upload_transfer_buffer;
scratch_ring_buffer g_compute_decode_buffer;
scratch_ring_buffer g_deswizzle_scratch_buffer;
void destroy_global_texture_resources()
{
g_typeless_transfer_buffer.remove();
g_upload_transfer_buffer.remove();
g_compute_decode_buffer.remove();
g_deswizzle_scratch_buffer.remove();
}
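// Helper dispatch: selects a cs_deswizzle_3d instantiation whose block type
// (u32/u64/u128) matches block_size; WordType and SwapBytes are forwarded to
// control how elements are byteswapped during the deswizzle pass.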
template <typename WordType, bool SwapBytes>
void do_deswizzle_transformation(gl::command_context& cmd, u32 block_size, buffer* dst, u32 dst_offset, buffer* src, u32 src_offset, u32 data_length, u16 width, u16 height, u16 depth)
{
switch (block_size)
{
case 4:
gl::get_compute_task<gl::cs_deswizzle_3d<u32, WordType, SwapBytes>>()->run(
cmd, dst, dst_offset, src, src_offset,
data_length, width, height, depth, 1);
break;
case 8:
gl::get_compute_task<gl::cs_deswizzle_3d<u64, WordType, SwapBytes>>()->run(
cmd, dst, dst_offset, src, src_offset,
data_length, width, height, depth, 1);
break;
case 16:
gl::get_compute_task<gl::cs_deswizzle_3d<u128, WordType, SwapBytes>>()->run(
cmd, dst, dst_offset, src, src_offset,
data_length, width, height, depth, 1);
break;
default:
fmt::throw_exception("Unreachable");
}
}
GLenum get_target(rsx::texture_dimension_extended type)
{
switch (type)
{
case rsx::texture_dimension_extended::texture_dimension_1d: return GL_TEXTURE_1D;
case rsx::texture_dimension_extended::texture_dimension_2d: return GL_TEXTURE_2D;
case rsx::texture_dimension_extended::texture_dimension_cubemap: return GL_TEXTURE_CUBE_MAP;
case rsx::texture_dimension_extended::texture_dimension_3d: return GL_TEXTURE_3D;
}
fmt::throw_exception("Unknown texture target");
}
GLenum get_sized_internal_format(u32 texture_format)
{
switch (texture_format)
{
case CELL_GCM_TEXTURE_B8: return GL_R8;
case CELL_GCM_TEXTURE_A1R5G5B5: return GL_BGR5_A1;
case CELL_GCM_TEXTURE_A4R4G4B4: return GL_RGBA4;
case CELL_GCM_TEXTURE_R5G6B5: return GL_RGB565;
case CELL_GCM_TEXTURE_A8R8G8B8: return GL_BGRA8;
case CELL_GCM_TEXTURE_G8B8: return GL_RG8;
case CELL_GCM_TEXTURE_R6G5B5: return GL_RGB565;
case CELL_GCM_TEXTURE_DEPTH24_D8: return GL_DEPTH24_STENCIL8;
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: return GL_DEPTH32F_STENCIL8;
case CELL_GCM_TEXTURE_DEPTH16: return GL_DEPTH_COMPONENT16;
case CELL_GCM_TEXTURE_DEPTH16_FLOAT: return GL_DEPTH_COMPONENT32F;
case CELL_GCM_TEXTURE_X16: return GL_R16;
case CELL_GCM_TEXTURE_Y16_X16: return GL_RG16;
case CELL_GCM_TEXTURE_R5G5B5A1: return GL_RGB5_A1;
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT: return GL_RGBA16F;
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT: return GL_RGBA32F;
case CELL_GCM_TEXTURE_X32_FLOAT: return GL_R32F;
case CELL_GCM_TEXTURE_D1R5G5B5: return GL_BGR5_A1;
case CELL_GCM_TEXTURE_D8R8G8B8: return GL_BGRA8;
case CELL_GCM_TEXTURE_Y16_X16_FLOAT: return GL_RG16F;
case CELL_GCM_TEXTURE_COMPRESSED_DXT1: return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
case CELL_GCM_TEXTURE_COMPRESSED_DXT23: return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
case CELL_GCM_TEXTURE_COMPRESSED_DXT45: return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
case CELL_GCM_TEXTURE_COMPRESSED_HILO8: return GL_RG8;
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: return GL_RG8_SNORM;
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8: return GL_BGRA8;
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: return GL_BGRA8;
}
fmt::throw_exception("Unknown texture format 0x%x", texture_format);
}
std::tuple<GLenum, GLenum> get_format_type(u32 texture_format)
{
switch (texture_format)
{
case CELL_GCM_TEXTURE_B8: return std::make_tuple(GL_RED, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_A1R5G5B5: return std::make_tuple(GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV);
case CELL_GCM_TEXTURE_A4R4G4B4: return std::make_tuple(GL_BGRA, GL_UNSIGNED_SHORT_4_4_4_4);
case CELL_GCM_TEXTURE_R5G6B5: return std::make_tuple(GL_RGB, GL_UNSIGNED_SHORT_5_6_5);
case CELL_GCM_TEXTURE_A8R8G8B8: return std::make_tuple(GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV);
case CELL_GCM_TEXTURE_G8B8: return std::make_tuple(GL_RG, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_R6G5B5: return std::make_tuple(GL_RGB, GL_UNSIGNED_SHORT_5_6_5);
case CELL_GCM_TEXTURE_DEPTH24_D8: return std::make_tuple(GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8);
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: return std::make_tuple(GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV);
case CELL_GCM_TEXTURE_DEPTH16: return std::make_tuple(GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT);
case CELL_GCM_TEXTURE_DEPTH16_FLOAT: return std::make_tuple(GL_DEPTH_COMPONENT, GL_FLOAT);
case CELL_GCM_TEXTURE_X16: return std::make_tuple(GL_RED, GL_UNSIGNED_SHORT);
case CELL_GCM_TEXTURE_Y16_X16: return std::make_tuple(GL_RG, GL_UNSIGNED_SHORT);
case CELL_GCM_TEXTURE_R5G5B5A1: return std::make_tuple(GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1);
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT: return std::make_tuple(GL_RGBA, GL_HALF_FLOAT);
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT: return std::make_tuple(GL_RGBA, GL_FLOAT);
case CELL_GCM_TEXTURE_X32_FLOAT: return std::make_tuple(GL_RED, GL_FLOAT);
case CELL_GCM_TEXTURE_D1R5G5B5: return std::make_tuple(GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV);
case CELL_GCM_TEXTURE_D8R8G8B8: return std::make_tuple(GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV);
case CELL_GCM_TEXTURE_Y16_X16_FLOAT: return std::make_tuple(GL_RG, GL_HALF_FLOAT);
case CELL_GCM_TEXTURE_COMPRESSED_DXT1: return std::make_tuple(GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_DXT23: return std::make_tuple(GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_DXT45: return std::make_tuple(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_HILO8: return std::make_tuple(GL_RG, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: return std::make_tuple(GL_RG, GL_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8: return std::make_tuple(GL_BGRA, GL_UNSIGNED_BYTE);
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: return std::make_tuple(GL_BGRA, GL_UNSIGNED_BYTE);
}
fmt::throw_exception("Compressed or unknown texture format 0x%x", texture_format);
}
pixel_buffer_layout get_format_type(texture::internal_format format)
{
switch (format)
{
case texture::internal_format::compressed_rgba_s3tc_dxt1:
case texture::internal_format::compressed_rgba_s3tc_dxt3:
case texture::internal_format::compressed_rgba_s3tc_dxt5:
return { GL_RGBA, GL_UNSIGNED_BYTE, 1, false };
case texture::internal_format::r8:
return { GL_RED, GL_UNSIGNED_BYTE, 1, false };
case texture::internal_format::r16:
return { GL_RED, GL_UNSIGNED_SHORT, 2, true };
case texture::internal_format::r32f:
return { GL_RED, GL_FLOAT, 4, true };
case texture::internal_format::rg8:
return { GL_RG, GL_UNSIGNED_SHORT, 2, true };
case texture::internal_format::rg16:
return { GL_RG, GL_UNSIGNED_SHORT, 2, true };
case texture::internal_format::rg16f:
return { GL_RG, GL_HALF_FLOAT, 2, true };
case texture::internal_format::rgb565:
return { GL_RGB, GL_UNSIGNED_SHORT_5_6_5, 2, true };
case texture::internal_format::rgb5a1:
return { GL_RGB, GL_UNSIGNED_SHORT_5_5_5_1, 2, true };
case texture::internal_format::bgr5a1:
return { GL_RGB, GL_UNSIGNED_SHORT_1_5_5_5_REV, 2, true };
case texture::internal_format::rgba4:
return { GL_BGRA, GL_UNSIGNED_SHORT_4_4_4_4, 2, false };
case texture::internal_format::rgba8:
return { GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, 4, true };
case texture::internal_format::bgra8:
return { GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 4, true };
case texture::internal_format::rgba16f:
return { GL_RGBA, GL_HALF_FLOAT, 2, true };
case texture::internal_format::rgba32f:
return { GL_RGBA, GL_FLOAT, 4, true };
case texture::internal_format::depth16:
return { GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, 2, true };
case texture::internal_format::depth32f:
return { GL_DEPTH_COMPONENT, GL_FLOAT, 2, true };
case texture::internal_format::depth24_stencil8:
case texture::internal_format::depth32f_stencil8:
return { GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, 4, true };
default:
fmt::throw_exception("Unexpected internal format 0x%X", static_cast<u32>(format));
}
}
pixel_buffer_layout get_format_type(const gl::texture* tex)
{
auto ret = get_format_type(tex->get_internal_format());
if (tex->format_class() == RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)
{
ret.type = GL_FLOAT_32_UNSIGNED_INT_24_8_REV;
}
return ret;
}
std::array<GLenum, 4> get_swizzle_remap(u32 texture_format)
{
// NOTE: This must be in ARGB order in all forms below.
switch (texture_format)
{
case CELL_GCM_TEXTURE_A1R5G5B5:
case CELL_GCM_TEXTURE_R5G5B5A1:
case CELL_GCM_TEXTURE_R6G5B5:
case CELL_GCM_TEXTURE_R5G6B5:
case CELL_GCM_TEXTURE_A4R4G4B4:
case CELL_GCM_TEXTURE_A8R8G8B8:
case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
return{ GL_ALPHA, GL_RED, GL_GREEN, GL_BLUE };
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
return{ GL_RED, GL_RED, GL_RED, GL_RED };
case CELL_GCM_TEXTURE_B8:
return{ GL_ONE, GL_RED, GL_RED, GL_RED };
case CELL_GCM_TEXTURE_X16:
return{ GL_RED, GL_ONE, GL_RED, GL_ONE };
case CELL_GCM_TEXTURE_X32_FLOAT:
return{ GL_RED, GL_RED, GL_RED, GL_RED };
case CELL_GCM_TEXTURE_G8B8:
return{ GL_GREEN, GL_RED, GL_GREEN, GL_RED };
case CELL_GCM_TEXTURE_Y16_X16:
return{ GL_GREEN, GL_RED, GL_GREEN, GL_RED };
case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
return{ GL_RED, GL_GREEN, GL_RED, GL_GREEN };
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
return{ GL_ALPHA, GL_RED, GL_GREEN, GL_BLUE };
case CELL_GCM_TEXTURE_D1R5G5B5:
case CELL_GCM_TEXTURE_D8R8G8B8:
return{ GL_ONE, GL_RED, GL_GREEN, GL_BLUE };
case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
return{ GL_RED, GL_GREEN, GL_RED, GL_GREEN };
}
fmt::throw_exception("Unknown format 0x%x", texture_format);
}
cs_shuffle_base* get_trivial_transform_job(const pixel_buffer_layout& pack_info)
{
if (!pack_info.swap_bytes)
{
return nullptr;
}
switch (pack_info.size)
{
case 1:
return nullptr;
case 2:
return get_compute_task<gl::cs_shuffle_16>();
case 4:
return get_compute_task<gl::cs_shuffle_32>();
default:
fmt::throw_exception("Unsupported format");
}
}
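// Illustrative sketch (not part of the original file): a CPU-side reference
// for what the cs_shuffle_16 / cs_shuffle_32 jobs selected above perform on
// the GPU, namely an in-place byteswap over 2- or 4-byte elements in a
// buffer. The function name and raw-pointer interface are assumptions.
static void cpu_byteswap_reference(u8* data, usz size_in_bytes, u32 element_size)
{
for (usz i = 0; i + element_size <= size_in_bytes; i += element_size)
{
// Reverse the bytes within one element
for (u32 a = 0, b = element_size - 1; a < b; ++a, --b)
{
const u8 tmp = data[i + a];
data[i + a] = data[i + b];
data[i + b] = tmp;
}
}
}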
void* copy_image_to_buffer(gl::command_context& cmd, const pixel_buffer_layout& pack_info, const gl::texture* src, gl::buffer* dst,
u32 dst_offset, const int src_level, const coord3u& src_region, image_memory_requirements* mem_info)
{
auto initialize_scratch_mem = [&]() -> bool // skip_transform
{
const u64 max_mem = (mem_info->memory_required) ? mem_info->memory_required : mem_info->image_size_in_bytes;
if (!(*dst) || max_mem > static_cast<u64>(dst->size()))
{
if (*dst) dst->remove();
dst->create(buffer::target::ssbo, max_mem, nullptr, buffer::memory_type::local, 0);
}
if (auto as_vi = dynamic_cast<const gl::viewable_image*>(src);
src->get_target() == gl::texture::target::texture2D &&
as_vi)
{
// RGBA8 <-> D24X8 bitcasts are very common conversions due to PS3 coding hacks & workarounds.
switch (src->get_internal_format())
{
case gl::texture::internal_format::depth24_stencil8:
gl::get_compute_task<gl::cs_d24x8_to_ssbo>()->run(cmd,
const_cast<gl::viewable_image*>(as_vi), dst, dst_offset,
{ {src_region.x, src_region.y}, {src_region.width, src_region.height} },
pack_info);
return true;
case gl::texture::internal_format::rgba8:
case gl::texture::internal_format::bgra8:
gl::get_compute_task<gl::cs_rgba8_to_ssbo>()->run(cmd,
const_cast<gl::viewable_image*>(as_vi), dst, dst_offset,
{ {src_region.x, src_region.y}, {src_region.width, src_region.height} },
pack_info);
return true;
default:
break;
}
}
dst->bind(buffer::target::pixel_pack);
src->copy_to(reinterpret_cast<void*>(static_cast<uintptr_t>(dst_offset)), static_cast<texture::format>(pack_info.format), static_cast<texture::type>(pack_info.type), src_level, src_region, {});
return false;
};
void* result = reinterpret_cast<void*>(static_cast<uintptr_t>(dst_offset));
if (src->aspect() == image_aspect::color ||
pack_info.type == GL_UNSIGNED_SHORT ||
pack_info.type == GL_UNSIGNED_INT_24_8)
{
if (!initialize_scratch_mem())
{
if (auto job = get_trivial_transform_job(pack_info))
{
job->run(cmd, dst, static_cast<u32>(mem_info->image_size_in_bytes), dst_offset);
}
}
}
else if (pack_info.type == GL_FLOAT)
{
ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 4));
mem_info->memory_required = (mem_info->image_size_in_texels * 6);
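// Scratch budget: 4 bytes of F32 input plus 2 bytes of F16 output per texel (hence x6)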
ensure(!initialize_scratch_mem());
if (pack_info.swap_bytes) [[ likely ]]
{
get_compute_task<cs_fconvert_task<f32, f16, false, true>>()->run(cmd, dst, dst_offset,
static_cast<u32>(mem_info->image_size_in_bytes), static_cast<u32>(mem_info->image_size_in_bytes));
}
else
{
get_compute_task<cs_fconvert_task<f32, f16, false, false>>()->run(cmd, dst, dst_offset,
static_cast<u32>(mem_info->image_size_in_bytes), static_cast<u32>(mem_info->image_size_in_bytes));
}
result = reinterpret_cast<void*>(mem_info->image_size_in_bytes + dst_offset);
}
else if (pack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV)
{
ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 8));
mem_info->memory_required = (mem_info->image_size_in_texels * 12);
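// Scratch budget: 8 bytes of D32FX8 input plus 4 bytes of X8D24F output per texel (hence x12)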
ensure(!initialize_scratch_mem());
if (pack_info.swap_bytes)
{
get_compute_task<cs_shuffle_d32fx8_to_x8d24f<true>>()->run(cmd, dst, dst_offset,
static_cast<u32>(mem_info->image_size_in_bytes), static_cast<u32>(mem_info->image_size_in_texels));
}
else
{
get_compute_task<cs_shuffle_d32fx8_to_x8d24f<false>>()->run(cmd, dst, dst_offset,
static_cast<u32>(mem_info->image_size_in_bytes), static_cast<u32>(mem_info->image_size_in_texels));
}
result = reinterpret_cast<void*>(mem_info->image_size_in_bytes + dst_offset);
}
else
{
fmt::throw_exception("Invalid depth/stencil type 0x%x", pack_info.type);
}
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT | GL_PIXEL_BUFFER_BARRIER_BIT);
return result;
}
void copy_buffer_to_image(gl::command_context& cmd, const pixel_buffer_layout& unpack_info, gl::buffer* src, gl::texture* dst,
const void* src_offset, const int dst_level, const coord3u& dst_region, image_memory_requirements* mem_info)
{
buffer scratch_mem;
buffer* transfer_buf = src;
bool skip_barrier = false;
u32 in_offset = static_cast<u32>(reinterpret_cast<u64>(src_offset));
u32 out_offset = in_offset;
const auto& caps = gl::get_driver_caps();
auto initialize_scratch_mem = [&]()
{
if (in_offset >= mem_info->memory_required)
{
return;
}
const u64 max_mem = mem_info->memory_required + mem_info->image_size_in_bytes;
if ((max_mem + in_offset) <= static_cast<u64>(src->size()))
{
out_offset = static_cast<u32>(in_offset + mem_info->image_size_in_bytes);
return;
}
scratch_mem.create(buffer::target::pixel_pack, max_mem, nullptr, buffer::memory_type::local, 0);
glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
src->copy_to(&scratch_mem, in_offset, 0, mem_info->image_size_in_bytes);
in_offset = 0;
out_offset = static_cast<u32>(mem_info->image_size_in_bytes);
transfer_buf = &scratch_mem;
};
if ((dst->aspect() & image_aspect::stencil) == 0 || caps.ARB_shader_stencil_export_supported)
{
// We do not need to use the driver's builtin transport mechanism
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
std::unique_ptr<gl::texture> scratch;
std::unique_ptr<gl::texture_view> scratch_view;
coordu image_region = { {dst_region.x, dst_region.y}, {dst_region.width, dst_region.height} };
switch (dst->get_target())
{
case texture::target::texture3D:
{
// Upload to splatted image and do the final copy GPU-side
image_region.height *= dst_region.depth;
scratch = std::make_unique<gl::texture>(
GL_TEXTURE_2D,
image_region.x + image_region.width, image_region.y + image_region.height, 1, 1,
static_cast<GLenum>(dst->get_internal_format()), dst->format_class());
scratch_view = std::make_unique<gl::nil_texture_view>(scratch.get());
break;
}
case texture::target::textureCUBE:
{
const subresource_range range = { image_aspect::depth | image_aspect::color, static_cast<GLuint>(dst_level), 1, dst_region.z , 1 };
scratch_view = std::make_unique<gl::texture_view>(dst, GL_TEXTURE_2D, range);
break;
}
case texture::target::texture1D:
{
scratch = std::make_unique<gl::texture>(
GL_TEXTURE_2D,
image_region.x + image_region.width, 1, 1, 1,
static_cast<GLenum>(dst->get_internal_format()), dst->format_class());
scratch_view = std::make_unique<gl::nil_texture_view>(scratch.get());
break;
}
default:
{
ensure(dst->layers() == 1);
if (dst->levels() > 1) [[ likely ]]
{
const subresource_range range = { image_aspect::depth | image_aspect::color, static_cast<GLuint>(dst_level), 1, 0 , 1 };
scratch_view = std::make_unique<gl::texture_view>(dst, GL_TEXTURE_2D, range);
}
else
{
scratch_view = std::make_unique<gl::nil_texture_view>(dst);
}
break;
}
}
// If possible, decode using a compute transform to potentially have asynchronous scheduling
bool use_compute_transform = (
dst->aspect() == gl::image_aspect::color && // Cannot use image_load_store with depth images
caps.subvendor_ATI == false); // The old AMD/ATI driver does not support image writeonly without format specifier
if (use_compute_transform)
{
switch (dst->get_internal_format())
{
case texture::internal_format::bgr5a1:
case texture::internal_format::rgb5a1:
case texture::internal_format::rgb565:
case texture::internal_format::rgba4:
// Packed formats are a problem with image_load_store
use_compute_transform = false;
break;
default:
break;
}
}
if (use_compute_transform)
{
gl::get_compute_task<gl::cs_ssbo_to_color_image>()->run(cmd, transfer_buf, scratch_view.get(), out_offset, image_region, unpack_info);
}
else
{
gl::get_overlay_pass<gl::rp_ssbo_to_generic_texture>()->run(cmd, transfer_buf, scratch_view.get(), out_offset, image_region, unpack_info);
}
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT);
switch (dst->get_target())
{
case texture::target::texture1D:
{
const position3u transfer_offset = { dst_region.position.x, 0, 0 };
g_hw_blitter->copy_image(cmd, scratch.get(), dst, 0, dst_level, transfer_offset, transfer_offset, { dst_region.width, 1, 1 });
break;
}
case texture::target::texture3D:
{
// Copy each layer from the splatted 2D scratch image into the corresponding 3D slice
for (u32 layer = dst_region.z, i = 0; i < dst_region.depth; ++i, ++layer)
{
const position3u src_offset = { dst_region.position.x, dst_region.position.y + (i * dst_region.height), 0 };
const position3u dst_offset = { dst_region.position.x, dst_region.position.y, layer };
g_hw_blitter->copy_image(cmd, scratch.get(), dst, 0, dst_level, src_offset, dst_offset, { dst_region.width, dst_region.height, 1 });
}
break;
}
default: break;
}
}
else
{
// Stencil format on NV. Use driver upload path
if (unpack_info.type == GL_UNSIGNED_INT_24_8)
{
if (auto job = get_trivial_transform_job(unpack_info))
{
job->run(cmd, src, static_cast<u32>(mem_info->image_size_in_bytes), in_offset);
}
else
{
skip_barrier = true;
}
}
else if (unpack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV)
{
mem_info->memory_required = (mem_info->image_size_in_texels * 8);
initialize_scratch_mem();
if (unpack_info.swap_bytes)
{
get_compute_task<cs_shuffle_x8d24f_to_d32fx8<true>>()->run(cmd, transfer_buf, in_offset, out_offset, static_cast<u32>(mem_info->image_size_in_texels));
}
else
{
get_compute_task<cs_shuffle_x8d24f_to_d32fx8<false>>()->run(cmd, transfer_buf, in_offset, out_offset, static_cast<u32>(mem_info->image_size_in_texels));
}
}
else
{
fmt::throw_exception("Invalid depth/stencil type 0x%x", unpack_info.type);
}
if (!skip_barrier)
{
glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT);
}
glBindBuffer(GL_SHADER_STORAGE_BUFFER, GL_NONE);
transfer_buf->bind(buffer::target::pixel_unpack);
dst->copy_from(reinterpret_cast<void*>(u64(out_offset)), static_cast<texture::format>(unpack_info.format),
static_cast<texture::type>(unpack_info.type), dst_level, dst_region, {});
}
}
gl::viewable_image* create_texture(u32 gcm_format, u16 width, u16 height, u16 depth, u16 mipmaps,
rsx::texture_dimension_extended type)
{
const GLenum target = get_target(type);
const GLenum internal_format = get_sized_internal_format(gcm_format);
const auto format_class = rsx::classify_format(gcm_format);
return new gl::viewable_image(target, width, height, depth, mipmaps, internal_format, format_class);
}
void fill_texture(gl::command_context& cmd, texture* dst, int format,
const std::vector<rsx::subresource_layout> &input_layouts,
bool is_swizzled, GLenum gl_format, GLenum gl_type, rsx::simple_array<std::byte>& staging_buffer)
{
const auto driver_caps = gl::get_driver_caps();
rsx::texture_uploader_capabilities caps
{
.supports_byteswap = true,
.supports_vtc_decoding = false,
.supports_hw_deswizzle = driver_caps.ARB_compute_shader_supported,
.supports_zero_copy = false,
.alignment = 4
};
pixel_unpack_settings unpack_settings;
unpack_settings.row_length(0).alignment(4);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, GL_NONE);
glBindBuffer(GL_PIXEL_PACK_BUFFER, GL_NONE);
if (rsx::is_compressed_host_format(format)) [[likely]]
{
caps.supports_vtc_decoding = driver_caps.vendor_NVIDIA;
unpack_settings.apply();
const GLsizei format_block_size = (format == CELL_GCM_TEXTURE_COMPRESSED_DXT1) ? 8 : 16;
for (const rsx::subresource_layout& layout : input_layouts)
{
rsx::io_buffer io_buf = staging_buffer;
upload_texture_subresource(io_buf, layout, format, is_swizzled, caps);
switch (dst->get_target())
{
case texture::target::texture1D:
{
const GLsizei size = layout.width_in_block * format_block_size;
ensure(usz(size) <= staging_buffer.size());
DSA_CALL(CompressedTextureSubImage1D, dst->id(), GL_TEXTURE_1D, layout.level, 0, layout.width_in_texel, gl_format, size, staging_buffer.data());
break;
}
case texture::target::texture2D:
{
const GLsizei size = layout.width_in_block * layout.height_in_block * format_block_size;
ensure(usz(size) <= staging_buffer.size());
DSA_CALL(CompressedTextureSubImage2D, dst->id(), GL_TEXTURE_2D, layout.level, 0, 0, layout.width_in_texel, layout.height_in_texel, gl_format, size, staging_buffer.data());
break;
}
case texture::target::textureCUBE:
{
const GLsizei size = layout.width_in_block * layout.height_in_block * format_block_size;
ensure(usz(size) <= staging_buffer.size());
if (gl::get_driver_caps().ARB_dsa_supported)
{
glCompressedTextureSubImage3D(dst->id(), layout.level, 0, 0, layout.layer, layout.width_in_texel, layout.height_in_texel, 1, gl_format, size, staging_buffer.data());
}
else
{
glCompressedTextureSubImage2DEXT(dst->id(), GL_TEXTURE_CUBE_MAP_POSITIVE_X + layout.layer, layout.level, 0, 0, layout.width_in_texel, layout.height_in_texel, gl_format, size, staging_buffer.data());
}
break;
}
case texture::target::texture3D:
{
const GLsizei size = layout.width_in_block * layout.height_in_block * layout.depth * format_block_size;
ensure(usz(size) <= staging_buffer.size());
DSA_CALL(CompressedTextureSubImage3D, dst->id(), GL_TEXTURE_3D, layout.level, 0, 0, 0, layout.width_in_texel, layout.height_in_texel, layout.depth, gl_format, size, staging_buffer.data());
break;
}
default:
{
fmt::throw_exception("Unreachable");
}
}
}
}
else
{
std::pair<void*, u32> upload_scratch_mem = {}, compute_scratch_mem = {};
image_memory_requirements mem_info;
pixel_buffer_layout mem_layout;
std::span<std::byte> dst_buffer = staging_buffer;
void* out_pointer = staging_buffer.data();
u8 block_size_in_bytes = rsx::get_format_block_size_in_bytes(format);
u64 image_linear_size = staging_buffer.size();
const auto min_required_buffer_size = std::max<u64>(utils::align(image_linear_size * 4, 0x100000), 16 * 0x100000);
if (driver_caps.ARB_compute_shader_supported)
{
if (g_upload_transfer_buffer.size() < static_cast<GLsizeiptr>(min_required_buffer_size))
{
g_upload_transfer_buffer.remove();
g_upload_transfer_buffer.create(gl::buffer::target::pixel_unpack, min_required_buffer_size);
}
if (g_compute_decode_buffer.size() < min_required_buffer_size)
{
g_compute_decode_buffer.remove();
g_compute_decode_buffer.create(gl::buffer::target::ssbo, min_required_buffer_size);
}
out_pointer = nullptr;
}
for (const rsx::subresource_layout& layout : input_layouts)
{
if (driver_caps.ARB_compute_shader_supported)
{
u64 row_pitch = rsx::align2<u64, u64>(layout.width_in_block * block_size_in_bytes, caps.alignment);
if (!rsx::is_compressed_host_format(format))
{
// Handle emulated compressed formats with host unpack (R8G8 compressed)
row_pitch = std::max<u64>(row_pitch, dst->pitch());
}
image_linear_size = row_pitch * layout.height_in_block * layout.depth;
compute_scratch_mem = { nullptr, g_compute_decode_buffer.alloc(static_cast<u32>(image_linear_size), 256) };
compute_scratch_mem.first = reinterpret_cast<void*>(static_cast<uintptr_t>(compute_scratch_mem.second));
g_upload_transfer_buffer.reserve_storage_on_heap(static_cast<u32>(image_linear_size));
upload_scratch_mem = g_upload_transfer_buffer.alloc_from_heap(static_cast<u32>(image_linear_size), 256);
dst_buffer = { reinterpret_cast<std::byte*>(upload_scratch_mem.first), image_linear_size };
}
rsx::io_buffer io_buf = dst_buffer;
caps.supports_hw_deswizzle = (is_swizzled && driver_caps.ARB_compute_shader_supported && image_linear_size > 4096);
auto op = upload_texture_subresource(io_buf, layout, format, is_swizzled, caps);
// Define upload region
coord3u region;
region.x = 0;
region.y = 0;
region.z = layout.layer;
region.width = layout.width_in_texel;
region.height = layout.height_in_texel;
region.depth = layout.depth;
if (driver_caps.ARB_compute_shader_supported)
{
// 0. Preconf
mem_layout.alignment = static_cast<u8>(caps.alignment);
mem_layout.swap_bytes = op.require_swap;
mem_layout.format = gl_format;
mem_layout.type = gl_type;
mem_layout.size = block_size_in_bytes;
// 2. Upload memory to GPU
if (!op.require_deswizzle)
{
g_upload_transfer_buffer.unmap();
g_upload_transfer_buffer.copy_to(&g_compute_decode_buffer.get(), upload_scratch_mem.second, compute_scratch_mem.second, image_linear_size);
}
else
{
// 2.1 Copy data to deswizzle buf
if (g_deswizzle_scratch_buffer.size() < min_required_buffer_size)
{
g_deswizzle_scratch_buffer.remove();
g_deswizzle_scratch_buffer.create(gl::buffer::target::ssbo, min_required_buffer_size);
}
u32 deswizzle_data_offset = g_deswizzle_scratch_buffer.alloc(static_cast<u32>(image_linear_size), 256);
g_upload_transfer_buffer.unmap();
g_upload_transfer_buffer.copy_to(&g_deswizzle_scratch_buffer.get(), upload_scratch_mem.second, deswizzle_data_offset, static_cast<u32>(image_linear_size));
// 2.2 Apply compute transform to deswizzle input and dump it in compute_scratch_mem
ensure(op.element_size == 2 || op.element_size == 4);
const auto block_size = op.element_size * op.block_length;
if (op.require_swap)
{
mem_layout.swap_bytes = false;
if (op.element_size == 4) [[ likely ]]
{
do_deswizzle_transformation<u32, true>(cmd, block_size,
&g_compute_decode_buffer.get(), compute_scratch_mem.second, &g_deswizzle_scratch_buffer.get(), deswizzle_data_offset,
static_cast<u32>(image_linear_size), layout.width_in_texel, layout.height_in_texel, layout.depth);
}
else
{
do_deswizzle_transformation<u16, true>(cmd, block_size,
&g_compute_decode_buffer.get(), compute_scratch_mem.second, &g_deswizzle_scratch_buffer.get(), deswizzle_data_offset,
static_cast<u32>(image_linear_size), layout.width_in_texel, layout.height_in_texel, layout.depth);
}
}
else
{
if (op.element_size == 4) [[ likely ]]
{
do_deswizzle_transformation<u32, false>(cmd, block_size,
&g_compute_decode_buffer.get(), compute_scratch_mem.second, &g_deswizzle_scratch_buffer.get(), deswizzle_data_offset,
static_cast<u32>(image_linear_size), layout.width_in_texel, layout.height_in_texel, layout.depth);
}
else
{
do_deswizzle_transformation<u16, false>(cmd, block_size,
&g_compute_decode_buffer.get(), compute_scratch_mem.second, &g_deswizzle_scratch_buffer.get(), deswizzle_data_offset,
static_cast<u32>(image_linear_size), layout.width_in_texel, layout.height_in_texel, layout.depth);
}
}
// Barrier
g_deswizzle_scratch_buffer.push_barrier(deswizzle_data_offset, static_cast<u32>(image_linear_size));
}
// 3. Update configuration
mem_info.image_size_in_texels = image_linear_size / block_size_in_bytes;
mem_info.image_size_in_bytes = image_linear_size;
mem_info.memory_required = 0;
// 4. Dispatch compute routines
copy_buffer_to_image(cmd, mem_layout, &g_compute_decode_buffer.get(), dst, compute_scratch_mem.first, layout.level, region, &mem_info);
// Barrier
g_compute_decode_buffer.push_barrier(compute_scratch_mem.second, static_cast<u32>(image_linear_size));
}
else
{
unpack_settings.swap_bytes(op.require_swap);
dst->copy_from(out_pointer, static_cast<texture::format>(gl_format), static_cast<texture::type>(gl_type), layout.level, region, unpack_settings);
}
}
}
}
std::array<GLenum, 4> apply_swizzle_remap(const std::array<GLenum, 4>& swizzle_remap, const rsx::texture_channel_remap_t& decoded_remap)
{
return decoded_remap.remap<GLenum>(swizzle_remap, GL_ZERO, GL_ONE);
}
void upload_texture(gl::command_context& cmd, texture* dst, u32 gcm_format, bool is_swizzled, const std::vector<rsx::subresource_layout>& subresources_layout)
{
// Calculate staging buffer size
rsx::simple_array<std::byte> data_upload_buf;
if (rsx::is_compressed_host_format(gcm_format))
{
const auto& desc = subresources_layout[0];
const u32 texture_data_sz = desc.width_in_block * desc.height_in_block * desc.depth * rsx::get_format_block_size_in_bytes(gcm_format);
data_upload_buf.resize(texture_data_sz);
}
else
{
const auto aligned_pitch = utils::align<u32>(dst->pitch(), 4);
const u32 texture_data_sz = dst->depth() * dst->height() * aligned_pitch;
data_upload_buf.resize(texture_data_sz);
}
// TODO: GL drivers support byteswapping and this should be used instead of doing so manually
const auto format_type = get_format_type(gcm_format);
const GLenum gl_format = std::get<0>(format_type);
const GLenum gl_type = std::get<1>(format_type);
fill_texture(cmd, dst, gcm_format, subresources_layout, is_swizzled, gl_format, gl_type, data_upload_buf);
// Notify the renderer of the upload
auto renderer = static_cast<GLGSRender*>(rsx::get_current_renderer());
renderer->on_guest_texture_read();
}
u32 get_format_texel_width(GLenum format)
{
switch (format)
{
case GL_R8:
return 1;
case GL_R32F:
case GL_RG16:
case GL_RG16F:
case GL_RGBA8:
case GL_BGRA8:
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
return 4;
case GL_R16:
case GL_RG8:
case GL_RGB565:
return 2;
case GL_RGBA16F:
return 8;
case GL_RGBA32F:
return 16;
case GL_DEPTH_COMPONENT16:
case GL_DEPTH_COMPONENT32F:
return 2;
case GL_DEPTH24_STENCIL8:
case GL_DEPTH32F_STENCIL8:
return 4;
default:
fmt::throw_exception("Unexpected internal format 0x%X", static_cast<u32>(format));
}
}
std::pair<bool, u32> get_format_convert_flags(GLenum format)
{
switch (format)
{
// 8-bit
case GL_R8:
return { false, 1 };
case GL_RGBA8:
case GL_BGRA8:
return { true, 4 };
// 16-bit
case GL_RG8:
case GL_RG16:
case GL_RG16F:
case GL_R16:
case GL_RGB565:
return { true, 2 };
// 32-bit
case GL_R32F:
case GL_RGBA32F:
return { true, 4 };
// DXT
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
return { false, 1 };
// Depth
case GL_DEPTH_COMPONENT16:
case GL_DEPTH_COMPONENT32F:
return { true, 2 };
case GL_DEPTH24_STENCIL8:
case GL_DEPTH32F_STENCIL8:
return { true, 4 };
default:
break;
}
fmt::throw_exception("Unexpected internal format 0x%X", static_cast<u32>(format));
}
bool formats_are_bitcast_compatible(GLenum format1, GLenum format2)
{
if (format1 == format2) [[likely]]
{
return true;
}
// Formats are compatible if the following conditions are met:
// 1. Texel sizes must match
// 2. Both formats require no transforms (basic memcpy) or...
// 3. Both formats have the same transform (e.g. RG16_UNORM to RG16_SFLOAT; both are downloaded and uploaded with a 2-byte byteswap)
if (format1 == GL_BGRA8 || format2 == GL_BGRA8)
{
return false;
}
if (get_format_texel_width(format1) != get_format_texel_width(format2))
{
return false;
}
const auto transform_a = get_format_convert_flags(format1);
const auto transform_b = get_format_convert_flags(format2);
if (transform_a.first == transform_b.first)
{
return !transform_a.first || (transform_a.second == transform_b.second);
}
return false;
}
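// Illustrative sketch (not part of the original file): concrete outcomes of
// the rules above. GL_R32F and GL_RGBA8 share a 4-byte texel and the same
// 4-byte swap transform, so they are bitcast-compatible. GL_RGBA8 and GL_RG16
// also share a 4-byte texel but swap with different element sizes (4 vs 2),
// so they are not. GL_BGRA8 is rejected outright by the early check.
static void bitcast_compatibility_examples()
{
ensure(formats_are_bitcast_compatible(GL_R32F, GL_RGBA8));
ensure(!formats_are_bitcast_compatible(GL_RGBA8, GL_RG16));
ensure(!formats_are_bitcast_compatible(GL_BGRA8, GL_RGBA8));
}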
bool formats_are_bitcast_compatible(const texture* texture1, const texture* texture2)
{
if (const u32 transfer_class = texture1->format_class() | texture2->format_class();
transfer_class > RSX_FORMAT_CLASS_COLOR)
{
// If any one of the two images is a depth format, the other must match exactly or bust
return (texture1->format_class() == texture2->format_class());
}
return formats_are_bitcast_compatible(static_cast<GLenum>(texture1->get_internal_format()), static_cast<GLenum>(texture2->get_internal_format()));
}
void copy_typeless(gl::command_context& cmd, texture * dst, const texture * src, const coord3u& dst_region, const coord3u& src_region)
{
const auto src_bpp = src->pitch() / src->width();
const auto dst_bpp = dst->pitch() / dst->width();
image_memory_requirements src_mem = { src_region.width * src_region.height, src_region.width * src_bpp * src_region.height, 0ull };
image_memory_requirements dst_mem = { dst_region.width * dst_region.height, dst_region.width * dst_bpp * dst_region.height, 0ull };
const auto& caps = gl::get_driver_caps();
auto pack_info = get_format_type(src);
auto unpack_info = get_format_type(dst);
// D32FS8 can be read back as D24S8 or D32S8X24. In case of the latter, double memory requirements
if (pack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV)
{
src_mem.image_size_in_bytes *= 2;
}
if (unpack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV)
{
dst_mem.image_size_in_bytes *= 2;
}
if (caps.ARB_compute_shader_supported) [[likely]]
{
bool skip_transform = false;
if ((src->aspect() | dst->aspect()) == gl::image_aspect::color)
{
skip_transform = (pack_info.format == unpack_info.format &&
pack_info.type == unpack_info.type &&
pack_info.swap_bytes == unpack_info.swap_bytes &&
pack_info.size == unpack_info.size);
}
if (skip_transform) [[likely]]
{
// Disable byteswap to make the transport operation passthrough
pack_info.swap_bytes = false;
unpack_info.swap_bytes = false;
}
u32 scratch_offset = 0;
const u64 min_storage_requirement = src_mem.image_size_in_bytes + dst_mem.image_size_in_bytes;
const u64 min_required_buffer_size = utils::align(min_storage_requirement, 256);
if (g_typeless_transfer_buffer.size() >= min_required_buffer_size) [[ likely ]]
{
scratch_offset = g_typeless_transfer_buffer.alloc(static_cast<u32>(min_storage_requirement), 256);
}
else
{
const auto new_size = std::max(min_required_buffer_size, g_typeless_transfer_buffer.size() + (64 * 0x100000));
g_typeless_transfer_buffer.create(gl::buffer::target::ssbo, new_size);
}
void* data_ptr = copy_image_to_buffer(cmd, pack_info, src, &g_typeless_transfer_buffer.get(), scratch_offset, 0, src_region, &src_mem);
copy_buffer_to_image(cmd, unpack_info, &g_typeless_transfer_buffer.get(), dst, data_ptr, 0, dst_region, &dst_mem);
// Not truly range-accurate, but should cover most of what we care about
g_typeless_transfer_buffer.push_barrier(scratch_offset, static_cast<u32>(min_storage_requirement));
// Cleanup
// NOTE: glBindBufferRange also binds the buffer to the old-school target.
// Unbind it to avoid glitching later
glBindBuffer(GL_SHADER_STORAGE_BUFFER, GL_NONE);
glBindBuffer(GL_PIXEL_PACK_BUFFER, GL_NONE);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, GL_NONE);
}
else
{
const u64 max_mem = std::max(src_mem.image_size_in_bytes, dst_mem.image_size_in_bytes);
if (max_mem > static_cast<u64>(g_typeless_transfer_buffer.size()))
{
g_typeless_transfer_buffer.remove();
g_typeless_transfer_buffer.create(buffer::target::pixel_pack, max_mem);
}
// Simplify pack/unpack information to something OpenGL can natively digest
auto remove_depth_transformation = [](const texture* tex, pixel_buffer_layout& pack_info)
{
if (tex->aspect() & image_aspect::depth)
{
switch (pack_info.type)
{
case GL_UNSIGNED_INT_24_8:
pack_info.swap_bytes = false;
break;
case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
pack_info.type = GL_UNSIGNED_INT_24_8;
pack_info.swap_bytes = false;
break;
case GL_FLOAT:
pack_info.type = GL_HALF_FLOAT;
break;
default: break;
}
}
};
remove_depth_transformation(src, pack_info);
remove_depth_transformation(dst, unpack_info);
// Attempt to compensate for the lack of compute shader modifiers:
// if crossing the aspect boundary between color and depth
// and one image is depth, invert byteswap for the other one
const auto cross_aspect_test = (image_aspect::color | image_aspect::depth);
const auto test = (src->aspect() | dst->aspect()) & cross_aspect_test;
if (test == cross_aspect_test)
{
if (src->aspect() & image_aspect::depth)
{
// Source is depth, modify unpack rule
if (pack_info.size == 4 && unpack_info.size == 4)
{
unpack_info.swap_bytes = !unpack_info.swap_bytes;
}
}
else
{
// Dest is depth, modify pack rule
if (pack_info.size == 4 && unpack_info.size == 4)
{
pack_info.swap_bytes = !pack_info.swap_bytes;
}
}
}
// Start pack operation
pixel_pack_settings pack_settings{};
pack_settings.swap_bytes(pack_info.swap_bytes);
g_typeless_transfer_buffer.get().bind(buffer::target::pixel_pack);
src->copy_to(nullptr, static_cast<texture::format>(pack_info.format), static_cast<texture::type>(pack_info.type), 0, src_region, pack_settings);
glBindBuffer(GL_PIXEL_PACK_BUFFER, GL_NONE);
// Start unpack operation
pixel_unpack_settings unpack_settings{};
unpack_settings.swap_bytes(unpack_info.swap_bytes);
g_typeless_transfer_buffer.get().bind(buffer::target::pixel_unpack);
dst->copy_from(nullptr, static_cast<texture::format>(unpack_info.format), static_cast<texture::type>(unpack_info.type), 0, dst_region, unpack_settings);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, GL_NONE);
}
}
void copy_typeless(gl::command_context& cmd, texture* dst, const texture* src)
{
const coord3u src_area = { {}, src->size3D() };
const coord3u dst_area = { {}, dst->size3D() };
copy_typeless(cmd, dst, src, dst_area, src_area);
}
void clear_attachments(gl::command_context& cmd, const clear_cmd_info& info)
{
// Compile the clear command at the end; the intervening operations only set up the write masks and clear values it consumes.
GLbitfield clear_mask = 0;
if (info.aspect_mask & gl::image_aspect::color)
{
for (u32 buffer_id = 0; buffer_id < info.clear_color.attachment_count; ++buffer_id)
{
cmd->color_maski(buffer_id, info.clear_color.mask);
}
cmd->clear_color(info.clear_color.r, info.clear_color.g, info.clear_color.b, info.clear_color.a);
clear_mask |= GL_COLOR_BUFFER_BIT;
}
if (info.aspect_mask & gl::image_aspect::depth)
{
cmd->depth_mask(GL_TRUE);
cmd->clear_depth(info.clear_depth.value);
clear_mask |= GL_DEPTH_BUFFER_BIT;
}
if (info.aspect_mask & gl::image_aspect::stencil)
{
cmd->stencil_mask(info.clear_stencil.mask);
cmd->clear_stencil(info.clear_stencil.value);
clear_mask |= GL_STENCIL_BUFFER_BIT;
}
glClear(clear_mask);
}
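// Illustrative sketch (not part of the original file): the raw GL sequence a
// combined color + depth clear_cmd_info compiles down to, without the cmd
// state cache. The literal clear values are example inputs.
static void clear_color_depth_example()
{
glColorMaski(0, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); // cmd->color_maski(...)
glClearColor(0.f, 0.f, 0.f, 1.f);                    // cmd->clear_color(...)
glDepthMask(GL_TRUE);                                // cmd->depth_mask(...)
glClearDepth(1.0);                                   // cmd->clear_depth(...)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}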
}
| 42,442 | C++ | .cpp | 1,019 | 37.438665 | 204 | 0.69683 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 5,488 | OpenGL.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/OpenGL.cpp |
#include "stdafx.h"
#include "OpenGL.h"
#if defined(HAVE_WAYLAND)
#include <EGL/egl.h>
#endif
#ifdef _WIN32
extern "C"
{
// NVIDIA Optimus: Default to the dGPU instead of the iGPU (Driver: 302+)
__declspec(dllexport) DWORD NvOptimusEnablement = 0x00000001;
// AMD: Request dGPU High Performance (Driver: 13.35+)
__declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
}
#define OPENGL_PROC(p, n) p gl##n = nullptr
#define WGL_PROC(p, n) p wgl##n = nullptr
#define OPENGL_PROC2(p, n, tn) OPENGL_PROC(p, n)
#include "GLProcTable.h"
#undef OPENGL_PROC
#undef OPENGL_PROC2
#undef WGL_PROC
#endif
void gl::init()
{
#ifdef _WIN32
#define OPENGL_PROC(p, n) OPENGL_PROC2(p, gl##n, gl##n)
#define WGL_PROC(p, n) OPENGL_PROC2(p, wgl##n, wgl##n)
#define OPENGL_PROC2(p, n, tn) /*if(!gl##n)*/ if(!(n = reinterpret_cast<p>(wglGetProcAddress(#tn)))) rsx_log.error("OpenGL: initialization of " #tn " failed.")
#include "GLProcTable.h"
#undef OPENGL_PROC
#undef WGL_PROC
#undef OPENGL_PROC2
#endif
#ifdef __unix__
glewExperimental = true;
glewInit();
#ifdef HAVE_X11
glxewInit();
#endif
#endif
}
void gl::set_swapinterval(int interval)
{
#ifdef _WIN32
wglSwapIntervalEXT(interval);
#elif defined(HAVE_X11)
if (glXSwapIntervalEXT)
{
if (auto window = glXGetCurrentDrawable())
{
glXSwapIntervalEXT(glXGetCurrentDisplay(), window, interval);
return;
}
}
#ifdef HAVE_WAYLAND
if (auto egl_display = eglGetCurrentDisplay(); egl_display != EGL_NO_DISPLAY)
{
eglSwapInterval(egl_display, interval);
return;
}
#endif
//No existing GLX drawable or missing swap extension, and no current EGL display
rsx_log.error("Failed to set swap interval");
#else
rsx_log.error("Swap control not implemented for this platform. Vsync options not available. (interval=%d)", interval);
#endif
}
| 1,770 | C++ | .cpp | 66 | 25.181818 | 159 | 0.741156 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

| 5,489 | GLVertexProgram.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLVertexProgram.cpp |
#include "stdafx.h"
#include "GLVertexProgram.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "GLCommonDecompiler.h"
#include "../Program/GLSLCommon.h"
#include <algorithm>
std::string GLVertexDecompilerThread::getFloatTypeName(usz elementCount)
{
return glsl::getFloatTypeNameImpl(elementCount);
}
std::string GLVertexDecompilerThread::getIntTypeName(usz /*elementCount*/)
{
return "ivec4";
}
std::string GLVertexDecompilerThread::getFunction(FUNCTION f)
{
return glsl::getFunctionImpl(f);
}
std::string GLVertexDecompilerThread::compareFunction(COMPARE f, const std::string &Op0, const std::string &Op1, bool scalar)
{
return glsl::compareFunctionImpl(f, Op0, Op1, scalar);
}
void GLVertexDecompilerThread::insertHeader(std::stringstream &OS)
{
OS << "#version 430\n";
OS << "layout(std140, binding = " << GL_VERTEX_PARAMS_BIND_SLOT << ") uniform VertexContextBuffer\n";
OS << "{\n";
OS << " mat4 scale_offset_mat;\n";
OS << " ivec4 user_clip_enabled[2];\n";
OS << " vec4 user_clip_factor[2];\n";
OS << " uint transform_branch_bits;\n";
OS << " float point_size;\n";
OS << " float z_near;\n";
OS << " float z_far;\n";
OS << "};\n\n";
OS << "layout(std140, binding = " << GL_VERTEX_LAYOUT_BIND_SLOT << ") uniform VertexLayoutBuffer\n";
OS << "{\n";
OS << " uint vertex_base_index;\n";
OS << " uint vertex_index_offset;\n";
OS << " uvec4 input_attributes_blob[16 / 2];\n";
OS << "};\n\n";
}
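// For reference, assuming illustrative binding slots GL_VERTEX_PARAMS_BIND_SLOT == 0
// and GL_VERTEX_LAYOUT_BIND_SLOT == 1 (the actual constants may differ), the header
// emitted above has this shape:
//
//   #version 430
//   layout(std140, binding = 0) uniform VertexContextBuffer
//   {
//       mat4 scale_offset_mat;
//       ivec4 user_clip_enabled[2];
//       vec4 user_clip_factor[2];
//       uint transform_branch_bits;
//       float point_size;
//       float z_near;
//       float z_far;
//   };
//
//   layout(std140, binding = 1) uniform VertexLayoutBuffer
//   {
//       uint vertex_base_index;
//       uint vertex_index_offset;
//       uvec4 input_attributes_blob[16 / 2];
//   };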
void GLVertexDecompilerThread::insertInputs(std::stringstream& OS, const std::vector<ParamType>& /*inputs*/)
{
OS << "layout(location=0) uniform usamplerBuffer persistent_input_stream;\n"; //Data stream with persistent vertex data (cacheable)
OS << "layout(location=1) uniform usamplerBuffer volatile_input_stream;\n"; //Data stream with per-draw data (registers and immediate draw data)
}
void GLVertexDecompilerThread::insertConstants(std::stringstream& OS, const std::vector<ParamType>& constants)
{
for (const ParamType &PT: constants)
{
for (const ParamItem &PI : PT.items)
{
if (PI.name.starts_with("vc["))
{
OS << "layout(std140, binding = " << GL_VERTEX_CONSTANT_BUFFERS_BIND_SLOT << ") uniform VertexConstantsBuffer\n";
OS << "{\n";
OS << " vec4 " << PI.name << ";\n";
OS << "};\n\n";
continue;
}
OS << "uniform " << PT.type << " " << PI.name << ";\n";
}
}
}
static const vertex_reg_info reg_table[] =
{
{ "gl_Position", false, "dst_reg0", "", false },
{ "diff_color", true, "dst_reg1", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTDIFFUSE | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKDIFFUSE },
{ "spec_color", true, "dst_reg2", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTSPECULAR | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKSPECULAR },
//These are only present when back variants are specified; otherwise the default diff/spec color vars are used for both front and back
{ "diff_color1", true, "dst_reg3", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTDIFFUSE | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKDIFFUSE },
{ "spec_color1", true, "dst_reg4", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTSPECULAR | CELL_GCM_ATTRIB_OUTPUT_MASK_BACKSPECULAR },
//Fog output shares a data source register with clip planes 0-2 so only declare when specified
{ "fog_c", true, "dst_reg5", ".xxxx", true, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_FOG },
//Warning: Always define all 3 clip plane groups together to avoid flickering with OpenGL
{ "gl_ClipDistance[0]", false, "dst_reg5", ".y * user_clip_factor[0].x", false, "user_clip_enabled[0].x > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC0 },
{ "gl_ClipDistance[1]", false, "dst_reg5", ".z * user_clip_factor[0].y", false, "user_clip_enabled[0].y > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC1 },
{ "gl_ClipDistance[2]", false, "dst_reg5", ".w * user_clip_factor[0].z", false, "user_clip_enabled[0].z > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC2 },
{ "gl_PointSize", false, "dst_reg6", ".x", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_POINTSIZE },
{ "gl_ClipDistance[3]", false, "dst_reg6", ".y * user_clip_factor[0].w", false, "user_clip_enabled[0].w > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC3 },
{ "gl_ClipDistance[4]", false, "dst_reg6", ".z * user_clip_factor[1].x", false, "user_clip_enabled[1].x > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC4 },
{ "gl_ClipDistance[5]", false, "dst_reg6", ".w * user_clip_factor[1].y", false, "user_clip_enabled[1].y > 0", "0.5", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_UC5 },
{ "tc0", true, "dst_reg7", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX0 },
{ "tc1", true, "dst_reg8", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX1 },
{ "tc2", true, "dst_reg9", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX2 },
{ "tc3", true, "dst_reg10", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX3 },
{ "tc4", true, "dst_reg11", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX4 },
{ "tc5", true, "dst_reg12", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX5 },
{ "tc6", true, "dst_reg13", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX6 },
{ "tc7", true, "dst_reg14", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX7 },
{ "tc8", true, "dst_reg15", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX8 },
{ "tc9", true, "dst_reg6", "", false, "", "", "", true, CELL_GCM_ATTRIB_OUTPUT_MASK_TEX9 } // In this line, dst_reg6 is correct since dst_reg goes from 0 to 15.
};
void GLVertexDecompilerThread::insertOutputs(std::stringstream& OS, const std::vector<ParamType>& /*outputs*/)
{
for (auto &i : reg_table)
{
if (i.need_declare)
{
// All outputs must always be declared to allow setting default values
OS << "layout(location=" << gl::get_varying_register_location(i.name) << ") out vec4 " << i.name << ";\n";
}
}
}
void GLVertexDecompilerThread::insertMainStart(std::stringstream & OS)
{
const auto& dev_caps = gl::get_driver_caps();
glsl::shader_properties properties2{};
properties2.domain = glsl::glsl_vertex_program;
properties2.require_lit_emulation = properties.has_lit_op;
properties2.emulate_zclip_transform = true;
properties2.emulate_depth_clip_only = dev_caps.NV_depth_buffer_float_supported;
properties2.low_precision_tests = dev_caps.vendor_NVIDIA;
properties2.require_explicit_invariance = dev_caps.vendor_MESA || (dev_caps.vendor_NVIDIA && g_cfg.video.shader_precision != gpu_preset_level::low);
insert_glsl_legacy_function(OS, properties2);
glsl::insert_vertex_input_fetch(OS, glsl::glsl_rules_opengl4, dev_caps.vendor_INTEL == false);
// Declare global registers with optional initialization
std::string registers;
if (ParamType *vec4Types = m_parr.SearchParam(PF_PARAM_OUT, "vec4"))
{
for (auto &PI : vec4Types->items)
{
if (registers.length())
registers += ", ";
else
registers = "vec4 ";
registers += PI.name;
if (!PI.value.empty())
{
// Simplify default initialization
if (PI.value == "vec4(0.0, 0.0, 0.0, 0.0)")
registers += " = vec4(0.)";
else
registers += " = " + PI.value;
}
}
}
if (!registers.empty())
{
OS << registers << ";\n";
}
OS << "void vs_main()\n";
OS << "{\n";
//Declare temporary registers, ignoring those mapped to outputs
for (const ParamType &PT : m_parr.params[PF_PARAM_NONE])
{
for (const ParamItem &PI : PT.items)
{
if (PI.name.starts_with("dst_reg"))
continue;
OS << " " << PT.type << " " << PI.name;
if (!PI.value.empty())
OS << " = " << PI.value;
OS << ";\n";
}
}
for (const ParamType &PT : m_parr.params[PF_PARAM_IN])
{
for (const ParamItem &PI : PT.items)
{
OS << " vec4 " << PI.name << "= read_location(" << std::to_string(PI.location) << ");\n";
}
}
}
void GLVertexDecompilerThread::insertMainEnd(std::stringstream & OS)
{
OS << "}\n\n";
OS << "void main ()\n";
OS << "{\n";
OS << "\n" << " vs_main();\n\n";
for (auto &i : reg_table)
{
if (!i.check_mask || i.test(rsx_vertex_program.output_mask))
{
if (m_parr.HasParam(PF_PARAM_OUT, "vec4", i.src_reg))
{
std::string condition = (!i.cond.empty()) ? "(" + i.cond + ") " : "";
if (condition.empty() || i.default_val.empty())
{
if (!condition.empty()) condition = "if " + condition;
OS << " " << condition << i.name << " = " << i.src_reg << i.src_reg_mask << ";\n";
}
else
{
//Insert if-else condition
OS << " " << i.name << " = " << condition << "? " << i.src_reg << i.src_reg_mask << ": " << i.default_val << ";\n";
}
// Register was marked for output and a properly initialized source register exists
// Nothing more to do
continue;
}
}
if (i.need_declare)
{
OS << " " << i.name << " = vec4(0., 0., 0., 1.);\n";
}
else if (i.check_mask_value == CELL_GCM_ATTRIB_OUTPUT_MASK_POINTSIZE)
{
// Default point size if none was generated by the program
OS << " gl_PointSize = point_size;\n";
}
}
OS << " gl_Position = gl_Position * scale_offset_mat;\n";
OS << " gl_Position = apply_zclip_xform(gl_Position, z_near, z_far);\n";
//GL clip-space depth is symmetric [-1, 1], while the RSX matrices already map depth to [0, 1].
//To target classic OpenGL we must undo that step with the eqn ln = (clip * 2) - 1, which also
//fully utilizes the 0-1 range of the depth buffer.
//This could be avoided entirely with glClipControl(GL_LOWER_LEFT, GL_ZERO_TO_ONE), but ClipControl
//only entered core OpenGL in version 4.5, so this remap is used as a workaround.
//NOTE: It is completely valid for games to use very large w values, causing the post-multiplied z
//to be in the hundreds. It is therefore critical that this step is done post-transform and the
//result re-scaled by w. (SEE Naruto: UNS)
//NOTE: On GPUs, poor fp32 precision means dividing z by w, then multiplying by w again gives
//slightly incorrect results. The equation is simplified algebraically to an addition and a
//subtraction, which gives more accurate results (fixes the flickering skybox in Dark Souls 2):
//OS << " float ndc_z = gl_Position.z / gl_Position.w;\n";
//OS << " ndc_z = (ndc_z * 2.) - 1.;\n";
//OS << " gl_Position.z = ndc_z * gl_Position.w;\n";
OS << " gl_Position.z = (gl_Position.z + gl_Position.z) - gl_Position.w;\n";
OS << "}\n";
}
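// Illustrative verification (not part of the decompiler): the emitted remap
// z' = 2z - w is algebraically identical to the commented-out chain,
//   ndc_z = z / w;  ndc_z = ndc_z * 2 - 1;  z' = ndc_z * w
//     => z' = ((z / w) * 2 - 1) * w = 2z - w
// but avoids the lossy fp32 divide / re-multiply round trip.
static bool verify_z_remap_equivalence(float z, float w)
{
const float via_ndc = ((z / w) * 2.f - 1.f) * w; // reference chain
const float direct = (z + z) - w;                // emitted form
const float diff = via_ndc - direct;
return diff > -1e-3f && diff < 1e-3f;            // equal up to fp error
}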
void GLVertexDecompilerThread::Task()
{
m_shader = Decompile();
}
GLVertexProgram::GLVertexProgram() = default;
GLVertexProgram::~GLVertexProgram()
{
Delete();
}
void GLVertexProgram::Decompile(const RSXVertexProgram& prog)
{
std::string source;
GLVertexDecompilerThread decompiler(prog, source, parr);
decompiler.Task();
has_indexed_constants = decompiler.properties.has_indexed_constants;
constant_ids = std::vector<u16>(decompiler.m_constant_ids.begin(), decompiler.m_constant_ids.end());
shader.create(::glsl::program_domain::glsl_vertex_program, source);
id = shader.id();
}
void GLVertexProgram::Delete()
{
shader.remove();
id = 0;
}
| 11,077 | C++ | .cpp | 246 | 42.406504 | 162 | 0.656685 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

| 5,490 | GLRenderTargets.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp |
#include "stdafx.h"
#include "GLGSRender.h"
#include "Emu/RSX/rsx_methods.h"
#include <span>
color_format rsx::internals::surface_color_format_to_gl(rsx::surface_color_format color_format)
{
//color format
switch (color_format)
{
case rsx::surface_color_format::r5g6b5:
return{ ::gl::texture::type::ushort_5_6_5, ::gl::texture::format::rgb, ::gl::texture::internal_format::rgb565, true };
case rsx::surface_color_format::a8r8g8b8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::bgra, ::gl::texture::internal_format::bgra8, true };
//These formats discard their alpha component, which is forced to 0 or 1
//All XBGR formats will have remapping before they can be read back in shaders as DRGB8
//Prefix o = 1, z = 0
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
return{ ::gl::texture::type::ushort_5_5_5_1, ::gl::texture::format::rgb, ::gl::texture::internal_format::bgr5a1, true,
{ ::gl::texture::channel::one, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return{ ::gl::texture::type::ushort_5_5_5_1, ::gl::texture::format::rgb, ::gl::texture::internal_format::bgr5a1, true,
{ ::gl::texture::channel::zero, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::bgra, ::gl::texture::internal_format::bgra8, true,
{ ::gl::texture::channel::zero, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::rgba, ::gl::texture::internal_format::rgba8, true,
{ ::gl::texture::channel::one, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::rgba, ::gl::texture::internal_format::rgba8, true,
{ ::gl::texture::channel::zero, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::bgra, ::gl::texture::internal_format::bgra8, true,
{ ::gl::texture::channel::one, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::b } };
case rsx::surface_color_format::w16z16y16x16:
return{ ::gl::texture::type::f16, ::gl::texture::format::rgba, ::gl::texture::internal_format::rgba16f, true};
case rsx::surface_color_format::w32z32y32x32:
return{ ::gl::texture::type::f32, ::gl::texture::format::rgba, ::gl::texture::internal_format::rgba32f, true};
case rsx::surface_color_format::b8:
return{ ::gl::texture::type::ubyte, ::gl::texture::format::r, ::gl::texture::internal_format::r8, false,
{ ::gl::texture::channel::one, ::gl::texture::channel::r, ::gl::texture::channel::r, ::gl::texture::channel::r } };
case rsx::surface_color_format::g8b8:
return{ ::gl::texture::type::ubyte, ::gl::texture::format::rg, ::gl::texture::internal_format::rg8, false,
{ ::gl::texture::channel::g, ::gl::texture::channel::r, ::gl::texture::channel::g, ::gl::texture::channel::r } };
case rsx::surface_color_format::x32:
return{ ::gl::texture::type::f32, ::gl::texture::format::r, ::gl::texture::internal_format::r32f, true,
{ ::gl::texture::channel::r, ::gl::texture::channel::r, ::gl::texture::channel::r, ::gl::texture::channel::r } };
case rsx::surface_color_format::a8b8g8r8:
return{ ::gl::texture::type::uint_8_8_8_8_rev, ::gl::texture::format::rgba, ::gl::texture::internal_format::rgba8, true };
default:
fmt::throw_exception("Unsupported surface color format 0x%x", static_cast<u32>(color_format));
}
}
depth_format rsx::internals::surface_depth_format_to_gl(rsx::surface_depth_format2 depth_format)
{
switch (depth_format)
{
case rsx::surface_depth_format2::z16_uint:
return{ ::gl::texture::type::ushort, ::gl::texture::format::depth, ::gl::texture::internal_format::depth16 };
case rsx::surface_depth_format2::z16_float:
return{ ::gl::texture::type::f32, ::gl::texture::format::depth, ::gl::texture::internal_format::depth32f };
case rsx::surface_depth_format2::z24s8_uint:
if (g_cfg.video.force_high_precision_z_buffer && ::gl::get_driver_caps().ARB_depth_buffer_float_supported)
return{ ::gl::texture::type::uint_24_8, ::gl::texture::format::depth_stencil, ::gl::texture::internal_format::depth32f_stencil8 };
else
return{ ::gl::texture::type::uint_24_8, ::gl::texture::format::depth_stencil, ::gl::texture::internal_format::depth24_stencil8 };
case rsx::surface_depth_format2::z24s8_float:
return{ ::gl::texture::type::float32_uint8, ::gl::texture::format::depth_stencil, ::gl::texture::internal_format::depth32f_stencil8 };
default:
fmt::throw_exception("Unsupported depth format 0x%x", static_cast<u32>(depth_format));
}
}
u8 rsx::internals::get_pixel_size(rsx::surface_depth_format format)
{
switch (format)
{
case rsx::surface_depth_format::z16: return 2;
case rsx::surface_depth_format::z24s8: return 4;
default: fmt::throw_exception("Unknown depth format");
}
}
void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool /*skip_reading*/)
{
const bool clipped_scissor = (context == rsx::framebuffer_creation_context::context_draw);
if (m_current_framebuffer_context == context && !m_graphics_state.test(rsx::rtt_config_dirty) && m_draw_fbo)
{
// Fast path
// Framebuffer usage has not changed, framebuffer exists and config regs have not changed
set_scissor(clipped_scissor);
return;
}
m_graphics_state.clear(
rsx::rtt_config_dirty |
rsx::rtt_config_contested |
rsx::rtt_config_valid |
rsx::rtt_cache_state_dirty);
get_framebuffer_layout(context, m_framebuffer_layout);
if (!m_graphics_state.test(rsx::rtt_config_valid))
{
return;
}
if (m_draw_fbo && m_framebuffer_layout.ignore_change)
{
// Nothing has changed, we're still using the same framebuffer
// Update flags to match current
m_draw_fbo->bind();
set_viewport();
set_scissor(clipped_scissor);
return;
}
gl::command_context cmd{ gl_state };
m_rtts.prepare_render_target(cmd,
m_framebuffer_layout.color_format, m_framebuffer_layout.depth_format,
m_framebuffer_layout.width, m_framebuffer_layout.height,
m_framebuffer_layout.target, m_framebuffer_layout.aa_mode, m_framebuffer_layout.raster_type,
m_framebuffer_layout.color_addresses, m_framebuffer_layout.zeta_address,
m_framebuffer_layout.actual_color_pitch, m_framebuffer_layout.actual_zeta_pitch);
std::array<GLuint, 4> color_targets;
GLuint depth_stencil_target;
const u8 color_bpp = get_format_block_size_in_bytes(m_framebuffer_layout.color_format);
const auto samples = get_format_sample_count(m_framebuffer_layout.aa_mode);
for (int i = 0; i < rsx::limits::color_buffers_count; ++i)
{
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
const utils::address_range surface_range = m_surface_info[i].get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}
if (std::get<0>(m_rtts.m_bound_render_targets[i]))
{
auto rtt = std::get<1>(m_rtts.m_bound_render_targets[i]);
color_targets[i] = rtt->id();
ensure(rtt->get_rsx_pitch() == m_framebuffer_layout.actual_color_pitch[i]); // "Pitch mismatch!"
m_surface_info[i].address = m_framebuffer_layout.color_addresses[i];
m_surface_info[i].pitch = m_framebuffer_layout.actual_color_pitch[i];
m_surface_info[i].width = m_framebuffer_layout.width;
m_surface_info[i].height = m_framebuffer_layout.height;
m_surface_info[i].color_format = m_framebuffer_layout.color_format;
m_surface_info[i].bpp = color_bpp;
m_surface_info[i].samples = samples;
m_gl_texture_cache.notify_surface_changed(m_surface_info[i].get_memory_range(m_framebuffer_layout.aa_factors));
}
else
{
color_targets[i] = GL_NONE;
m_surface_info[i] = {};
}
}
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}
if (std::get<0>(m_rtts.m_bound_depth_stencil))
{
auto ds = std::get<1>(m_rtts.m_bound_depth_stencil);
depth_stencil_target = ds->id();
ensure(std::get<1>(m_rtts.m_bound_depth_stencil)->get_rsx_pitch() == m_framebuffer_layout.actual_zeta_pitch); // "Pitch mismatch!"
m_depth_surface_info.address = m_framebuffer_layout.zeta_address;
m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch;
m_depth_surface_info.width = m_framebuffer_layout.width;
m_depth_surface_info.height = m_framebuffer_layout.height;
m_depth_surface_info.depth_format = m_framebuffer_layout.depth_format;
m_depth_surface_info.bpp = get_format_block_size_in_bytes(m_framebuffer_layout.depth_format);
m_depth_surface_info.samples = samples;
m_gl_texture_cache.notify_surface_changed(m_depth_surface_info.get_memory_range(m_framebuffer_layout.aa_factors));
}
else
{
depth_stencil_target = GL_NONE;
m_depth_surface_info = {};
}
m_graphics_state.clear(rsx::rtt_config_valid);
if (m_draw_fbo)
{
// Release resource
static_cast<gl::framebuffer_holder*>(m_draw_fbo)->release();
}
for (auto &fbo : m_framebuffer_cache)
{
if (fbo.matches(color_targets, depth_stencil_target))
{
fbo.add_ref();
m_draw_fbo = &fbo;
m_draw_fbo->bind();
m_draw_fbo->set_extents({ m_framebuffer_layout.width, m_framebuffer_layout.height });
m_graphics_state.set(rsx::rtt_config_valid);
break;
}
}
if (!m_graphics_state.test(rsx::rtt_config_valid))
{
m_framebuffer_cache.emplace_back();
m_framebuffer_cache.back().add_ref();
m_draw_fbo = &m_framebuffer_cache.back();
m_draw_fbo->create();
m_draw_fbo->bind();
m_draw_fbo->set_extents({ m_framebuffer_layout.width, m_framebuffer_layout.height });
for (int i = 0; i < 4; ++i)
{
if (color_targets[i])
{
m_draw_fbo->color[i] = color_targets[i];
}
}
if (depth_stencil_target)
{
if (is_depth_stencil_format(m_framebuffer_layout.depth_format))
{
m_draw_fbo->depth_stencil = depth_stencil_target;
}
else
{
m_draw_fbo->depth = depth_stencil_target;
}
}
}
switch (rsx::method_registers.surface_color_target())
{
case rsx::surface_target::none: break;
case rsx::surface_target::surface_a:
m_draw_fbo->draw_buffer(m_draw_fbo->color[0]);
m_draw_fbo->read_buffer(m_draw_fbo->color[0]);
break;
case rsx::surface_target::surface_b:
m_draw_fbo->draw_buffer(m_draw_fbo->color[1]);
m_draw_fbo->read_buffer(m_draw_fbo->color[1]);
break;
case rsx::surface_target::surfaces_a_b:
m_draw_fbo->draw_buffers({ m_draw_fbo->color[0], m_draw_fbo->color[1] });
m_draw_fbo->read_buffer(m_draw_fbo->color[0]);
break;
case rsx::surface_target::surfaces_a_b_c:
m_draw_fbo->draw_buffers({ m_draw_fbo->color[0], m_draw_fbo->color[1], m_draw_fbo->color[2] });
m_draw_fbo->read_buffer(m_draw_fbo->color[0]);
break;
case rsx::surface_target::surfaces_a_b_c_d:
m_draw_fbo->draw_buffers({ m_draw_fbo->color[0], m_draw_fbo->color[1], m_draw_fbo->color[2], m_draw_fbo->color[3] });
m_draw_fbo->read_buffer(m_draw_fbo->color[0]);
break;
}
if (!m_draw_fbo->check())
{
m_graphics_state.clear(rsx::rtt_config_valid);
return;
}
m_graphics_state.set(rsx::rtt_config_valid);
check_zcull_status(true);
set_viewport();
set_scissor(clipped_scissor);
m_gl_texture_cache.clear_ro_tex_invalidate_intr();
if (!m_rtts.superseded_surfaces.empty())
{
for (auto& surface : m_rtts.superseded_surfaces)
{
m_gl_texture_cache.discard_framebuffer_memory_region(cmd, surface->get_memory_range());
}
m_rtts.superseded_surfaces.clear();
}
if (!m_rtts.orphaned_surfaces.empty())
{
gl::texture::format format;
gl::texture::type type;
bool swap_bytes;
for (auto& [base_addr, surface] : m_rtts.orphaned_surfaces)
{
bool lock = surface->is_depth_surface() ? !!g_cfg.video.write_depth_buffer :
!!g_cfg.video.write_color_buffers;
if (lock &&
#ifdef TEXTURE_CACHE_DEBUG
!m_gl_texture_cache.is_protected(
base_addr,
surface->get_memory_range(),
rsx::texture_upload_context::framebuffer_storage)
#else
!surface->is_locked()
#endif
)
{
lock = false;
}
if (!lock) [[likely]]
{
m_gl_texture_cache.commit_framebuffer_memory_region(cmd, surface->get_memory_range());
continue;
}
if (surface->is_depth_surface())
{
const auto depth_format_gl = rsx::internals::surface_depth_format_to_gl(surface->get_surface_depth_format());
format = depth_format_gl.format;
type = depth_format_gl.type;
swap_bytes = (type != gl::texture::type::uint_24_8);
}
else
{
const auto color_format_gl = rsx::internals::surface_color_format_to_gl(surface->get_surface_color_format());
format = color_format_gl.format;
type = color_format_gl.type;
swap_bytes = color_format_gl.swap_bytes;
}
m_gl_texture_cache.lock_memory_region(
cmd, surface, surface->get_memory_range(), false,
surface->get_surface_width<rsx::surface_metrics::pixels>(), surface->get_surface_height<rsx::surface_metrics::pixels>(), surface->get_rsx_pitch(),
format, type, swap_bytes);
}
m_rtts.orphaned_surfaces.clear();
}
const auto color_format = rsx::internals::surface_color_format_to_gl(m_framebuffer_layout.color_format);
for (u8 i = 0; i < rsx::limits::color_buffers_count; ++i)
{
if (!m_surface_info[i].address || !m_surface_info[i].pitch) continue;
const auto surface_range = m_surface_info[i].get_memory_range();
if (g_cfg.video.write_color_buffers)
{
// Mark buffer regions as NO_ACCESS on Cell-visible side
m_gl_texture_cache.lock_memory_region(
cmd, m_rtts.m_bound_render_targets[i].second, surface_range, true,
m_surface_info[i].width, m_surface_info[i].height, m_surface_info[i].pitch,
color_format.format, color_format.type, color_format.swap_bytes);
}
else
{
m_gl_texture_cache.commit_framebuffer_memory_region(cmd, surface_range);
}
}
if (m_depth_surface_info.address && m_depth_surface_info.pitch)
{
const auto surface_range = m_depth_surface_info.get_memory_range();
if (g_cfg.video.write_depth_buffer)
{
const auto depth_format_gl = rsx::internals::surface_depth_format_to_gl(m_framebuffer_layout.depth_format);
m_gl_texture_cache.lock_memory_region(
cmd, m_rtts.m_bound_depth_stencil.second, surface_range, true,
m_depth_surface_info.width, m_depth_surface_info.height, m_depth_surface_info.pitch,
depth_format_gl.format, depth_format_gl.type, depth_format_gl.type != gl::texture::type::uint_24_8);
}
else
{
m_gl_texture_cache.commit_framebuffer_memory_region(cmd, surface_range);
}
}
if (m_gl_texture_cache.get_ro_tex_invalidate_intr())
{
// Invalidate cached sampler state
m_samplers_dirty.store(true);
}
}
// Render target helpers
void gl::render_target::clear_memory(gl::command_context& cmd)
{
if (aspect() & gl::image_aspect::depth)
{
gl::g_hw_blitter->fast_clear_image(cmd, this, 1.f, 255);
}
else
{
gl::g_hw_blitter->fast_clear_image(cmd, this, {});
}
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
void gl::render_target::load_memory(gl::command_context& cmd)
{
const bool is_swizzled = (raster_type == rsx::surface_raster_type::swizzle);
rsx::subresource_layout subres{};
subres.width_in_block = subres.width_in_texel = surface_width * samples_x;
subres.height_in_block = subres.height_in_texel = surface_height * samples_y;
subres.pitch_in_block = rsx_pitch / get_bpp();
subres.depth = 1;
subres.data = { vm::get_super_ptr<const std::byte>(base_addr), static_cast<std::span<const std::byte>::size_type>(rsx_pitch * surface_height * samples_y) };
// TODO: MSAA support
if (g_cfg.video.resolution_scale_percent == 100 && spp == 1) [[likely]]
{
gl::upload_texture(cmd, this, get_gcm_format(), is_swizzled, { subres });
}
else
{
auto tmp = std::make_unique<gl::texture>(GL_TEXTURE_2D, subres.width_in_block, subres.height_in_block, 1, 1, static_cast<GLenum>(get_internal_format()), format_class());
gl::upload_texture(cmd, tmp.get(), get_gcm_format(), is_swizzled, { subres });
gl::g_hw_blitter->scale_image(cmd, tmp.get(), this,
{ 0, 0, subres.width_in_block, subres.height_in_block },
{ 0, 0, static_cast<int>(width()), static_cast<int>(height()) },
!is_depth_surface(),
{});
}
}
void gl::render_target::initialize_memory(gl::command_context& cmd, rsx::surface_access /*access*/)
{
const bool memory_load = is_depth_surface() ?
!!g_cfg.video.read_depth_buffer :
!!g_cfg.video.read_color_buffers;
if (!memory_load)
{
clear_memory(cmd);
}
else
{
load_memory(cmd);
}
}
void gl::render_target::memory_barrier(gl::command_context& cmd, rsx::surface_access access)
{
const bool read_access = access.is_read();
const bool is_depth = is_depth_surface();
const bool should_read_buffers = is_depth ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (should_read_buffers)
{
// TODO: Decide what to do when memory loads are disabled but the underlying has memory changed
// NOTE: Assume test() is expensive when in a pinch
if (last_use_tag && state_flags == rsx::surface_state_flags::ready && !test())
{
// TODO: Figure out why merely returning and failing the test does not work when reading (TLoU)
// The result should have been the same either way
state_flags |= rsx::surface_state_flags::erase_bkgnd;
}
}
if (old_contents.empty())
{
// No memory to inherit
if (dirty() && (read_access || state_flags & rsx::surface_state_flags::erase_bkgnd))
{
// Initialize memory contents if we did not find anything usable
initialize_memory(cmd, access);
on_write();
}
return;
}
const bool dst_is_depth = !!(aspect() & gl::image_aspect::depth);
const auto dst_bpp = get_bpp();
unsigned first = prepare_rw_barrier_for_transfer(this);
u64 newest_tag = 0;
for (auto i = first; i < old_contents.size(); ++i)
{
auto &section = old_contents[i];
auto src_texture = gl::as_rtt(section.source);
const auto src_bpp = src_texture->get_bpp();
rsx::typeless_xfer typeless_info{};
if (get_internal_format() == src_texture->get_internal_format())
{
// Copy data from old contents onto this one
ensure(src_bpp == dst_bpp);
}
else
{
// Mem cast, generate typeless xfer info
if (!formats_are_bitcast_compatible(this, src_texture))
{
typeless_info.src_is_typeless = true;
typeless_info.src_context = rsx::texture_upload_context::framebuffer_storage;
typeless_info.src_native_format_override = static_cast<u32>(get_internal_format());
typeless_info.src_gcm_format = src_texture->get_gcm_format();
typeless_info.src_scaling_hint = static_cast<f32>(src_bpp) / dst_bpp;
}
}
section.init_transfer(this);
if (state_flags & rsx::surface_state_flags::erase_bkgnd)
{
const auto area = section.dst_rect();
if (area.x1 > 0 || area.y1 > 0 || unsigned(area.x2) < width() || unsigned(area.y2) < height())
{
initialize_memory(cmd, access);
}
else
{
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
}
gl::g_hw_blitter->scale_image(cmd, section.source, this,
section.src_rect(),
section.dst_rect(),
!dst_is_depth, typeless_info);
newest_tag = src_texture->last_use_tag;
}
// Memory has been transferred, discard old contents and update memory flags
// TODO: Preserve memory outside surface clip region
on_write(newest_tag);
}
| 19,940 | C++ | .cpp | 478 | 38.66318 | 171 | 0.699649 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,491 | GLOverlays.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLOverlays.cpp |
#include "GLOverlays.h"
#include "Emu/system_config.h"
#include "../rsx_utils.h"
#include "../Program/RSXOverlay.h"
namespace gl
{
// Lame
std::unordered_map<u32, std::unique_ptr<gl::overlay_pass>> g_overlay_passes;
void destroy_overlay_passes()
{
for (auto& [key, prog] : g_overlay_passes)
{
prog->destroy();
}
g_overlay_passes.clear();
}
void overlay_pass::create()
{
if (!compiled)
{
fs.create(::glsl::program_domain::glsl_fragment_program, fs_src);
fs.compile();
vs.create(::glsl::program_domain::glsl_vertex_program, vs_src);
vs.compile();
program_handle.create();
program_handle.attach(vs);
program_handle.attach(fs);
program_handle.link();
fbo.create();
m_sampler.create();
m_sampler.apply_defaults(static_cast<GLenum>(m_input_filter));
m_vertex_data_buffer.create();
int old_vao;
glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &old_vao);
m_vao.create();
m_vao.bind();
m_vao.array_buffer = m_vertex_data_buffer;
auto ptr = buffer_pointer(&m_vao);
m_vao[0] = ptr;
glBindVertexArray(old_vao);
compiled = true;
}
}
void overlay_pass::destroy()
{
if (compiled)
{
program_handle.remove();
vs.remove();
fs.remove();
fbo.remove();
m_vao.remove();
m_vertex_data_buffer.remove();
m_sampler.remove();
compiled = false;
}
}
void overlay_pass::emit_geometry()
{
int old_vao;
glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &old_vao);
m_vao.bind();
glDrawArrays(primitives, 0, num_drawable_elements);
glBindVertexArray(old_vao);
}
void overlay_pass::run(gl::command_context& cmd, const areau& region, GLuint target_texture, GLuint image_aspect_bits, bool enable_blending)
{
if (!compiled)
{
rsx_log.error("You must initialize overlay passes with create() before calling run()");
return;
}
GLint viewport[4];
std::unique_ptr<fbo::save_binding_state> save_fbo;
if (target_texture)
{
save_fbo = std::make_unique<fbo::save_binding_state>(fbo);
switch (image_aspect_bits)
{
case gl::image_aspect::color:
fbo.color[0] = target_texture;
fbo.draw_buffer(fbo.color[0]);
break;
case gl::image_aspect::depth:
fbo.draw_buffer(fbo.no_color);
fbo.depth = target_texture;
break;
case gl::image_aspect::depth | gl::image_aspect::stencil:
fbo.draw_buffer(fbo.no_color);
fbo.depth_stencil = target_texture;
break;
default:
fmt::throw_exception("Unsupported image aspect combination 0x%x", image_aspect_bits);
}
enable_depth_writes = (image_aspect_bits & m_write_aspect_mask) & gl::image_aspect::depth;
enable_stencil_writes = (image_aspect_bits & m_write_aspect_mask) & gl::image_aspect::stencil;
}
if (!target_texture || fbo.check())
{
// Save state (TODO)
glGetIntegerv(GL_VIEWPORT, viewport);
// Set initial state
glViewport(region.x1, region.y1, region.width(), region.height());
cmd->color_maski(0, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
cmd->depth_mask(image_aspect_bits == gl::image_aspect::color ? GL_FALSE : GL_TRUE);
cmd->disable(GL_CULL_FACE);
cmd->disable(GL_SCISSOR_TEST);
cmd->clip_planes(GL_NONE);
if (enable_depth_writes)
{
// Disabling depth test will also disable depth writes which is not desired
cmd->depth_func(GL_ALWAYS);
cmd->enable(GL_DEPTH_TEST);
}
else
{
cmd->disable(GL_DEPTH_TEST);
}
if (enable_stencil_writes)
{
// Disabling stencil test also disables stencil writes.
cmd->enable(GL_STENCIL_TEST);
cmd->stencil_mask(0xFF);
cmd->stencil_func(GL_ALWAYS, 0xFF, 0xFF);
cmd->stencil_op(GL_KEEP, GL_KEEP, GL_REPLACE);
}
else
{
cmd->disable(GL_STENCIL_TEST);
}
if (enable_blending)
{
cmd->enablei(GL_BLEND, 0);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ZERO, GL_ONE);
glBlendEquation(GL_FUNC_ADD);
}
else
{
cmd->disablei(GL_BLEND, 0);
}
// Render
cmd->use_program(program_handle.id());
on_load();
bind_resources();
emit_geometry();
glViewport(viewport[0], viewport[1], viewport[2], viewport[3]);
if (target_texture)
{
fbo.color[0] = GL_NONE;
fbo.depth = GL_NONE;
fbo.depth_stencil = GL_NONE;
}
}
else
{
rsx_log.error("Overlay pass failed because framebuffer was not complete. Run with debug output enabled to diagnose the problem");
}
}
ui_overlay_renderer::ui_overlay_renderer()
{
vs_src =
#include "../Program/GLSLSnippets/OverlayRenderVS.glsl"
;
fs_src =
#include "../Program/GLSLSnippets/OverlayRenderFS.glsl"
;
vs_src = fmt::replace_all(vs_src,
{
{ "#version 450", "#version 420" },
{ "%preprocessor", "// %preprocessor" }
});
fs_src = fmt::replace_all(fs_src, "%preprocessor", "// %preprocessor");
// Smooth filtering required for inputs
m_input_filter = gl::filter::linear;
}
gl::texture_view* ui_overlay_renderer::load_simple_image(rsx::overlays::image_info* desc, bool temp_resource, u32 owner_uid)
{
auto tex = std::make_unique<gl::texture>(GL_TEXTURE_2D, desc->w, desc->h, 1, 1, GL_RGBA8);
tex->copy_from(desc->data, gl::texture::format::rgba, gl::texture::type::uint_8_8_8_8, {});
GLenum remap[] = { GL_RED, GL_ALPHA, GL_BLUE, GL_GREEN };
auto view = std::make_unique<gl::texture_view>(tex.get(), remap);
auto result = view.get();
if (!temp_resource)
{
resources.push_back(std::move(tex));
view_cache[view_cache.size()] = std::move(view);
}
else
{
u64 key = reinterpret_cast<u64>(desc);
temp_image_cache[key] = std::make_pair(owner_uid, std::move(tex));
temp_view_cache[key] = std::move(view);
}
return result;
}
void ui_overlay_renderer::create()
{
overlay_pass::create();
rsx::overlays::resource_config configuration;
configuration.load_files();
for (const auto &res : configuration.texture_raw_data)
{
load_simple_image(res.get(), false, -1);
}
configuration.free_resources();
}
void ui_overlay_renderer::destroy()
{
temp_image_cache.clear();
temp_view_cache.clear();
resources.clear();
font_cache.clear();
view_cache.clear();
overlay_pass::destroy();
}
void ui_overlay_renderer::remove_temp_resources(u64 key)
{
std::vector<u64> keys_to_remove;
for (const auto& temp_image : temp_image_cache)
{
if (temp_image.second.first == key)
{
keys_to_remove.push_back(temp_image.first);
}
}
for (const auto& _key : keys_to_remove)
{
temp_image_cache.erase(_key);
temp_view_cache.erase(_key);
}
}
gl::texture_view* ui_overlay_renderer::find_font(rsx::overlays::font* font)
{
const auto font_size = font->get_glyph_data_dimensions();
u64 key = reinterpret_cast<u64>(font);
auto found = view_cache.find(key);
if (found != view_cache.end())
{
if (const auto this_size = found->second->image()->size3D();
font_size.width == this_size.width &&
font_size.height == this_size.height &&
font_size.depth == this_size.depth)
{
return found->second.get();
}
}
// Create font file
const std::vector<u8> glyph_data = font->get_glyph_data();
auto tex = std::make_unique<gl::texture>(GL_TEXTURE_2D_ARRAY, font_size.width, font_size.height, font_size.depth, 1, GL_R8);
tex->copy_from(glyph_data.data(), gl::texture::format::r, gl::texture::type::ubyte, {});
GLenum remap[] = { GL_RED, GL_RED, GL_RED, GL_RED };
auto view = std::make_unique<gl::texture_view>(tex.get(), remap);
auto result = view.get();
font_cache[key] = std::move(tex);
view_cache[key] = std::move(view);
return result;
}
gl::texture_view* ui_overlay_renderer::find_temp_image(rsx::overlays::image_info* desc, u32 owner_uid)
{
auto key = reinterpret_cast<u64>(desc);
auto cached = temp_view_cache.find(key);
if (cached != temp_view_cache.end())
{
return cached->second.get();
}
return load_simple_image(desc, true, owner_uid);
}
void ui_overlay_renderer::set_primitive_type(rsx::overlays::primitive_type type)
{
m_current_primitive_type = type;
switch (type)
{
case rsx::overlays::primitive_type::quad_list:
case rsx::overlays::primitive_type::triangle_strip:
primitives = GL_TRIANGLE_STRIP;
break;
case rsx::overlays::primitive_type::line_list:
primitives = GL_LINES;
break;
case rsx::overlays::primitive_type::line_strip:
primitives = GL_LINE_STRIP;
break;
case rsx::overlays::primitive_type::triangle_fan:
primitives = GL_TRIANGLE_FAN;
break;
default:
fmt::throw_exception("Unexpected primitive type %d", static_cast<s32>(type));
}
}
void ui_overlay_renderer::emit_geometry()
{
if (m_current_primitive_type == rsx::overlays::primitive_type::quad_list)
{
// Emulate quads with disjointed triangle strips
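// Illustration (added note): each quad (v0, v1, v2, v3) is submitted as its own 4-vertex
// strip, expanding to triangles (v0, v1, v2) and (v2, v1, v3); glMultiDrawArrays keeps the
// strips disjoint so no degenerate stitching vertices are needed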
int num_quads = num_drawable_elements / 4;
std::vector<GLint> firsts;
std::vector<GLsizei> counts;
firsts.resize(num_quads);
counts.resize(num_quads);
for (int n = 0; n < num_quads; ++n)
{
firsts[n] = (n * 4);
counts[n] = 4;
}
int old_vao;
glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &old_vao);
m_vao.bind();
glMultiDrawArrays(GL_TRIANGLE_STRIP, firsts.data(), counts.data(), num_quads);
glBindVertexArray(old_vao);
}
else
{
overlay_pass::emit_geometry();
}
}
void ui_overlay_renderer::run(gl::command_context& cmd_, const areau& viewport, GLuint target, rsx::overlays::overlay& ui)
{
program_handle.uniforms["viewport"] = color4f(static_cast<f32>(viewport.width()), static_cast<f32>(viewport.height()), static_cast<f32>(viewport.x1), static_cast<f32>(viewport.y1));
program_handle.uniforms["ui_scale"] = color4f(static_cast<f32>(ui.virtual_width), static_cast<f32>(ui.virtual_height), 1.f, 1.f);
saved_sampler_state save_30(30, m_sampler);
saved_sampler_state save_31(31, m_sampler);
if (ui.status_flags & rsx::overlays::status_bits::invalidate_image_cache)
{
remove_temp_resources(ui.uid);
ui.status_flags.clear(rsx::overlays::status_bits::invalidate_image_cache);
}
for (auto& cmd : ui.get_compiled().draw_commands)
{
set_primitive_type(cmd.config.primitives);
upload_vertex_data(cmd.verts.data(), ::size32(cmd.verts));
num_drawable_elements = ::size32(cmd.verts);
auto texture_mode = rsx::overlays::texture_sampling_mode::texture2D;
switch (cmd.config.texture_ref)
{
case rsx::overlays::image_resource_id::game_icon:
case rsx::overlays::image_resource_id::backbuffer:
// TODO
case rsx::overlays::image_resource_id::none:
{
texture_mode = rsx::overlays::texture_sampling_mode::none;
cmd_->bind_texture(31, GL_TEXTURE_2D, GL_NONE);
break;
}
case rsx::overlays::image_resource_id::raw_image:
{
cmd_->bind_texture(31, GL_TEXTURE_2D, find_temp_image(static_cast<rsx::overlays::image_info*>(cmd.config.external_data_ref), ui.uid)->id());
break;
}
case rsx::overlays::image_resource_id::font_file:
{
texture_mode = rsx::overlays::texture_sampling_mode::font3D;
cmd_->bind_texture(30, GL_TEXTURE_2D_ARRAY, find_font(cmd.config.font_ref)->id());
break;
}
default:
{
cmd_->bind_texture(31, GL_TEXTURE_2D, view_cache[cmd.config.texture_ref - 1]->id());
break;
}
}
rsx::overlays::vertex_options vert_opts;
program_handle.uniforms["vertex_config"] = vert_opts
.disable_vertex_snap(cmd.config.disable_vertex_snap)
.get();
rsx::overlays::fragment_options draw_opts;
program_handle.uniforms["fragment_config"] = draw_opts
.texture_mode(texture_mode)
.clip_fragments(cmd.config.clip_region)
.pulse_glow(cmd.config.pulse_glow)
.get();
program_handle.uniforms["timestamp"] = cmd.config.get_sinus_value();
program_handle.uniforms["albedo"] = cmd.config.color;
program_handle.uniforms["clip_bounds"] = cmd.config.clip_rect;
program_handle.uniforms["blur_intensity"] = static_cast<f32>(cmd.config.blur_strength);
overlay_pass::run(cmd_, viewport, target, gl::image_aspect::color, true);
}
ui.update(get_system_time());
}
video_out_calibration_pass::video_out_calibration_pass()
{
vs_src =
#include "../Program/GLSLSnippets/GenericVSPassthrough.glsl"
;
fs_src =
#include "../Program/GLSLSnippets/VideoOutCalibrationPass.glsl"
;
std::pair<std::string_view, std::string> repl_list[] =
{
{ "%sampler_binding", fmt::format("(%d - x)", GL_TEMP_IMAGE_SLOT(0)) },
{ "%set_decorator, ", "" },
};
fs_src = fmt::replace_all(fs_src, repl_list);
m_input_filter = gl::filter::linear;
}
void video_out_calibration_pass::run(gl::command_context& cmd, const areau& viewport, const rsx::simple_array<GLuint>& source, f32 gamma, bool limited_rgb, stereo_render_mode_options stereo_mode, gl::filter input_filter)
{
if (m_input_filter != input_filter)
{
m_input_filter = input_filter;
m_sampler.set_parameteri(GL_TEXTURE_MIN_FILTER, static_cast<GLenum>(m_input_filter));
m_sampler.set_parameteri(GL_TEXTURE_MAG_FILTER, static_cast<GLenum>(m_input_filter));
}
program_handle.uniforms["gamma"] = gamma;
program_handle.uniforms["limit_range"] = limited_rgb + 0;
program_handle.uniforms["stereo_display_mode"] = static_cast<u8>(stereo_mode);
program_handle.uniforms["stereo_image_count"] = (source[1] == GL_NONE? 1 : 2);
saved_sampler_state saved(GL_TEMP_IMAGE_SLOT(0), m_sampler);
cmd->bind_texture(GL_TEMP_IMAGE_SLOT(0), GL_TEXTURE_2D, source[0]);
saved_sampler_state saved2(GL_TEMP_IMAGE_SLOT(1), m_sampler);
cmd->bind_texture(GL_TEMP_IMAGE_SLOT(1), GL_TEXTURE_2D, source[1]);
overlay_pass::run(cmd, viewport, GL_NONE, gl::image_aspect::color, false);
}
rp_ssbo_to_generic_texture::rp_ssbo_to_generic_texture()
{
vs_src =
#include "../Program/GLSLSnippets/GenericVSPassthrough.glsl"
;
fs_src =
#include "../Program/GLSLSnippets/CopyBufferToGenericImage.glsl"
;
const auto& caps = gl::get_driver_caps();
const bool stencil_export_supported = caps.ARB_shader_stencil_export_supported;
const bool legacy_format_support = caps.subvendor_ATI;
std::pair<std::string_view, std::string> repl_list[] =
{
{ "%set, ", "" },
{ "%loc", std::to_string(GL_COMPUTE_BUFFER_SLOT(0)) },
{ "%push_block", fmt::format("binding=%d, std140", GL_COMPUTE_BUFFER_SLOT(1)) },
{ "%stencil_export_supported", stencil_export_supported ? "1" : "0" },
{ "%legacy_format_support", legacy_format_support ? "1" : "0" }
};
fs_src = fmt::replace_all(fs_src, repl_list);
if (stencil_export_supported)
{
m_write_aspect_mask |= gl::image_aspect::stencil;
}
}
void rp_ssbo_to_generic_texture::run(gl::command_context& cmd,
const buffer* src, const texture_view* dst,
const u32 src_offset, const coordu& dst_region,
const pixel_buffer_layout& layout)
{
const u32 bpp = dst->image()->pitch() / dst->image()->width();
const u32 row_length = utils::align(dst_region.width * bpp, std::max<int>(layout.alignment, 1)) / bpp;
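// Added note: row_length re-derives the source pitch in texels from the destination bpp
// and the pack alignment, analogous to GL_UNPACK_ROW_LENGTH for this buffer-to-image copy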
program_handle.uniforms["src_pitch"] = row_length;
program_handle.uniforms["swap_bytes"] = layout.swap_bytes;
program_handle.uniforms["format"] = static_cast<GLenum>(dst->image()->get_internal_format());
src->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(0), src_offset, row_length * bpp * dst_region.height);
cmd->stencil_mask(0xFF);
overlay_pass::run(cmd, dst_region, dst->id(), dst->aspect());
}
void rp_ssbo_to_generic_texture::run(gl::command_context& cmd,
const buffer* src, texture* dst,
const u32 src_offset, const coordu& dst_region,
const pixel_buffer_layout& layout)
{
gl::nil_texture_view view(dst);
run(cmd, src, &view, src_offset, dst_region, layout);
}
}
| 15,597 | C++ | .cpp | 460 | 30.252174 | 221 | 0.684508 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,492 | GLCommonDecompiler.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLCommonDecompiler.cpp |
#include "stdafx.h"
#include "GLCommonDecompiler.h"
namespace gl
{
static constexpr std::array<std::pair<std::string_view, int>, 17> varying_registers =
{{
{"diff_color", 1},
{"spec_color", 2},
{"diff_color1", 3},
{"spec_color1", 4},
{"fogc", 5},
{"fog_c", 5},
{"tc0", 6},
{"tc1", 7},
{"tc2", 8},
{"tc3", 9},
{"tc4", 10},
{"tc5", 11},
{"tc6", 12},
{"tc7", 13},
{"tc8", 14},
{"tc9", 15}
}};
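// Added note: a linear scan suffices here; the table holds only 16 entries and lookups
// happen during shader generation, not per draw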
int get_varying_register_location(std::string_view varying_register_name)
{
for (const auto& varying_register : varying_registers)
{
if (varying_register.first == varying_register_name)
{
return varying_register.second;
}
}
fmt::throw_exception("Unknown register name: %s", varying_register_name);
}
}
| 755 | C++ | .cpp | 35 | 18.771429 | 86 | 0.615063 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | true | false | false | true | false | false |
| 5,493 | GLShaderInterpreter.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLShaderInterpreter.cpp |
#include "stdafx.h"
#include "GLShaderInterpreter.h"
#include "GLGSRender.h"
#include "GLVertexProgram.h"
#include "GLFragmentProgram.h"
#include "../rsx_methods.h"
#include "../Program/ShaderInterpreter.h"
#include "../Program/GLSLCommon.h"
namespace gl
{
using glsl::shader;
namespace interpreter
{
void texture_pool_allocator::create(::glsl::program_domain domain)
{
GLenum pname;
switch (domain)
{
default:
rsx_log.fatal("Unexpected program domain %d", static_cast<int>(domain));
[[fallthrough]];
case ::glsl::program_domain::glsl_vertex_program:
pname = GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS; break;
case ::glsl::program_domain::glsl_fragment_program:
pname = GL_MAX_TEXTURE_IMAGE_UNITS; break;
}
glGetIntegerv(pname, &max_image_units);
}
void texture_pool_allocator::allocate(int size)
{
if ((used + size) > max_image_units)
{
rsx_log.fatal("Out of image binding slots!");
}
used += size;
texture_pool pool;
pool.pool_size = size;
pools.push_back(pool);
}
}
void shader_interpreter::create()
{
build_vs();
build_program(::program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES);
build_program(::program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES | ::program_common::interpreter::COMPILER_OPT_ENABLE_F32_EXPORT);
}
void shader_interpreter::destroy()
{
for (auto& prog : m_program_cache)
{
prog.second->fs.remove();
prog.second->prog.remove();
}
m_vs.remove();
}
glsl::program* shader_interpreter::get(const interpreter::program_metadata& metadata)
{
// Build options
u64 opt = 0;
if (rsx::method_registers.alpha_test_enabled()) [[unlikely]]
{
switch (rsx::method_registers.alpha_func())
{
case rsx::comparison_function::always:
break;
case rsx::comparison_function::never:
return nullptr;
case rsx::comparison_function::greater_or_equal:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_GE;
break;
case rsx::comparison_function::greater:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_G;
break;
case rsx::comparison_function::less_or_equal:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_LE;
break;
case rsx::comparison_function::less:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_L;
break;
case rsx::comparison_function::equal:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_EQ;
break;
case rsx::comparison_function::not_equal:
opt |= program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_NE;
break;
}
}
if (rsx::method_registers.shader_control() & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_DEPTH_EXPORT;
if (rsx::method_registers.shader_control() & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_F32_EXPORT;
if (rsx::method_registers.shader_control() & RSX_SHADER_CONTROL_USES_KIL) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_KIL;
if (metadata.referenced_textures_mask) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES;
if (metadata.has_branch_instructions) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_FLOW_CTRL;
if (metadata.has_pack_instructions) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_PACKING;
if (rsx::method_registers.polygon_stipple_enabled()) opt |= program_common::interpreter::COMPILER_OPT_ENABLE_STIPPLING;
if (auto it = m_program_cache.find(opt); it != m_program_cache.end()) [[likely]]
{
m_current_interpreter = it->second.get();
}
else
{
m_current_interpreter = build_program(opt);
}
return &m_current_interpreter->prog;
}
void shader_interpreter::build_vs()
{
::glsl::shader_properties properties{};
properties.domain = ::glsl::program_domain::glsl_vertex_program;
properties.require_lit_emulation = true;
// TODO: Extend decompiler thread
// TODO: Rename decompiler thread, it no longer spawns a thread
RSXVertexProgram null_prog;
std::string shader_str;
ParamArray arr;
GLVertexDecompilerThread comp(null_prog, shader_str, arr);
ParamType uniforms = { PF_PARAM_UNIFORM, "vec4" };
uniforms.items.emplace_back("vc[468]", -1);
std::stringstream builder;
comp.insertHeader(builder);
builder << "#define Z_NEGATIVE_ONE_TO_ONE\n\n";
comp.insertConstants(builder, { uniforms });
comp.insertInputs(builder, {});
// Insert vp stream input
builder << "\n"
"layout(std140, binding = " << GL_INTERPRETER_VERTEX_BLOCK << ") readonly restrict buffer VertexInstructionBlock\n"
"{\n"
" uint base_address;\n"
" uint entry;\n"
" uint output_mask;\n"
" uint control;\n"
" uvec4 vp_instructions[];\n"
"};\n\n";
::glsl::insert_glsl_legacy_function(builder, properties);
::glsl::insert_vertex_input_fetch(builder, ::glsl::glsl_rules::glsl_rules_opengl4);
builder << program_common::interpreter::get_vertex_interpreter();
const std::string s = builder.str();
m_vs.create(::glsl::program_domain::glsl_vertex_program, s);
m_vs.compile();
}
void shader_interpreter::build_fs(u64 compiler_options, interpreter::cached_program& prog_data)
{
// Allocate TIUs
auto& allocator = prog_data.allocator;
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES)
{
allocator.create(::glsl::program_domain::glsl_fragment_program);
if (allocator.max_image_units >= 32)
{
// 16 + 4 + 4 + 4
allocator.allocate(4); // 1D
allocator.allocate(16); // 2D
allocator.allocate(4); // CUBE
allocator.allocate(4); // 3D
}
else if (allocator.max_image_units >= 24)
{
// 16 + 4 + 2 + 2
allocator.allocate(2); // 1D
allocator.allocate(16); // 2D
allocator.allocate(2); // CUBE
allocator.allocate(4); // 3D
}
else if (allocator.max_image_units >= 16)
{
// 10 + 2 + 2 + 2
allocator.allocate(2); // 1D
allocator.allocate(10); // 2D
allocator.allocate(2); // CUBE
allocator.allocate(2); // 3D
}
else
{
// Unusable
rsx_log.fatal("Failed to allocate enough TIUs for shader interpreter.");
}
}
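// Added note: the pool sizes above partition the available texture image units (TIUs)
// across the four sampler dimensionalities, with the 2D pool given the largest share,
// presumably because 2D lookups dominate in practice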
u32 len;
ParamArray arr;
std::string shader_str;
RSXFragmentProgram frag;
GLFragmentDecompilerThread comp(shader_str, arr, frag, len);
std::stringstream builder;
builder <<
"#version 450\n"
"#extension GL_ARB_bindless_texture : require\n\n";
::glsl::insert_subheader_block(builder);
comp.insertConstants(builder);
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_GE)
{
builder << "#define ALPHA_TEST_GEQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_G)
{
builder << "#define ALPHA_TEST_GREATER\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_LE)
{
builder << "#define ALPHA_TEST_LEQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_L)
{
builder << "#define ALPHA_TEST_LESS\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_EQ)
{
builder << "#define ALPHA_TEST_EQUAL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_ALPHA_TEST_NE)
{
builder << "#define ALPHA_TEST_NEQUAL\n";
}
if (!(compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_F32_EXPORT))
{
builder << "#define WITH_HALF_OUTPUT_REGISTER\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_DEPTH_EXPORT)
{
builder << "#define WITH_DEPTH_EXPORT\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_FLOW_CTRL)
{
builder << "#define WITH_FLOW_CTRL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_PACKING)
{
builder << "#define WITH_PACKING\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_KIL)
{
builder << "#define WITH_KIL\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_STIPPLING)
{
builder << "#define WITH_STIPPLING\n";
}
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES)
{
builder << "#define WITH_TEXTURES\n\n";
const char* type_names[] = { "sampler1D", "sampler2D", "samplerCube", "sampler3D" };
for (int i = 0; i < 4; ++i)
{
builder << "uniform " << type_names[i] << " " << type_names[i] << "_array[" << allocator.pools[i].pool_size << "];\n";
}
builder << "\n"
"#define IS_TEXTURE_RESIDENT(index) (texture_handles[index] < 0xFF)\n"
"#define SAMPLER1D(index) sampler1D_array[texture_handles[index]]\n"
"#define SAMPLER2D(index) sampler2D_array[texture_handles[index]]\n"
"#define SAMPLER3D(index) sampler3D_array[texture_handles[index]]\n"
"#define SAMPLERCUBE(index) samplerCube_array[texture_handles[index]]\n\n";
}
else if (compiler_options)
{
builder << "\n";
}
builder <<
"layout(std430, binding =" << GL_INTERPRETER_FRAGMENT_BLOCK << ") readonly restrict buffer FragmentInstructionBlock\n"
"{\n"
" uint shader_control;\n"
" uint texture_control;\n"
" uint reserved1;\n"
" uint reserved2;\n"
" uint texture_handles[16];\n"
" uvec4 fp_instructions[];\n"
"};\n\n";
builder << program_common::interpreter::get_fragment_interpreter();
const std::string s = builder.str();
prog_data.fs.create(::glsl::program_domain::glsl_fragment_program, s);
prog_data.fs.compile();
}
interpreter::cached_program* shader_interpreter::build_program(u64 compiler_options)
{
auto data = new interpreter::cached_program();
build_fs(compiler_options, *data);
data->prog.create().
attach(m_vs).
attach(data->fs).
link();
data->prog.uniforms[0] = GL_STREAM_BUFFER_START + 0;
data->prog.uniforms[1] = GL_STREAM_BUFFER_START + 1;
if (compiler_options & program_common::interpreter::COMPILER_OPT_ENABLE_TEXTURES)
{
// Initialize texture bindings
int assigned = 0;
auto& allocator = data->allocator;
const char* type_names[] = { "sampler1D_array", "sampler2D_array", "samplerCube_array", "sampler3D_array" };
for (int i = 0; i < 4; ++i)
{
for (int j = 0; j < allocator.pools[i].pool_size; ++j)
{
allocator.pools[i].allocate(assigned++);
}
data->prog.uniforms[type_names[i]] = allocator.pools[i].allocated;
}
}
m_program_cache[compiler_options].reset(data);
return data;
}
bool shader_interpreter::is_interpreter(const glsl::program* program)
{
return (program == &m_current_interpreter->prog);
}
void shader_interpreter::update_fragment_textures(
const std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, 16>& descriptors,
u16 reference_mask, u32* out)
{
if (reference_mask == 0 || !m_current_interpreter)
{
return;
}
// Reset allocation
auto& allocator = m_current_interpreter->allocator;
for (unsigned i = 0; i < 4; ++i)
{
allocator.pools[i].num_used = 0;
allocator.pools[i].flags = 0;
}
rsx::simple_array<std::pair<int, int>> replacement_map;
for (int i = 0; i < rsx::limits::fragment_textures_count; ++i)
{
if (reference_mask & (1 << i))
{
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(descriptors[i].get());
ensure(sampler_state);
int pool_id = static_cast<int>(sampler_state->image_type);
auto& pool = allocator.pools[pool_id];
const int old = pool.allocated[pool.num_used];
if (!pool.allocate(i))
{
rsx_log.error("Could not allocate texture resource for shader interpreter.");
break;
}
out[i] = (pool.num_used - 1);
if (old != i)
{
// Check if the candidate target has also been replaced
bool found = false;
for (auto& e : replacement_map)
{
if (e.second == old)
{
// This replacement consumed this 'old' value
e.second = i;
found = true;
break;
}
}
if (!found)
{
replacement_map.push_back({ old, i });
}
}
}
else
{
out[i] = 0xFF;
}
}
// Bind TIU locations
if (replacement_map.empty()) [[likely]]
{
return;
}
// Overlapping texture bindings are trouble. Cannot bind one TIU to two types of samplers simultaneously
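// Example: if texture index i was just re-bound into one pool, any stale unused slot in
// another pool still holding i is rewritten to the displaced old index below, so i never
// appears in two sampler arrays at once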
for (unsigned i = 0; i < replacement_map.size(); ++i)
{
for (int j = 0; j < 4; ++j)
{
auto& pool = allocator.pools[j];
for (int k = pool.num_used; k < pool.pool_size; ++k)
{
if (pool.allocated[k] == replacement_map[i].second)
{
pool.allocated[k] = replacement_map[i].first;
pool.flags |= static_cast<u32>(interpreter::texture_pool_flags::dirty);
// Exit nested loop
j = 4;
break;
}
}
}
}
if (allocator.pools[0].flags) m_current_interpreter->prog.uniforms["sampler1D_array"] = allocator.pools[0].allocated;
if (allocator.pools[1].flags) m_current_interpreter->prog.uniforms["sampler2D_array"] = allocator.pools[1].allocated;
if (allocator.pools[2].flags) m_current_interpreter->prog.uniforms["samplerCube_array"] = allocator.pools[2].allocated;
if (allocator.pools[3].flags) m_current_interpreter->prog.uniforms["sampler3D_array"] = allocator.pools[3].allocated;
}
}
| 13,358 | C++ | .cpp | 387 | 30.684755 | 155 | 0.687016 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,494 | GLTextureCache.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLTextureCache.cpp |
#include "stdafx.h"
#include "Emu/RSX/RSXThread.h"
#include "GLTexture.h"
#include "GLTextureCache.h"
#include "../Common/BufferUtils.h"
#include "util/asm.hpp"
namespace gl
{
static u64 encode_properties(GLenum sized_internal_fmt, GLenum target, u16 width, u16 height, u16 depth, u8 mipmaps)
{
// Generate cache key
// 00..13 = width
// 14..27 = height
// 28..35 = depth
// 36..39 = mipmaps
// 40..41 = type
// 42..57 = format
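// Worked example (illustrative): a 256x128 2D RGBA8 image with 1 mip packs as
// 256 | (128 << 14) | (1 << 28) | (1 << 36) | (1 << 40) | (u64(GL_RGBA8) << 42)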
ensure(((width | height) & ~0x3fff) == 0, "Image dimensions are too large - lower your resolution scale.");
ensure(mipmaps <= 13);
GLuint target_encoding = 0;
switch (target)
{
case GL_TEXTURE_1D:
target_encoding = 0; break;
case GL_TEXTURE_2D:
target_encoding = 1; break;
case GL_TEXTURE_3D:
target_encoding = 2; break;
case GL_TEXTURE_CUBE_MAP:
target_encoding = 3; break;
default:
fmt::throw_exception("Unsupported destination target 0x%x", target);
}
const u64 key =
(static_cast<u64>(width) << 0) |
(static_cast<u64>(height) << 14) |
(static_cast<u64>(depth) << 28) |
(static_cast<u64>(mipmaps) << 36) |
(static_cast<u64>(target_encoding) << 40) |
(static_cast<u64>(sized_internal_fmt) << 42);
return key;
}
void cached_texture_section::finish_flush()
{
// Free resources
pbo.unmap();
const auto valid_range = get_confirmed_range_delta();
const u32 valid_offset = valid_range.first;
const u32 valid_length = valid_range.second;
void *dst = get_ptr(get_section_base() + valid_offset);
if (!gl::get_driver_caps().ARB_compute_shader_supported)
{
switch (type)
{
case gl::texture::type::sbyte:
case gl::texture::type::ubyte:
{
// byte swapping does not work on byte types, use uint_8_8_8_8 for rgba8 instead to avoid penalty
ensure(!pack_unpack_swap_bytes);
break;
}
case gl::texture::type::uint_24_8:
{
// Swap bytes on D24S8 does not swap the whole dword, just shuffles the 3 bytes for D24
// In this regard, D24S8 is the same structure on both PC and PS3, but the endianness of the whole block is reversed on PS3
ensure(pack_unpack_swap_bytes == false);
ensure(real_pitch == (width * 4));
if (rsx_pitch == real_pitch) [[likely]]
{
copy_data_swap_u32(static_cast<u32*>(dst), static_cast<u32*>(dst), valid_length / 4);
}
else
{
const u32 num_rows = utils::align(valid_length, rsx_pitch) / rsx_pitch;
u32* data = static_cast<u32*>(dst);
for (u32 row = 0; row < num_rows; ++row)
{
copy_data_swap_u32(data, data, width);
data += rsx_pitch / 4;
}
}
break;
}
default:
break;
}
}
if (is_swizzled())
{
// This format is completely worthless to CPU processing algorithms where cache lines on die are linear.
// If this is happening, usually it means it was not a planned readback (e.g shared pages situation)
rsx_log.trace("[Performance warning] CPU readback of swizzled data");
// Read-modify-write to avoid corrupting already resident memory outside texture region
std::vector<u8> tmp_data(rsx_pitch * height);
std::memcpy(tmp_data.data(), dst, tmp_data.size());
switch (type)
{
case gl::texture::type::uint_8_8_8_8_rev:
case gl::texture::type::uint_8_8_8_8:
case gl::texture::type::uint_24_8:
rsx::convert_linear_swizzle<u32, false>(tmp_data.data(), dst, width, height, rsx_pitch);
break;
case gl::texture::type::ushort_5_6_5:
case gl::texture::type::ushort:
rsx::convert_linear_swizzle<u16, false>(tmp_data.data(), dst, width, height, rsx_pitch);
break;
default:
rsx_log.error("Unexpected swizzled texture format 0x%x", static_cast<u32>(format));
}
}
}
gl::texture_view* texture_cache::create_temporary_subresource_impl(gl::command_context& cmd, gl::texture* src, GLenum sized_internal_fmt, GLenum dst_target,
u32 gcm_format, u16 x, u16 y, u16 width, u16 height, u16 depth, u8 mipmaps, const rsx::texture_channel_remap_t& remap, bool copy)
{
if (sized_internal_fmt == GL_NONE)
{
sized_internal_fmt = gl::get_sized_internal_format(gcm_format);
}
temporary_image_t* dst = nullptr;
const auto match_key = encode_properties(sized_internal_fmt, dst_target, width, height, depth, mipmaps);
// Search image cache
for (auto& e : m_temporary_surfaces)
{
if (e->has_refs())
{
continue;
}
if (e->properties_encoding == match_key)
{
dst = e.get();
break;
}
}
if (!dst)
{
std::unique_ptr<temporary_image_t> data = std::make_unique<temporary_image_t>(dst_target, width, height, depth, mipmaps, sized_internal_fmt, rsx::classify_format(gcm_format));
dst = data.get();
dst->properties_encoding = match_key;
m_temporary_surfaces.emplace_back(std::move(data));
}
dst->add_ref();
if (copy)
{
std::vector<copy_region_descriptor> region =
{{
.src = src,
.xform = rsx::surface_transform::coordinate_transform,
.src_x = x,
.src_y = y,
.src_w = width,
.src_h = height,
.dst_w = width,
.dst_h = height
}};
copy_transfer_regions_impl(cmd, dst, region);
}
if (!src || static_cast<GLenum>(src->get_internal_format()) != sized_internal_fmt)
{
// Apply base component map onto the new texture if a data cast has been done
auto components = get_component_mapping(gcm_format, rsx::component_order::default_);
dst->set_native_component_layout(components);
}
return dst->get_view(remap);
}
void texture_cache::copy_transfer_regions_impl(gl::command_context& cmd, gl::texture* dst_image, const std::vector<copy_region_descriptor>& sources) const
{
const auto dst_bpp = dst_image->pitch() / dst_image->width();
const auto dst_aspect = dst_image->aspect();
for (const auto &slice : sources)
{
if (!slice.src)
{
continue;
}
const bool typeless = !formats_are_bitcast_compatible(slice.src, dst_image);
ensure(typeless || dst_aspect == slice.src->aspect());
std::unique_ptr<gl::texture> tmp;
auto src_image = slice.src;
auto src_x = slice.src_x;
auto src_y = slice.src_y;
auto src_w = slice.src_w;
auto src_h = slice.src_h;
if (slice.xform == rsx::surface_transform::coordinate_transform)
{
// Dimensions were given in 'dst' space. Work out the real source coordinates
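// e.g. a 4-bpp destination reading from a 2-bpp source doubles the x coordinate and
// width, since each dst texel spans two src texels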
const auto src_bpp = slice.src->pitch() / slice.src->width();
src_x = (src_x * dst_bpp) / src_bpp;
src_w = utils::aligned_div<u16>(src_w * dst_bpp, src_bpp);
}
if (auto surface = dynamic_cast<gl::render_target*>(slice.src))
{
surface->transform_samples_to_pixels(src_x, src_w, src_y, src_h);
}
if (typeless) [[unlikely]]
{
const auto src_bpp = slice.src->pitch() / slice.src->width();
const u16 convert_w = u16(slice.src->width() * src_bpp) / dst_bpp;
tmp = std::make_unique<texture>(GL_TEXTURE_2D, convert_w, slice.src->height(), 1, 1, static_cast<GLenum>(dst_image->get_internal_format()), dst_image->format_class());
src_image = tmp.get();
// Compute src region in dst format layout
const u16 src_w2 = u16(src_w * src_bpp) / dst_bpp;
const u16 src_x2 = u16(src_x * src_bpp) / dst_bpp;
if (src_w2 == slice.dst_w && src_h == slice.dst_h && slice.level == 0)
{
// Optimization, avoid typeless copy to tmp followed by data copy to dst
// Combine the two transfers into one
const coord3u src_region = { { src_x, src_y, 0 }, { src_w, src_h, 1 } };
const coord3u dst_region = { { slice.dst_x, slice.dst_y, slice.dst_z }, { slice.dst_w, slice.dst_h, 1 } };
gl::copy_typeless(cmd, dst_image, slice.src, dst_region, src_region);
continue;
}
const coord3u src_region = { { src_x, src_y, 0 }, { src_w, src_h, 1 } };
const coord3u dst_region = { { src_x2, src_y, 0 }, { src_w2, src_h, 1 } };
gl::copy_typeless(cmd, src_image, slice.src, dst_region, src_region);
src_x = src_x2;
src_w = src_w2;
}
if (src_w == slice.dst_w && src_h == slice.dst_h)
{
gl::g_hw_blitter->copy_image(cmd, src_image, dst_image, 0, slice.level,
position3i{ src_x, src_y, 0 },
position3i{ slice.dst_x, slice.dst_y, slice.dst_z },
size3i{ src_w, src_h, 1 });
}
else
{
auto _blitter = gl::g_hw_blitter;
const areai src_rect = { src_x, src_y, src_x + src_w, src_y + src_h };
const areai dst_rect = { slice.dst_x, slice.dst_y, slice.dst_x + slice.dst_w, slice.dst_y + slice.dst_h };
gl::texture* _dst = dst_image;
if (src_image->get_internal_format() != dst_image->get_internal_format() || slice.level != 0 || slice.dst_z != 0) [[ unlikely ]]
{
tmp = std::make_unique<texture>(GL_TEXTURE_2D, dst_rect.x2, dst_rect.y2, 1, 1, static_cast<GLenum>(slice.src->get_internal_format()));
_dst = tmp.get();
}
_blitter->scale_image(cmd, src_image, _dst, src_rect, dst_rect, false, {});
if (_dst != dst_image)
{
// Data cast comes after scaling
gl::g_hw_blitter->copy_image(cmd, tmp.get(), dst_image, 0, slice.level,
position3i{slice.dst_x, slice.dst_y, 0},
position3i{slice.dst_x, slice.dst_y, slice.dst_z},
size3i{slice.dst_w, slice.dst_h, 1});
}
}
}
}
}
| 9,151 | C++ | .cpp | 248 | 32.71371 | 178 | 0.649859 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,495 | GLVertexBuffers.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLVertexBuffers.cpp |
#include "stdafx.h"
#include "../Common/BufferUtils.h"
#include "../rsx_methods.h"
#include "GLGSRender.h"
#include "GLHelpers.h"
namespace
{
[[maybe_unused]] constexpr std::array<const char*, 16> s_reg_table =
{
"in_pos_buffer", "in_weight_buffer", "in_normal_buffer",
"in_diff_color_buffer", "in_spec_color_buffer",
"in_fog_buffer",
"in_point_size_buffer", "in_7_buffer",
"in_tc0_buffer", "in_tc1_buffer", "in_tc2_buffer", "in_tc3_buffer",
"in_tc4_buffer", "in_tc5_buffer", "in_tc6_buffer", "in_tc7_buffer"
};
}
namespace
{
// Generates an emulated u16 index buffer in 'dst' for a non-native primitive;
// returns { emulated index count, offset of the index data in the heap }
std::tuple<u32, u32> get_index_array_for_emulated_non_indexed_draw(rsx::primitive_type primitive_mode, gl::ring_buffer &dst, u32 vertex_count)
{
// This is an emulated buffer, so our indices only range from 0->original_vertex_array_length
const auto element_count = get_index_count(primitive_mode, vertex_count);
ensure(!gl::is_primitive_native(primitive_mode));
auto mapping = dst.alloc_from_heap(element_count * sizeof(u16), 256);
auto mapped_buffer = static_cast<char*>(mapping.first);
write_index_array_for_non_indexed_non_native_primitive_to_buffer(mapped_buffer, primitive_mode, vertex_count);
return std::make_tuple(element_count, mapping.second);
}
}
namespace
{
GLenum get_index_type(rsx::index_array_type type)
{
switch (type)
{
case rsx::index_array_type::u16: return GL_UNSIGNED_SHORT;
case rsx::index_array_type::u32: return GL_UNSIGNED_INT;
}
fmt::throw_exception("Invalid index array type (%u)", static_cast<u8>(type));
}
struct vertex_input_state
{
bool index_rebase;
u32 min_index;
u32 max_index;
u32 vertex_draw_count;
u32 vertex_index_offset;
std::optional<std::tuple<GLenum, u32>> index_info;
};
struct draw_command_visitor
{
draw_command_visitor(gl::ring_buffer& index_ring_buffer, rsx::vertex_input_layout& vertex_layout)
: m_index_ring_buffer(index_ring_buffer)
, m_vertex_layout(vertex_layout)
{}
vertex_input_state operator()(const rsx::draw_array_command& /*command*/)
{
const u32 vertex_count = rsx::method_registers.current_draw_clause.get_elements_count();
const u32 min_index = rsx::method_registers.current_draw_clause.min_index();
const u32 max_index = (min_index + vertex_count) - 1;
if (!gl::is_primitive_native(rsx::method_registers.current_draw_clause.primitive))
{
u32 index_count;
u32 offset_in_index_buffer;
std::tie(index_count, offset_in_index_buffer) = get_index_array_for_emulated_non_indexed_draw(
rsx::method_registers.current_draw_clause.primitive, m_index_ring_buffer,
rsx::method_registers.current_draw_clause.get_elements_count());
return{ false, min_index, max_index, index_count, 0, std::make_tuple(static_cast<GLenum>(GL_UNSIGNED_SHORT), offset_in_index_buffer) };
}
return{ false, min_index, max_index, vertex_count, 0, std::optional<std::tuple<GLenum, u32>>() };
}
vertex_input_state operator()(const rsx::draw_indexed_array_command& command)
{
u32 min_index = 0, max_index = 0;
rsx::index_array_type type = rsx::method_registers.current_draw_clause.is_immediate_draw?
rsx::index_array_type::u32:
rsx::method_registers.index_type();
u32 type_size = get_index_type_size(type);
const u32 vertex_count = rsx::method_registers.current_draw_clause.get_elements_count();
u32 index_count = vertex_count;
if (!gl::is_primitive_native(rsx::method_registers.current_draw_clause.primitive))
index_count = static_cast<u32>(get_index_count(rsx::method_registers.current_draw_clause.primitive, vertex_count));
u32 max_size = index_count * type_size;
auto mapping = m_index_ring_buffer.alloc_from_heap(max_size, 256);
void* ptr = mapping.first;
u32 offset_in_index_buffer = mapping.second;
std::tie(min_index, max_index, index_count) = write_index_array_data_to_buffer(
{ reinterpret_cast<std::byte*>(ptr), max_size },
command.raw_index_buffer, type,
rsx::method_registers.current_draw_clause.primitive,
rsx::method_registers.restart_index_enabled(),
rsx::method_registers.restart_index(),
[](auto prim) { return !gl::is_primitive_native(prim); });
if (min_index >= max_index)
{
//empty set, do not draw
return{ false, 0, 0, 0, 0, std::make_tuple(get_index_type(type), offset_in_index_buffer) };
}
// Prefer only reading the vertices that are referenced in the index buffer itself
// Offset data source by min_index verts, but also notify the shader to offset the vertexID (important for modulo op)
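// e.g. an index buffer referencing only vertices [100, 140] uploads just those 41 vertices,
// and the shader adds the reported offset back to the vertex ID so modulo-based
// attribute addressing behaves as if the full array were present.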
const auto index_offset = rsx::method_registers.vertex_data_base_index();
return{ true, min_index, max_index, index_count, index_offset, std::make_tuple(get_index_type(type), offset_in_index_buffer) };
}
vertex_input_state operator()(const rsx::draw_inlined_array& /*command*/)
{
const auto stream_length = rsx::method_registers.current_draw_clause.inline_vertex_array.size();
const u32 vertex_count = u32(stream_length * sizeof(u32)) / m_vertex_layout.interleaved_blocks[0]->attribute_stride;
if (!gl::is_primitive_native(rsx::method_registers.current_draw_clause.primitive))
{
u32 offset_in_index_buffer;
u32 index_count;
std::tie(index_count, offset_in_index_buffer) = get_index_array_for_emulated_non_indexed_draw(
rsx::method_registers.current_draw_clause.primitive, m_index_ring_buffer, vertex_count);
return{ false, 0, vertex_count, index_count, 0, std::make_tuple(static_cast<GLenum>(GL_UNSIGNED_SHORT), offset_in_index_buffer) };
}
return{ false, 0, vertex_count, vertex_count, 0, std::optional<std::tuple<GLenum, u32>>() };
}
private:
gl::ring_buffer& m_index_ring_buffer;
rsx::vertex_input_layout& m_vertex_layout;
};
}
gl::vertex_upload_info GLGSRender::set_vertex_buffer()
{
m_profiler.start();
//Write index buffers and count verts
auto result = std::visit(draw_command_visitor(*m_index_ring_buffer, m_vertex_layout), get_draw_command(rsx::method_registers));
const u32 vertex_count = (result.max_index - result.min_index) + 1;
u32 vertex_base = result.min_index;
u32 index_base = 0;
if (result.index_rebase)
{
vertex_base = rsx::get_index_from_base(vertex_base, rsx::method_registers.vertex_data_base_index());
index_base = result.min_index;
}
//Do actual vertex upload
auto required = calculate_memory_requirements(m_vertex_layout, vertex_base, vertex_count);
std::pair<void*, u32> persistent_mapping = {}, volatile_mapping = {};
gl::vertex_upload_info upload_info =
{
result.vertex_draw_count, // Vertex count
vertex_count, // Allocated vertex count
vertex_base, // First vertex in block
index_base, // Index of attribute at data location 0
result.vertex_index_offset, // Hw index offset
0u, 0u, // Mapping
result.index_info // Index buffer info
};
if (required.first > 0)
{
//Check if cacheable
//Only data in the 'persistent' block may be cached
//TODO: make vertex cache keep local data beyond frame boundaries and hook notify command
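// Volatile data (immediate-mode and inlined-array attributes) is regenerated every draw,
// so only the persistent stream sourced from guest vertex arrays can be meaningfully reused.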
bool in_cache = false;
bool to_store = false;
u32 storage_address = -1;
if (m_vertex_layout.interleaved_blocks.size() == 1 &&
rsx::method_registers.current_draw_clause.command != rsx::draw_command::inlined_array)
{
const auto data_offset = (vertex_base * m_vertex_layout.interleaved_blocks[0]->attribute_stride);
storage_address = m_vertex_layout.interleaved_blocks[0]->real_offset_address + data_offset;
if (auto cached = m_vertex_cache->find_vertex_range(storage_address, required.first))
{
ensure(cached->local_address == storage_address);
in_cache = true;
upload_info.persistent_mapping_offset = cached->offset_in_heap;
}
else
{
to_store = true;
}
}
if (!in_cache)
{
persistent_mapping = m_attrib_ring_buffer->alloc_from_heap(required.first, m_min_texbuffer_alignment);
upload_info.persistent_mapping_offset = persistent_mapping.second;
if (to_store)
{
//store ref in vertex cache
m_vertex_cache->store_range(storage_address, required.first, persistent_mapping.second);
}
}
if (!m_persistent_stream_view.in_range(upload_info.persistent_mapping_offset, required.first, upload_info.persistent_mapping_offset))
{
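// The attrib ring buffer may be larger than the max texture buffer size, so bind a
// sliding window over the heap and rebase the shader-visible offset to 0 inside it.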
ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size());
const usz view_size = ((upload_info.persistent_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ?
(m_attrib_ring_buffer->size() - upload_info.persistent_mapping_offset) : m_max_texbuffer_size;
m_persistent_stream_view.update(m_attrib_ring_buffer.get(), upload_info.persistent_mapping_offset, static_cast<u32>(view_size));
m_gl_persistent_stream_buffer->copy_from(m_persistent_stream_view);
upload_info.persistent_mapping_offset = 0;
}
}
if (required.second > 0)
{
volatile_mapping = m_attrib_ring_buffer->alloc_from_heap(required.second, m_min_texbuffer_alignment);
upload_info.volatile_mapping_offset = volatile_mapping.second;
if (!m_volatile_stream_view.in_range(upload_info.volatile_mapping_offset, required.second, upload_info.volatile_mapping_offset))
{
ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size());
const usz view_size = ((upload_info.volatile_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ?
(m_attrib_ring_buffer->size() - upload_info.volatile_mapping_offset) : m_max_texbuffer_size;
m_volatile_stream_view.update(m_attrib_ring_buffer.get(), upload_info.volatile_mapping_offset, static_cast<u32>(view_size));
m_gl_volatile_stream_buffer->copy_from(m_volatile_stream_view);
upload_info.volatile_mapping_offset = 0;
}
}
//Write all the data
write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, persistent_mapping.first, volatile_mapping.first);
m_frame_stats.vertex_upload_time += m_profiler.duration();
return upload_info;
}
| 10,133 | C++ | .cpp | 213 | 44.159624 | 143 | 0.7097 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,496 | GLPresent.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLPresent.cpp |
#include "stdafx.h"
#include "GLGSRender.h"
#include "upscalers/bilinear_pass.hpp"
#include "upscalers/fsr_pass.h"
#include "upscalers/nearest_pass.hpp"
#include "Emu/Cell/Modules/cellVideoOut.h"
#include "Emu/RSX/Overlays/overlay_manager.h"
#include "Emu/RSX/Overlays/overlay_debug_overlay.h"
#include "util/video_provider.h"
LOG_CHANNEL(screenshot_log, "SCREENSHOT");
extern atomic_t<bool> g_user_asked_for_screenshot;
extern atomic_t<recording_mode> g_recording_mode;
namespace gl
{
namespace debug
{
std::unique_ptr<texture> g_vis_texture;
void set_vis_texture(texture* visual)
{
const auto target = static_cast<GLenum>(visual->get_target());
const auto ifmt = static_cast<GLenum>(visual->get_internal_format());
g_vis_texture.reset(new texture(target, visual->width(), visual->height(), 1, 1, ifmt, visual->format_class()));
glCopyImageSubData(visual->id(), target, 0, 0, 0, 0, g_vis_texture->id(), target, 0, 0, 0, 0, visual->width(), visual->height(), 1);
}
}
GLenum RSX_display_format_to_gl_format(u8 format)
{
switch (format)
{
default:
rsx_log.error("Unhandled video output format 0x%x", static_cast<s32>(format));
[[fallthrough]];
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8R8G8B8:
return GL_BGRA8;
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8B8G8R8:
return GL_RGBA8;
case CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_R16G16B16X16_FLOAT:
return GL_RGBA16F;
}
}
}
gl::texture* GLGSRender::get_present_source(gl::present_surface_info* info, const rsx::avconf& avconfig)
{
gl::texture* image = nullptr;
// @FIXME: This implementation needs to merge into the texture cache's upload_texture routine.
// See notes on the vulkan implementation on what needs to happen before that is viable.
// Check the surface store first
gl::command_context cmd = { gl_state };
const auto format_bpp = rsx::get_format_block_size_in_bytes(info->format);
const auto overlap_info = m_rtts.get_merged_texture_memory_region(cmd,
info->address, info->width, info->height, info->pitch, format_bpp, rsx::surface_access::transfer_read);
if (!overlap_info.empty())
{
const auto& section = overlap_info.back();
auto surface = gl::as_rtt(section.surface);
bool viable = false;
if (section.base_address >= info->address)
{
const auto surface_width = surface->get_surface_width<rsx::surface_metrics::samples>();
const auto surface_height = surface->get_surface_height<rsx::surface_metrics::samples>();
if (section.base_address == info->address)
{
// Check for fit or crop
viable = (surface_width >= info->width && surface_height >= info->height);
}
else
{
// Check for borders and letterboxing
const u32 inset_offset = section.base_address - info->address;
const u32 inset_y = inset_offset / info->pitch;
const u32 inset_x = (inset_offset % info->pitch) / format_bpp;
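// e.g. a 1280x720 surface centered in a 1280x768 scanout gives inset_y = 24 and
// full_height = 720 + 24 + 24 = 768, matching the requested buffer height.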
const u32 full_width = surface_width + inset_x + inset_x;
const u32 full_height = surface_height + inset_y + inset_y;
viable = (full_width == info->width && full_height == info->height);
}
if (viable)
{
image = section.surface->get_surface(rsx::surface_access::transfer_read);
std::tie(info->width, info->height) = rsx::apply_resolution_scale<true>(
std::min(surface_width, info->width),
std::min(surface_height, info->height));
}
}
}
else if (auto surface = m_gl_texture_cache.find_texture_from_dimensions<true>(info->address, info->format);
surface && surface->get_width() >= info->width && surface->get_height() >= info->height)
{
// Hack - this should be the first location to check for output
// The render might have been done offscreen or in software and a blit used to display
if (const auto tex = surface->get_raw_texture(); tex) image = tex;
}
const GLenum expected_format = gl::RSX_display_format_to_gl_format(avconfig.format);
std::unique_ptr<gl::texture>& flip_image = m_flip_tex_color[info->eye];
auto initialize_scratch_image = [&]()
{
if (!flip_image || flip_image->size2D() != sizeu{ info->width, info->height })
{
flip_image = std::make_unique<gl::texture>(GL_TEXTURE_2D, info->width, info->height, 1, 1, expected_format);
}
};
if (!image)
{
rsx_log.warning("Flip texture was not found in cache. Uploading surface from CPU");
gl::pixel_unpack_settings unpack_settings;
unpack_settings.alignment(1).row_length(info->pitch / 4);
initialize_scratch_image();
gl::command_context cmd{ gl_state };
const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
m_gl_texture_cache.invalidate_range(cmd, range, rsx::invalidation_cause::read);
flip_image->copy_from(vm::base(info->address), static_cast<gl::texture::format>(expected_format), gl::texture::type::uint_8_8_8_8, unpack_settings);
image = flip_image.get();
}
else if (image->get_internal_format() != static_cast<gl::texture::internal_format>(expected_format))
{
initialize_scratch_image();
// Copy
if (gl::formats_are_bitcast_compatible(flip_image.get(), image))
{
const position3u offset{};
gl::g_hw_blitter->copy_image(cmd, image, flip_image.get(), 0, 0, offset, offset, { info->width, info->height, 1 });
}
else
{
const coord3u region = { {/* offsets */}, { info->width, info->height, 1 } };
gl::copy_typeless(cmd, flip_image.get(), image, region, region);
}
image = flip_image.get();
}
return image;
}
void GLGSRender::flip(const rsx::display_flip_info_t& info)
{
if (info.skip_frame)
{
m_frame->flip(m_context, true);
rsx::thread::flip(info);
return;
}
gl::command_context cmd{ gl_state };
u32 buffer_width = display_buffers[info.buffer].width;
u32 buffer_height = display_buffers[info.buffer].height;
u32 buffer_pitch = display_buffers[info.buffer].pitch;
u32 av_format;
const auto& avconfig = g_fxo->get<rsx::avconf>();
if (!buffer_width)
{
buffer_width = avconfig.resolution_x;
buffer_height = avconfig.resolution_y;
}
if (avconfig.state)
{
av_format = avconfig.get_compatible_gcm_format();
if (!buffer_pitch)
buffer_pitch = buffer_width * avconfig.get_bpp();
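// Frame-packed stereo carries both eyes in one scanout separated by a 30-line gap,
// e.g. 720p 3D scans out 1280x1470 = (2 x 720) + 30, yielding a 720-line frame per eye.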
const u32 video_frame_height = (avconfig.stereo_mode == stereo_render_mode_options::disabled ? avconfig.resolution_y : ((avconfig.resolution_y - 30) / 2));
buffer_width = std::min(buffer_width, avconfig.resolution_x);
buffer_height = std::min(buffer_height, video_frame_height);
}
else
{
av_format = CELL_GCM_TEXTURE_A8R8G8B8;
if (!buffer_pitch)
buffer_pitch = buffer_width * 4;
}
// Disable scissor test (affects blit, clear, etc)
gl_state.disable(GL_SCISSOR_TEST);
// Enable drawing to window backbuffer
gl::screen.bind();
gl::texture *image_to_flip = nullptr, *image_to_flip2 = nullptr;
if (info.buffer < display_buffers_count && buffer_width && buffer_height)
{
// Find the source image
gl::present_surface_info present_info
{
.address = rsx::get_address(display_buffers[info.buffer].offset, CELL_GCM_LOCATION_LOCAL),
.format = av_format,
.width = buffer_width,
.height = buffer_height,
.pitch = buffer_pitch,
.eye = 0
};
image_to_flip = get_present_source(&present_info, avconfig);
if (avconfig.stereo_mode != stereo_render_mode_options::disabled) [[unlikely]]
{
const auto [unused, min_expected_height] = rsx::apply_resolution_scale<true>(RSX_SURFACE_DIMENSION_IGNORED, buffer_height + 30);
if (image_to_flip->height() < min_expected_height)
{
// Get image for second eye
const u32 image_offset = (buffer_height + 30) * buffer_pitch + display_buffers[info.buffer].offset;
present_info.width = buffer_width;
present_info.height = buffer_height;
present_info.address = rsx::get_address(image_offset, CELL_GCM_LOCATION_LOCAL);
present_info.eye = 1;
image_to_flip2 = get_present_source(&present_info, avconfig);
}
else
{
// Account for possible insets
const auto [unused2, scaled_buffer_height] = rsx::apply_resolution_scale<true>(RSX_SURFACE_DIMENSION_IGNORED, buffer_height);
buffer_height = std::min<u32>(image_to_flip->height() - min_expected_height, scaled_buffer_height);
}
}
buffer_width = present_info.width;
buffer_height = present_info.height;
}
if (info.emu_flip)
{
evaluate_cpu_usage_reduction_limits();
}
// Get window state
const int width = m_frame->client_width();
const int height = m_frame->client_height();
// Calculate blit coordinates
areai aspect_ratio;
if (!g_cfg.video.stretch_to_display_area)
{
const sizeu csize(width, height);
const auto converted = avconfig.aspect_convert_region(size2u{ buffer_width, buffer_height }, csize);
aspect_ratio = static_cast<areai>(converted);
}
else
{
aspect_ratio = { 0, 0, width, height };
}
if (!image_to_flip || aspect_ratio.x1 || aspect_ratio.y1)
{
// Clear the window background to opaque black
gl_state.clear_color(0, 0, 0, 255);
gl::screen.clear(gl::buffers::color);
}
if (image_to_flip)
{
if (g_user_asked_for_screenshot || (g_recording_mode != recording_mode::stopped && m_frame->can_consume_frame()))
{
std::vector<u8> sshot_frame(buffer_height * buffer_width * 4);
glGetError();
gl::pixel_pack_settings pack_settings{};
image_to_flip->copy_to(sshot_frame.data(), gl::texture::format::rgba, gl::texture::type::ubyte, pack_settings);
if (GLenum err = glGetError(); err != GL_NO_ERROR)
{
screenshot_log.error("Failed to capture image: 0x%x", err);
}
else if (g_user_asked_for_screenshot.exchange(false))
{
m_frame->take_screenshot(std::move(sshot_frame), buffer_width, buffer_height, false);
}
else
{
m_frame->present_frame(sshot_frame, buffer_width * 4, buffer_width, buffer_height, false);
}
}
const areai screen_area = coordi({}, { static_cast<int>(buffer_width), static_cast<int>(buffer_height) });
const bool use_full_rgb_range_output = g_cfg.video.full_rgb_range_output.get();
const bool backbuffer_has_alpha = m_frame->has_alpha();
if (!m_upscaler || m_output_scaling != g_cfg.video.output_scaling)
{
m_output_scaling = g_cfg.video.output_scaling;
switch (m_output_scaling)
{
case output_scaling_mode::nearest:
m_upscaler = std::make_unique<gl::nearest_upscale_pass>();
break;
case output_scaling_mode::fsr:
m_upscaler = std::make_unique<gl::fsr_upscale_pass>();
break;
case output_scaling_mode::bilinear:
default:
m_upscaler = std::make_unique<gl::bilinear_upscale_pass>();
break;
}
}
if (!backbuffer_has_alpha && use_full_rgb_range_output && rsx::fcmp(avconfig.gamma, 1.f) && avconfig.stereo_mode == stereo_render_mode_options::disabled)
{
// Blit source image to the screen
m_upscaler->scale_output(cmd, image_to_flip, screen_area, aspect_ratio.flipped_vertical(), UPSCALE_AND_COMMIT | UPSCALE_DEFAULT_VIEW);
}
else
{
const f32 gamma = avconfig.gamma;
const bool limited_range = !use_full_rgb_range_output;
const auto filter = m_output_scaling == output_scaling_mode::nearest ? gl::filter::nearest : gl::filter::linear;
rsx::simple_array<gl::texture*> images{ image_to_flip, image_to_flip2 };
if (m_output_scaling == output_scaling_mode::fsr && avconfig.stereo_mode == stereo_render_mode_options::disabled) // 3D will be implemented later
{
for (unsigned i = 0; i < 2 && images[i]; ++i)
{
const rsx::flags32_t mode = (i == 0) ? UPSCALE_LEFT_VIEW : UPSCALE_RIGHT_VIEW;
images[i] = m_upscaler->scale_output(cmd, images[i], screen_area, aspect_ratio.flipped_vertical(), mode);
}
}
gl::screen.bind();
m_video_output_pass.run(cmd, areau(aspect_ratio), images.map(FN(x ? x->id() : GL_NONE)), gamma, limited_range, avconfig.stereo_mode, filter);
}
}
if (m_overlay_manager)
{
if (m_overlay_manager->has_dirty())
{
m_overlay_manager->lock_shared();
std::vector<u32> uids_to_dispose;
uids_to_dispose.reserve(m_overlay_manager->get_dirty().size());
for (const auto& view : m_overlay_manager->get_dirty())
{
m_ui_renderer.remove_temp_resources(view->uid);
uids_to_dispose.push_back(view->uid);
}
m_overlay_manager->unlock_shared();
m_overlay_manager->dispose(uids_to_dispose);
}
if (m_overlay_manager->has_visible())
{
gl::screen.bind();
// Lock to avoid modification during run-update chain
std::lock_guard lock(*m_overlay_manager);
for (const auto& view : m_overlay_manager->get_views())
{
m_ui_renderer.run(cmd, areau(aspect_ratio), 0, *view.get());
}
}
}
if (g_cfg.video.overlay)
{
const auto num_dirty_textures = m_gl_texture_cache.get_unreleased_textures_count();
const auto texture_memory_size = m_gl_texture_cache.get_texture_memory_in_use() / (1024 * 1024);
const auto num_flushes = m_gl_texture_cache.get_num_flush_requests();
const auto num_mispredict = m_gl_texture_cache.get_num_cache_mispredictions();
const auto num_speculate = m_gl_texture_cache.get_num_cache_speculative_writes();
const auto num_misses = m_gl_texture_cache.get_num_cache_misses();
const auto num_unavoidable = m_gl_texture_cache.get_num_unavoidable_hard_faults();
const auto cache_miss_ratio = static_cast<u32>(ceil(m_gl_texture_cache.get_cache_miss_ratio() * 100));
const auto num_texture_upload = m_gl_texture_cache.get_texture_upload_calls_this_frame();
const auto num_texture_upload_miss = m_gl_texture_cache.get_texture_upload_misses_this_frame();
const auto texture_upload_miss_ratio = m_gl_texture_cache.get_texture_upload_miss_percentage();
const auto texture_copies_ellided = m_gl_texture_cache.get_texture_copies_ellided_this_frame();
const auto vertex_cache_hit_count = (info.stats.vertex_cache_request_count - info.stats.vertex_cache_miss_count);
const auto vertex_cache_hit_ratio = info.stats.vertex_cache_request_count
? (vertex_cache_hit_count * 100) / info.stats.vertex_cache_request_count
: 0;
rsx::overlays::set_debug_overlay_text(fmt::format(
"RSX Load: %3d%%\n"
"draw calls: %16d\n"
"draw call setup: %11dus\n"
"vertex upload time: %8dus\n"
"textures upload time: %6dus\n"
"draw call execution: %7dus\n"
"Unreleased textures: %7d\n"
"Texture memory: %12dM\n"
"Flush requests: %12d = %2d (%3d%%) hard faults, %2d unavoidable, %2d misprediction(s), %2d speculation(s)\n"
"Texture uploads: %11u (%u from CPU - %02u%%, %u copies avoided)\n"
"Vertex cache hits: %9u/%u (%u%%)",
get_load(), info.stats.draw_calls, info.stats.setup_time, info.stats.vertex_upload_time,
info.stats.textures_upload_time, info.stats.draw_exec_time, num_dirty_textures, texture_memory_size,
num_flushes, num_misses, cache_miss_ratio, num_unavoidable, num_mispredict, num_speculate,
num_texture_upload, num_texture_upload_miss, texture_upload_miss_ratio, texture_copies_ellided,
vertex_cache_hit_count, info.stats.vertex_cache_request_count, vertex_cache_hit_ratio)
);
}
if (gl::debug::g_vis_texture)
{
// Optionally renders a single debug texture to framebuffer.
// Only programmatic access provided at the moment.
// TODO: Migrate to use overlay system. (kd-11)
gl::fbo m_vis_buffer;
m_vis_buffer.create();
m_vis_buffer.bind();
m_vis_buffer.color = gl::debug::g_vis_texture->id();
m_vis_buffer.read_buffer(m_vis_buffer.color);
m_vis_buffer.draw_buffer(m_vis_buffer.color);
const u32 vis_width = 320;
const u32 vis_height = 240;
areai display_view = areai(aspect_ratio).flipped_vertical();
display_view.x1 = display_view.x2 - vis_width;
display_view.y1 = vis_height;
// Blit
const auto src_region = areau{ 0u, 0u, gl::debug::g_vis_texture->width(), gl::debug::g_vis_texture->height() };
m_vis_buffer.blit(gl::screen, static_cast<areai>(src_region), display_view, gl::buffers::color, gl::filter::linear);
m_vis_buffer.remove();
}
m_frame->flip(m_context);
rsx::thread::flip(info);
// Cleanup
m_gl_texture_cache.on_frame_end();
m_vertex_cache->purge();
auto removed_textures = m_rtts.trim(cmd);
m_framebuffer_cache.remove_if([&](auto& fbo)
{
if (fbo.unused_check_count() >= 2) return true; // Remove if stale
if (fbo.references_any(removed_textures)) return true; // Remove if any of the attachments is invalid
return false;
});
if (m_draw_fbo && !m_graphics_state.test(rsx::rtt_config_dirty))
{
// Always restore the active framebuffer
m_draw_fbo->bind();
set_viewport();
set_scissor(m_graphics_state & rsx::pipeline_state::scissor_setup_clipped);
}
}
| 16,438 | C++ | .cpp | 398 | 37.992462 | 157 | 0.703827 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,497 | GLCompute.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLCompute.cpp |
#include "GLCompute.h"
#include "GLTexture.h"
#include "Utilities/StrUtil.h"
namespace gl
{
struct bind_image_view_safe
{
GLuint m_layer;
GLenum m_target;
GLuint m_value;
gl::command_context& m_commands;
bind_image_view_safe(gl::command_context& cmd, GLuint layer, gl::texture_view* value)
: m_layer(layer), m_target(value->target()), m_commands(cmd)
{
m_value = cmd->get_bound_texture(layer, m_target);
value->bind(cmd, layer);
}
~bind_image_view_safe()
{
m_commands->bind_texture(m_layer, m_target, m_value);
}
};
void compute_task::initialize()
{
// Set up optimal kernel size
const auto& caps = gl::get_driver_caps();
if (caps.vendor_AMD || caps.vendor_MESA)
{
optimal_group_size = 64;
unroll_loops = false;
}
else if (caps.vendor_NVIDIA)
{
optimal_group_size = 32;
}
else
{
optimal_group_size = 128;
}
optimal_kernel_size = 256 / optimal_group_size;
glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, reinterpret_cast<GLint*>(&max_invocations_x));
initialized = true;
}
void compute_task::create()
{
if (!initialized)
{
initialize();
}
if (!compiled)
{
m_shader.create(::glsl::program_domain::glsl_compute_program, m_src);
m_shader.compile();
m_program.create();
m_program.attach(m_shader);
m_program.link();
compiled = true;
}
}
void compute_task::destroy()
{
if (compiled)
{
m_program.remove();
m_shader.remove();
compiled = false;
}
}
void compute_task::run(gl::command_context& cmd, u32 invocations_x, u32 invocations_y)
{
bind_resources();
cmd->use_program(m_program.id());
glDispatchCompute(invocations_x, invocations_y, 1);
}
void compute_task::run(gl::command_context& cmd, u32 num_invocations)
{
u32 invocations_x, invocations_y;
if (num_invocations <= max_invocations_x) [[likely]]
{
invocations_x = num_invocations;
invocations_y = 1;
}
else
{
// Since all the invocations will run, the optimal distribution is sqrt(count)
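// e.g. num_invocations = 1000 gives optimal_length = floor(sqrt(1000)) = 31; rounding
// the row count up dispatches 31x33 = 1023 groups so at least 1000 actually execute.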
const u32 optimal_length = static_cast<u32>(floor(std::sqrt(num_invocations)));
invocations_x = optimal_length;
invocations_y = utils::aligned_div(num_invocations, invocations_x); // round up so invocations_x * invocations_y >= num_invocations
}
run(cmd, invocations_x, invocations_y);
}
cs_shuffle_base::cs_shuffle_base()
{
work_kernel =
" value = data[index];\n"
" data[index] = %f(value);\n";
loop_advance =
" index++;\n";
suffix =
"}\n";
}
void cs_shuffle_base::build(const char* function_name, u32 _kernel_size)
{
// Initialize to allow detecting optimal settings
initialize();
kernel_size = _kernel_size? _kernel_size : optimal_kernel_size;
m_src =
#include "../Program/GLSLSnippets/ShuffleBytes.glsl"
;
const std::pair<std::string_view, std::string> syntax_replace[] =
{
{ "%set, ", ""},
{ "%loc", std::to_string(GL_COMPUTE_BUFFER_SLOT(0)) },
{ "%ws", std::to_string(optimal_group_size) },
{ "%ks", std::to_string(kernel_size) },
{ "%vars", variables },
{ "%f", function_name },
{ "%ub", uniforms },
{ "%md", method_declarations },
};
m_src = fmt::replace_all(m_src, syntax_replace);
work_kernel = fmt::replace_all(work_kernel, syntax_replace);
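// With kernel_size > 1 and no unrolling, the assembled GLSL body is roughly:
// for (int loop = 0; loop < KERNEL_SIZE; ++loop) { value = data[index]; data[index] = f(value); index++; }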
if (kernel_size <= 1)
{
m_src += " {\n" + work_kernel + " }\n";
}
else if (unroll_loops)
{
work_kernel += loop_advance + "\n";
m_src += std::string
(
" //Unrolled loop\n"
" {\n"
);
// Assemble body with manual loop unroll to try lowering GPR usage
for (u32 n = 0; n < kernel_size; ++n)
{
m_src += work_kernel;
}
m_src += " }\n";
}
else
{
m_src += " for (int loop = 0; loop < KERNEL_SIZE; ++loop)\n";
m_src += " {\n";
m_src += work_kernel;
m_src += loop_advance;
m_src += " }\n";
}
m_src += suffix;
}
void cs_shuffle_base::bind_resources()
{
m_data->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(0), m_data_offset, m_data_length);
}
void cs_shuffle_base::run(gl::command_context& cmd, const gl::buffer* data, u32 data_length, u32 data_offset)
{
m_data = data;
m_data_offset = data_offset;
m_data_length = data_length;
const auto num_bytes_per_invocation = optimal_group_size * kernel_size * 4;
const auto num_bytes_to_process = utils::align(data_length, num_bytes_per_invocation);
const auto num_invocations = num_bytes_to_process / num_bytes_per_invocation;
if ((num_bytes_to_process + data_offset) > data->size())
{
// Technically robust buffer access should keep the driver from crashing in OOB situations
rsx_log.error("Inadequate buffer length submitted for a compute operation."
"Required=%d bytes, Available=%d bytes", num_bytes_to_process, data->size());
}
compute_task::run(cmd, num_invocations);
}
template <bool SwapBytes>
cs_shuffle_d32fx8_to_x8d24f<SwapBytes>::cs_shuffle_d32fx8_to_x8d24f()
{
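// Repacks D32F + X8S8 data (8 bytes per texel: one f32 depth word, one stencil word)
// into X8D24F (4 bytes per texel: 24-bit float depth in the high bits, stencil in the
// low byte). in_ptr/out_ptr are byte offsets; in_offset/out_offset are u32 word offsets.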
uniforms = "uniform uint in_ptr, out_ptr;\n";
variables =
" uint in_offset = in_ptr >> 2;\n"
" uint out_offset = out_ptr >> 2;\n"
" uint depth, stencil;\n";
work_kernel =
" depth = data[index * 2 + in_offset];\n"
" stencil = data[index * 2 + (in_offset + 1)] & 0xFFu;\n"
" value = f32_to_d24f(depth) << 8;\n"
" value |= stencil;\n"
" data[index + out_ptr] = bswap_u32(value);\n";
if constexpr (!SwapBytes)
{
work_kernel = fmt::replace_all(work_kernel, "bswap_u32(value)", "value", 1);
}
cs_shuffle_base::build("");
}
template <bool SwapBytes>
void cs_shuffle_d32fx8_to_x8d24f<SwapBytes>::bind_resources()
{
m_data->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(0), m_data_offset, m_ssbo_length);
}
template <bool SwapBytes>
void cs_shuffle_d32fx8_to_x8d24f<SwapBytes>::run(gl::command_context& cmd, const gl::buffer* data, u32 src_offset, u32 dst_offset, u32 num_texels)
{
u32 data_offset;
if (src_offset > dst_offset)
{
data_offset = dst_offset;
m_ssbo_length = (src_offset + num_texels * 8) - data_offset;
}
else
{
data_offset = src_offset;
m_ssbo_length = (dst_offset + num_texels * 4) - data_offset;
}
m_program.uniforms["in_ptr"] = src_offset - data_offset;
m_program.uniforms["out_ptr"] = dst_offset - data_offset;
cs_shuffle_base::run(cmd, data, num_texels * 4, data_offset);
}
template struct cs_shuffle_d32fx8_to_x8d24f<true>;
template struct cs_shuffle_d32fx8_to_x8d24f<false>;
template <bool SwapBytes>
cs_shuffle_x8d24f_to_d32fx8<SwapBytes>::cs_shuffle_x8d24f_to_d32fx8()
{
uniforms = "uniform uint texel_count, in_ptr, out_ptr;\n";
variables =
" uint in_offset = in_ptr >> 2;\n"
" uint out_offset = out_ptr >> 2;\n"
" uint depth, stencil;\n";
work_kernel =
" value = data[index + in_offset];\n"
" value = bswap_u32(value);\n"
" stencil = (value & 0xFFu);\n"
" depth = (value >> 8);\n"
" data[index * 2 + out_offset] = d24f_to_f32(depth);\n"
" data[index * 2 + (out_offset + 1)] = stencil;\n";
if constexpr (!SwapBytes)
{
work_kernel = fmt::replace_all(work_kernel, "value = bswap_u32(value)", "// value = bswap_u32(value)", 1);
}
cs_shuffle_base::build("");
}
template <bool SwapBytes>
void cs_shuffle_x8d24f_to_d32fx8<SwapBytes>::bind_resources()
{
m_data->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(0), m_data_offset, m_ssbo_length);
}
template <bool SwapBytes>
void cs_shuffle_x8d24f_to_d32fx8<SwapBytes>::run(gl::command_context& cmd, const gl::buffer* data, u32 src_offset, u32 dst_offset, u32 num_texels)
{
u32 data_offset;
if (src_offset > dst_offset)
{
data_offset = dst_offset;
m_ssbo_length = (src_offset + num_texels * 4) - data_offset;
}
else
{
data_offset = src_offset;
m_ssbo_length = (dst_offset + num_texels * 8) - data_offset;
}
m_program.uniforms["in_ptr"] = src_offset - data_offset;
m_program.uniforms["out_ptr"] = dst_offset - data_offset;
cs_shuffle_base::run(cmd, data, num_texels * 4, data_offset);
}
template struct cs_shuffle_x8d24f_to_d32fx8<true>;
template struct cs_shuffle_x8d24f_to_d32fx8<false>;
cs_d24x8_to_ssbo::cs_d24x8_to_ssbo()
{
initialize();
const auto raw_data =
#include "../Program/GLSLSnippets/CopyD24x8ToBuffer.glsl"
;
const std::pair<std::string_view, std::string> repl_list[] =
{
{ "%set, ", "" },
{ "%loc", std::to_string(GL_COMPUTE_BUFFER_SLOT(0)) },
{ "%ws", std::to_string(optimal_group_size) },
{ "%wks", std::to_string(optimal_kernel_size) }
};
m_src = fmt::replace_all(raw_data, repl_list);
}
void cs_d24x8_to_ssbo::run(gl::command_context& cmd, gl::viewable_image* src, const gl::buffer* dst, u32 out_offset, const coordu& region, const gl::pixel_buffer_layout& layout)
{
const auto row_pitch = region.width;
m_program.uniforms["swap_bytes"] = layout.swap_bytes;
m_program.uniforms["output_pitch"] = row_pitch;
m_program.uniforms["region_offset"] = color2i(region.x, region.y);
m_program.uniforms["region_size"] = color2i(region.width, region.height);
auto depth_view = src->get_view(rsx::default_remap_vector.with_encoding(GL_REMAP_IDENTITY), gl::image_aspect::depth);
auto stencil_view = src->get_view(rsx::default_remap_vector.with_encoding(GL_REMAP_IDENTITY), gl::image_aspect::stencil);
if (!m_sampler)
{
m_sampler.create();
m_sampler.apply_defaults();
}
// This method is callable in sensitive code and must restore the GL state on exit
gl::saved_sampler_state save_sampler0(GL_COMPUTE_BUFFER_SLOT(0), m_sampler);
gl::saved_sampler_state save_sampler1(GL_COMPUTE_BUFFER_SLOT(1), m_sampler);
gl::bind_image_view_safe save_image1(cmd, GL_COMPUTE_BUFFER_SLOT(0), depth_view);
gl::bind_image_view_safe save_image2(cmd, GL_COMPUTE_BUFFER_SLOT(1), stencil_view);
dst->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(2), out_offset, row_pitch * 4 * region.height);
const int num_invocations = utils::aligned_div(region.width * region.height, optimal_kernel_size * optimal_group_size);
compute_task::run(cmd, num_invocations);
}
cs_rgba8_to_ssbo::cs_rgba8_to_ssbo()
{
initialize();
const auto raw_data =
#include "../Program/GLSLSnippets/CopyRGBA8ToBuffer.glsl"
;
const std::pair<std::string_view, std::string> repl_list[] =
{
{ "%set, ", "" },
{ "%loc", std::to_string(GL_COMPUTE_BUFFER_SLOT(0)) },
{ "%ws", std::to_string(optimal_group_size) },
{ "%wks", std::to_string(optimal_kernel_size) }
};
m_src = fmt::replace_all(raw_data, repl_list);
}
void cs_rgba8_to_ssbo::run(gl::command_context& cmd, gl::viewable_image* src, const gl::buffer* dst, u32 out_offset, const coordu& region, const gl::pixel_buffer_layout& layout)
{
const auto row_pitch = region.width;
m_program.uniforms["swap_bytes"] = layout.swap_bytes;
m_program.uniforms["output_pitch"] = row_pitch;
m_program.uniforms["region_offset"] = color2i(region.x, region.y);
m_program.uniforms["region_size"] = color2i(region.width, region.height);
m_program.uniforms["is_bgra"] = (layout.format == static_cast<GLenum>(gl::texture::format::bgra));
m_program.uniforms["block_width"] = static_cast<u32>(layout.size);
auto data_view = src->get_view(rsx::default_remap_vector.with_encoding(GL_REMAP_IDENTITY), gl::image_aspect::color);
if (!m_sampler)
{
m_sampler.create();
m_sampler.apply_defaults();
}
// This method is callable in sensitive code and must restore the GL state on exit
gl::saved_sampler_state save_sampler(GL_COMPUTE_BUFFER_SLOT(0), m_sampler);
gl::bind_image_view_safe save_image(cmd, GL_COMPUTE_BUFFER_SLOT(0), data_view);
dst->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(1), out_offset, row_pitch * 4 * region.height);
const int num_invocations = utils::aligned_div(region.width * region.height, optimal_kernel_size * optimal_group_size);
compute_task::run(cmd, num_invocations);
}
cs_ssbo_to_color_image::cs_ssbo_to_color_image()
{
initialize();
const auto raw_data =
#include "../Program/GLSLSnippets/CopyBufferToColorImage.glsl"
;
const std::pair<std::string_view, std::string> repl_list[] =
{
{ "%set, ", "" },
{ "%image_slot", std::to_string(GL_COMPUTE_IMAGE_SLOT(0)) },
{ "%ssbo_slot", std::to_string(GL_COMPUTE_BUFFER_SLOT(0)) },
{ "%ws", std::to_string(optimal_group_size) },
{ "%wks", std::to_string(optimal_kernel_size) }
};
m_src = fmt::replace_all(raw_data, repl_list);
}
void cs_ssbo_to_color_image::run(gl::command_context& cmd, const buffer* src, const texture_view* dst, const u32 src_offset, const coordu& dst_region, const pixel_buffer_layout& layout)
{
const u32 bpp = dst->image()->pitch() / dst->image()->width();
const u32 row_length = utils::align(dst_region.width * bpp, std::max<int>(layout.alignment, 1)) / bpp;
m_program.uniforms["swap_bytes"] = layout.swap_bytes;
m_program.uniforms["src_pitch"] = row_length;
m_program.uniforms["format"] = static_cast<GLenum>(dst->image()->get_internal_format());
m_program.uniforms["region_offset"] = color2i(dst_region.x, dst_region.y);
m_program.uniforms["region_size"] = color2i(dst_region.width, dst_region.height);
src->bind_range(gl::buffer::target::ssbo, GL_COMPUTE_BUFFER_SLOT(0), src_offset, row_length * bpp * dst_region.height);
glBindImageTexture(GL_COMPUTE_IMAGE_SLOT(0), dst->id(), 0, GL_FALSE, 0, GL_WRITE_ONLY, dst->view_format());
const int num_invocations = utils::aligned_div(dst_region.width * dst_region.height, optimal_kernel_size * optimal_group_size);
compute_task::run(cmd, num_invocations);
}
void cs_ssbo_to_color_image::run(gl::command_context& cmd, const buffer* src, texture* dst, const u32 src_offset, const coordu& dst_region, const pixel_buffer_layout& layout)
{
gl::nil_texture_view view(dst);
run(cmd, src, &view, src_offset, dst_region, layout);
}
}
| 13,775 | C++ | .cpp | 374 | 33.561497 | 186 | 0.677957 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,498 | GLDraw.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/GLDraw.cpp |
#include "stdafx.h"
#include "GLGSRender.h"
#include "../rsx_methods.h"
#include "../Common/BufferUtils.h"
namespace gl
{
GLenum comparison_op(rsx::comparison_function op)
{
switch (op)
{
case rsx::comparison_function::never: return GL_NEVER;
case rsx::comparison_function::less: return GL_LESS;
case rsx::comparison_function::equal: return GL_EQUAL;
case rsx::comparison_function::less_or_equal: return GL_LEQUAL;
case rsx::comparison_function::greater: return GL_GREATER;
case rsx::comparison_function::not_equal: return GL_NOTEQUAL;
case rsx::comparison_function::greater_or_equal: return GL_GEQUAL;
case rsx::comparison_function::always: return GL_ALWAYS;
}
fmt::throw_exception("Unsupported comparison op 0x%X", static_cast<u32>(op));
}
GLenum stencil_op(rsx::stencil_op op)
{
switch (op)
{
case rsx::stencil_op::invert: return GL_INVERT;
case rsx::stencil_op::keep: return GL_KEEP;
case rsx::stencil_op::zero: return GL_ZERO;
case rsx::stencil_op::replace: return GL_REPLACE;
case rsx::stencil_op::incr: return GL_INCR;
case rsx::stencil_op::decr: return GL_DECR;
case rsx::stencil_op::incr_wrap: return GL_INCR_WRAP;
case rsx::stencil_op::decr_wrap: return GL_DECR_WRAP;
}
fmt::throw_exception("Unsupported stencil op 0x%X", static_cast<u32>(op));
}
GLenum blend_equation(rsx::blend_equation op)
{
switch (op)
{
// Note: add may already be signed on GL; add_signed is emulated using FUNC_ADD below
case rsx::blend_equation::add_signed:
rsx_log.trace("blend equation add_signed used. Emulating using FUNC_ADD");
[[fallthrough]];
case rsx::blend_equation::add: return GL_FUNC_ADD;
case rsx::blend_equation::min: return GL_MIN;
case rsx::blend_equation::max: return GL_MAX;
case rsx::blend_equation::subtract: return GL_FUNC_SUBTRACT;
case rsx::blend_equation::reverse_subtract_signed:
rsx_log.trace("blend equation reverse_subtract_signed used. Emulating using FUNC_REVERSE_SUBTRACT");
[[fallthrough]];
case rsx::blend_equation::reverse_subtract: return GL_FUNC_REVERSE_SUBTRACT;
case rsx::blend_equation::reverse_add_signed:
default:
rsx_log.error("Blend equation 0x%X is unimplemented!", static_cast<u32>(op));
return GL_FUNC_ADD;
}
}
GLenum blend_factor(rsx::blend_factor op)
{
switch (op)
{
case rsx::blend_factor::zero: return GL_ZERO;
case rsx::blend_factor::one: return GL_ONE;
case rsx::blend_factor::src_color: return GL_SRC_COLOR;
case rsx::blend_factor::one_minus_src_color: return GL_ONE_MINUS_SRC_COLOR;
case rsx::blend_factor::dst_color: return GL_DST_COLOR;
case rsx::blend_factor::one_minus_dst_color: return GL_ONE_MINUS_DST_COLOR;
case rsx::blend_factor::src_alpha: return GL_SRC_ALPHA;
case rsx::blend_factor::one_minus_src_alpha: return GL_ONE_MINUS_SRC_ALPHA;
case rsx::blend_factor::dst_alpha: return GL_DST_ALPHA;
case rsx::blend_factor::one_minus_dst_alpha: return GL_ONE_MINUS_DST_ALPHA;
case rsx::blend_factor::src_alpha_saturate: return GL_SRC_ALPHA_SATURATE;
case rsx::blend_factor::constant_color: return GL_CONSTANT_COLOR;
case rsx::blend_factor::one_minus_constant_color: return GL_ONE_MINUS_CONSTANT_COLOR;
case rsx::blend_factor::constant_alpha: return GL_CONSTANT_ALPHA;
case rsx::blend_factor::one_minus_constant_alpha: return GL_ONE_MINUS_CONSTANT_ALPHA;
}
fmt::throw_exception("Unsupported blend factor 0x%X", static_cast<u32>(op));
}
GLenum logic_op(rsx::logic_op op)
{
switch (op)
{
case rsx::logic_op::logic_clear: return GL_CLEAR;
case rsx::logic_op::logic_and: return GL_AND;
case rsx::logic_op::logic_and_reverse: return GL_AND_REVERSE;
case rsx::logic_op::logic_copy: return GL_COPY;
case rsx::logic_op::logic_and_inverted: return GL_AND_INVERTED;
case rsx::logic_op::logic_noop: return GL_NOOP;
case rsx::logic_op::logic_xor: return GL_XOR;
case rsx::logic_op::logic_or: return GL_OR;
case rsx::logic_op::logic_nor: return GL_NOR;
case rsx::logic_op::logic_equiv: return GL_EQUIV;
case rsx::logic_op::logic_invert: return GL_INVERT;
case rsx::logic_op::logic_or_reverse: return GL_OR_REVERSE;
case rsx::logic_op::logic_copy_inverted: return GL_COPY_INVERTED;
case rsx::logic_op::logic_or_inverted: return GL_OR_INVERTED;
case rsx::logic_op::logic_nand: return GL_NAND;
case rsx::logic_op::logic_set: return GL_SET;
}
fmt::throw_exception("Unsupported logic op 0x%X", static_cast<u32>(op));
}
GLenum front_face(rsx::front_face op)
{
//NOTE: RSX face winding is always based off of upper-left corner like vulkan, but GL is bottom left
//shader_window_origin register does not affect this
//verified with Outrun Online Arcade (window_origin::top) and DS2 (window_origin::bottom)
//correctness of face winding checked using stencil test (GOW collection shadows)
switch (op)
{
case rsx::front_face::cw: return GL_CCW;
case rsx::front_face::ccw: return GL_CW;
}
fmt::throw_exception("Unsupported front face 0x%X", static_cast<u32>(op));
}
GLenum cull_face(rsx::cull_face op)
{
switch (op)
{
case rsx::cull_face::front: return GL_FRONT;
case rsx::cull_face::back: return GL_BACK;
case rsx::cull_face::front_and_back: return GL_FRONT_AND_BACK;
}
fmt::throw_exception("Unsupported cull face 0x%X", static_cast<u32>(op));
}
}
void GLGSRender::update_draw_state()
{
m_profiler.start();
gl_state.enable(GL_SCISSOR_TEST);
gl_state.enable(rsx::method_registers.dither_enabled(), GL_DITHER);
if (m_rtts.m_bound_depth_stencil.first)
{
// Z-buffer is active.
gl_state.depth_mask(rsx::method_registers.depth_write_enabled());
gl_state.stencil_mask(rsx::method_registers.stencil_mask());
gl_state.enable(rsx::method_registers.depth_clamp_enabled() || !rsx::method_registers.depth_clip_enabled(), GL_DEPTH_CLAMP);
if (gl_state.enable(rsx::method_registers.depth_test_enabled(), GL_DEPTH_TEST))
{
gl_state.depth_func(gl::comparison_op(rsx::method_registers.depth_func()));
}
if (gl::get_driver_caps().EXT_depth_bounds_test && (gl_state.enable(rsx::method_registers.depth_bounds_test_enabled(), GL_DEPTH_BOUNDS_TEST_EXT)))
{
gl_state.depth_bounds(rsx::method_registers.depth_bounds_min(), rsx::method_registers.depth_bounds_max());
}
if (gl::get_driver_caps().NV_depth_buffer_float_supported)
{
gl_state.depth_range(rsx::method_registers.clip_min(), rsx::method_registers.clip_max());
}
if (gl_state.enable(rsx::method_registers.stencil_test_enabled(), GL_STENCIL_TEST))
{
gl_state.stencil_func(gl::comparison_op(rsx::method_registers.stencil_func()),
rsx::method_registers.stencil_func_ref(),
rsx::method_registers.stencil_func_mask());
gl_state.stencil_op(gl::stencil_op(rsx::method_registers.stencil_op_fail()), gl::stencil_op(rsx::method_registers.stencil_op_zfail()),
gl::stencil_op(rsx::method_registers.stencil_op_zpass()));
if (rsx::method_registers.two_sided_stencil_test_enabled())
{
gl_state.stencil_back_mask(rsx::method_registers.back_stencil_mask());
gl_state.stencil_back_func(gl::comparison_op(rsx::method_registers.back_stencil_func()),
rsx::method_registers.back_stencil_func_ref(), rsx::method_registers.back_stencil_func_mask());
gl_state.stencil_back_op(gl::stencil_op(rsx::method_registers.back_stencil_op_fail()),
gl::stencil_op(rsx::method_registers.back_stencil_op_zfail()), gl::stencil_op(rsx::method_registers.back_stencil_op_zpass()));
}
}
}
if (m_rtts.get_color_surface_count())
{
// Color buffer is active
const auto host_write_mask = rsx::get_write_output_mask(rsx::method_registers.surface_color());
for (int index = 0; index < m_rtts.get_color_surface_count(); ++index)
{
bool color_mask_b = rsx::method_registers.color_mask_b(index);
bool color_mask_g = rsx::method_registers.color_mask_g(index);
bool color_mask_r = rsx::method_registers.color_mask_r(index);
bool color_mask_a = rsx::method_registers.color_mask_a(index);
switch (rsx::method_registers.surface_color())
{
case rsx::surface_color_format::b8:
rsx::get_b8_colormask(color_mask_r, color_mask_g, color_mask_b, color_mask_a);
break;
case rsx::surface_color_format::g8b8:
rsx::get_g8b8_r8g8_colormask(color_mask_r, color_mask_g, color_mask_b, color_mask_a);
break;
default:
break;
}
gl_state.color_maski(
index,
color_mask_r && host_write_mask[0],
color_mask_g && host_write_mask[1],
color_mask_b && host_write_mask[2],
color_mask_a && host_write_mask[3]);
}
// LogicOp and Blend are mutually exclusive. If both are enabled, LogicOp takes precedence.
// In OpenGL, this behavior is enforced in spec, but let's enforce it at renderer level as well.
if (gl_state.enable(rsx::method_registers.logic_op_enabled(), GL_COLOR_LOGIC_OP))
{
gl_state.logic_op(gl::logic_op(rsx::method_registers.logic_operation()));
gl_state.enablei(GL_FALSE, GL_BLEND, 0);
gl_state.enablei(GL_FALSE, GL_BLEND, 1);
gl_state.enablei(GL_FALSE, GL_BLEND, 2);
gl_state.enablei(GL_FALSE, GL_BLEND, 3);
}
else
{
bool mrt_blend_enabled[] =
{
rsx::method_registers.blend_enabled(),
rsx::method_registers.blend_enabled_surface_1(),
rsx::method_registers.blend_enabled_surface_2(),
rsx::method_registers.blend_enabled_surface_3()
};
if (mrt_blend_enabled[0] || mrt_blend_enabled[1] || mrt_blend_enabled[2] || mrt_blend_enabled[3])
{
glBlendFuncSeparate(gl::blend_factor(rsx::method_registers.blend_func_sfactor_rgb()),
gl::blend_factor(rsx::method_registers.blend_func_dfactor_rgb()),
gl::blend_factor(rsx::method_registers.blend_func_sfactor_a()),
gl::blend_factor(rsx::method_registers.blend_func_dfactor_a()));
auto blend_colors = rsx::get_constant_blend_colors();
glBlendColor(blend_colors[0], blend_colors[1], blend_colors[2], blend_colors[3]);
glBlendEquationSeparate(gl::blend_equation(rsx::method_registers.blend_equation_rgb()),
gl::blend_equation(rsx::method_registers.blend_equation_a()));
}
gl_state.enablei(mrt_blend_enabled[0], GL_BLEND, 0);
gl_state.enablei(mrt_blend_enabled[1], GL_BLEND, 1);
gl_state.enablei(mrt_blend_enabled[2], GL_BLEND, 2);
gl_state.enablei(mrt_blend_enabled[3], GL_BLEND, 3);
}
}
switch (rsx::method_registers.current_draw_clause.primitive)
{
case rsx::primitive_type::lines:
case rsx::primitive_type::line_loop:
case rsx::primitive_type::line_strip:
gl_state.line_width(rsx::method_registers.line_width() * rsx::get_resolution_scale());
gl_state.enable(rsx::method_registers.line_smooth_enabled(), GL_LINE_SMOOTH);
break;
default:
gl_state.enable(rsx::method_registers.poly_offset_point_enabled(), GL_POLYGON_OFFSET_POINT);
gl_state.enable(rsx::method_registers.poly_offset_line_enabled(), GL_POLYGON_OFFSET_LINE);
gl_state.enable(rsx::method_registers.poly_offset_fill_enabled(), GL_POLYGON_OFFSET_FILL);
// offset_bias is the constant factor, multiplied by the implementation factor R
// offset_scale is the slope factor, multiplied by the triangle slope factor M
const auto poly_offset_scale = rsx::method_registers.poly_offset_scale();
auto poly_offset_bias = rsx::method_registers.poly_offset_bias();
if (auto ds = m_rtts.m_bound_depth_stencil.second;
ds && ds->get_internal_format() == gl::texture::internal_format::depth24_stencil8)
{
// Check details in VKDraw.cpp about behaviour of RSX vs desktop D24X8 implementations
// TLDR, RSX expects R = 16,777,215 (2^24 - 1)
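// GL computes offset = M * scale + r * bias, where r is the smallest resolvable depth step;
// drivers deriving r from 2^23 - 1 (or 2^22 - 1) use a step 2x (or 4x) RSX's, so the
// bias is rescaled by 0.5 (or 0.25) below to keep the effective offset unchanged.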
const auto& caps = gl::get_driver_caps();
if (caps.vendor_NVIDIA || caps.vendor_MESA)
{
// R derived to be 8388607 (2^23 - 1)
poly_offset_bias *= 0.5f;
}
else if (caps.vendor_AMD)
{
// R derived to be 4194303 (2^22 - 1)
poly_offset_bias *= 0.25f;
}
}
gl_state.polygon_offset(poly_offset_scale, poly_offset_bias);
if (gl_state.enable(rsx::method_registers.cull_face_enabled(), GL_CULL_FACE))
{
gl_state.cull_face(gl::cull_face(rsx::method_registers.cull_face_mode()));
}
gl_state.front_face(gl::front_face(rsx::method_registers.front_face_mode()));
break;
}
// Clip planes
gl_state.clip_planes((current_vertex_program.output_mask >> CELL_GCM_ATTRIB_OUTPUT_UC0) & 0x3F);
// Sample control
// TODO: MinSampleShading
//gl_state.enable(rsx::method_registers.msaa_enabled(), GL_MULTISAMPLE);
//gl_state.enable(rsx::method_registers.msaa_alpha_to_coverage_enabled(), GL_SAMPLE_ALPHA_TO_COVERAGE);
//gl_state.enable(rsx::method_registers.msaa_alpha_to_one_enabled(), GL_SAMPLE_ALPHA_TO_ONE);
//TODO
//NV4097_SET_ANISO_SPREAD
//NV4097_SET_SPECULAR_ENABLE
//NV4097_SET_TWO_SIDE_LIGHT_EN
//NV4097_SET_FLAT_SHADE_OP
//NV4097_SET_EDGE_FLAG
//NV4097_SET_COLOR_KEY_COLOR
//NV4097_SET_SHADER_CONTROL
//NV4097_SET_ZMIN_MAX_CONTROL
//NV4097_SET_ANTI_ALIASING_CONTROL
//NV4097_SET_CLIP_ID_TEST_ENABLE
// For OGL Z range is updated every draw as it is separate from viewport config
m_graphics_state.clear(rsx::pipeline_state::zclip_config_state_dirty);
m_frame_stats.setup_time += m_profiler.duration();
}
void GLGSRender::load_texture_env()
{
// Load textures
gl::command_context cmd{ gl_state };
std::lock_guard lock(m_sampler_mutex);
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
if (!fs_sampler_state[i])
fs_sampler_state[i] = std::make_unique<gl::texture_cache::sampled_image_descriptor>();
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
const auto& tex = rsx::method_registers.fragment_textures[i];
const auto previous_format_class = sampler_state->format_class;
if (m_samplers_dirty || m_textures_dirty[i] || m_gl_texture_cache.test_if_descriptor_expired(cmd, m_rtts, sampler_state, tex))
{
if (tex.enabled())
{
*sampler_state = m_gl_texture_cache.upload_texture(cmd, tex, m_rtts);
if (sampler_state->validate())
{
if (m_textures_dirty[i])
{
m_fs_sampler_states[i].apply(tex, fs_sampler_state[i].get());
}
else if (sampler_state->format_class != previous_format_class)
{
m_graphics_state |= rsx::fragment_program_state_dirty;
}
}
}
else
{
*sampler_state = {};
}
m_textures_dirty[i] = false;
}
}
for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
if (!vs_sampler_state[i])
vs_sampler_state[i] = std::make_unique<gl::texture_cache::sampled_image_descriptor>();
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
const auto& tex = rsx::method_registers.vertex_textures[i];
const auto previous_format_class = sampler_state->format_class;
if (m_samplers_dirty || m_vertex_textures_dirty[i] || m_gl_texture_cache.test_if_descriptor_expired(cmd, m_rtts, sampler_state, tex))
{
if (rsx::method_registers.vertex_textures[i].enabled())
{
*sampler_state = m_gl_texture_cache.upload_texture(cmd, rsx::method_registers.vertex_textures[i], m_rtts);
if (sampler_state->validate())
{
if (m_vertex_textures_dirty[i])
{
m_vs_sampler_states[i].apply(tex, vs_sampler_state[i].get());
}
else if (sampler_state->format_class != previous_format_class)
{
m_graphics_state |= rsx::vertex_program_state_dirty;
}
}
}
else
{
*sampler_state = {};
}
m_vertex_textures_dirty[i] = false;
}
}
m_samplers_dirty.store(false);
}
void GLGSRender::bind_texture_env()
{
// Bind textures and resolve external copy operations
gl::command_context cmd{ gl_state };
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
gl::texture_view* view = nullptr;
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
if (rsx::method_registers.fragment_textures[i].enabled() &&
sampler_state->validate())
{
if (view = sampler_state->image_handle; !view) [[unlikely]]
{
view = m_gl_texture_cache.create_temporary_subresource(cmd, sampler_state->external_subresource_desc);
}
}
if (view) [[likely]]
{
view->bind(cmd, GL_FRAGMENT_TEXTURES_START + i);
if (current_fragment_program.texture_state.redirected_textures & (1 << i))
{
auto root_texture = static_cast<gl::viewable_image*>(view->image());
auto stencil_view = root_texture->get_view(rsx::default_remap_vector.with_encoding(gl::GL_REMAP_IDENTITY), gl::image_aspect::stencil);
stencil_view->bind(cmd, GL_STENCIL_MIRRORS_START + i);
}
}
else
{
auto target = gl::get_target(current_fragment_program.get_texture_dimension(i));
cmd->bind_texture(GL_FRAGMENT_TEXTURES_START + i, target, m_null_textures[target]->id());
if (current_fragment_program.texture_state.redirected_textures & (1 << i))
{
cmd->bind_texture(GL_STENCIL_MIRRORS_START + i, target, m_null_textures[target]->id());
}
}
}
for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
continue;
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
if (rsx::method_registers.vertex_textures[i].enabled() &&
sampler_state->validate())
{
if (sampler_state->image_handle) [[likely]]
{
sampler_state->image_handle->bind(cmd, GL_VERTEX_TEXTURES_START + i);
}
else
{
m_gl_texture_cache.create_temporary_subresource(cmd, sampler_state->external_subresource_desc)->bind(cmd, GL_VERTEX_TEXTURES_START + i);
}
}
else
{
cmd->bind_texture(GL_VERTEX_TEXTURES_START + i, GL_TEXTURE_2D, GL_NONE);
}
}
}
void GLGSRender::emit_geometry(u32 sub_index)
{
const auto do_heap_cleanup = [this]()
{
if (manually_flush_ring_buffers)
{
m_attrib_ring_buffer->unmap();
m_index_ring_buffer->unmap();
}
else
{
//DMA push; not needed with MAP_COHERENT
//glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
}
};
m_profiler.start();
auto& draw_call = rsx::method_registers.current_draw_clause;
const rsx::flags32_t vertex_state_mask = rsx::vertex_base_changed | rsx::vertex_arrays_changed;
const rsx::flags32_t vertex_state = (sub_index == 0) ? rsx::vertex_arrays_changed : draw_call.execute_pipeline_dependencies(m_ctx) & vertex_state_mask;
if (vertex_state & rsx::vertex_arrays_changed)
{
analyse_inputs_interleaved(m_vertex_layout);
}
else if (vertex_state & rsx::vertex_base_changed)
{
// Rebase vertex base addresses instead of re-running the full vertex layout analysis
for (auto& info : m_vertex_layout.interleaved_blocks)
{
info->vertex_range.second = 0;
const auto vertex_base_offset = rsx::method_registers.vertex_data_base_offset();
info->real_offset_address = rsx::get_address(rsx::get_vertex_offset_from_base(vertex_base_offset, info->base_offset), info->memory_location);
}
}
else
{
// Discard cached results
for (auto& info : m_vertex_layout.interleaved_blocks)
{
info->vertex_range.second = 0;
}
}
if (vertex_state && !m_vertex_layout.validate())
{
// No vertex inputs enabled
// Execute remaining pipeline barriers with a NOP draw
do
{
draw_call.execute_pipeline_dependencies(m_ctx);
} while (draw_call.next());
draw_call.end();
return;
}
if (manually_flush_ring_buffers)
{
//Use approximations to reserve space. This path is mostly for debug purposes anyway
u32 approx_vertex_count = draw_call.get_elements_count();
u32 approx_working_buffer_size = approx_vertex_count * 256;
//Allocate 256K heap if we have no approximation at this time (inlined array)
m_attrib_ring_buffer->reserve_storage_on_heap(std::max(approx_working_buffer_size, 256 * 1024U));
m_index_ring_buffer->reserve_storage_on_heap(16 * 1024);
}
// Do vertex upload before RTT prep / texture lookups to give the driver time to push data
auto upload_info = set_vertex_buffer();
do_heap_cleanup();
if (upload_info.vertex_draw_count == 0)
{
// Malformed vertex setup; abort
return;
}
const GLenum draw_mode = gl::draw_mode(draw_call.primitive);
update_vertex_env(upload_info);
m_frame_stats.vertex_upload_time += m_profiler.duration();
gl_state.use_program(m_program->id());
if (!upload_info.index_info)
{
if (draw_call.is_single_draw())
{
glDrawArrays(draw_mode, 0, upload_info.vertex_draw_count);
}
else
{
const auto subranges = draw_call.get_subranges();
const auto draw_count = subranges.size();
const auto driver_caps = gl::get_driver_caps();
bool use_draw_arrays_fallback = false;
m_scratch_buffer.resize(draw_count * 24);
GLint* firsts = reinterpret_cast<GLint*>(m_scratch_buffer.data());
GLsizei* counts = (firsts + draw_count);
const GLvoid** offsets = utils::bless<const GLvoid*>(counts + draw_count);
u32 first = 0;
u32 dst_index = 0;
for (const auto &range : subranges)
{
firsts[dst_index] = first;
counts[dst_index] = range.count;
offsets[dst_index++] = reinterpret_cast<const GLvoid*>(u64{first << 2});
if (driver_caps.vendor_AMD && (first + range.count) > (0x100000 >> 2))
{
//Unlikely, but added here in case the identity buffer is not large enough somehow
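// (0x100000 >> 2 = 262,144: presumably the identity buffer holds 1MiB of u32 indices)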
use_draw_arrays_fallback = true;
break;
}
first += range.count;
}
if (use_draw_arrays_fallback)
{
//MultiDrawArrays is broken on some primitive types using AMD. One known type is GL_TRIANGLE_STRIP but there could be more
for (u32 n = 0; n < draw_count; ++n)
{
glDrawArrays(draw_mode, firsts[n], counts[n]);
}
}
else if (driver_caps.vendor_AMD)
{
//Use identity index buffer to fix broken vertexID on AMD
m_identity_index_buffer->bind();
glMultiDrawElements(draw_mode, counts, GL_UNSIGNED_INT, offsets, static_cast<GLsizei>(draw_count));
}
else
{
//Normal render
glMultiDrawArrays(draw_mode, firsts, counts, static_cast<GLsizei>(draw_count));
}
}
}
else
{
const GLenum index_type = std::get<0>(*upload_info.index_info);
const u32 index_offset = std::get<1>(*upload_info.index_info);
const bool restarts_valid = gl::is_primitive_native(draw_call.primitive) && !draw_call.is_disjoint_primitive;
if (gl_state.enable(restarts_valid && rsx::method_registers.restart_index_enabled(), GL_PRIMITIVE_RESTART))
{
glPrimitiveRestartIndex((index_type == GL_UNSIGNED_SHORT) ? 0xffff : 0xffffffff);
}
m_index_ring_buffer->bind();
if (draw_call.is_single_draw())
{
glDrawElements(draw_mode, upload_info.vertex_draw_count, index_type, reinterpret_cast<GLvoid*>(u64{index_offset}));
}
else
{
const auto subranges = draw_call.get_subranges();
const auto draw_count = subranges.size();
const u32 type_scale = (index_type == GL_UNSIGNED_SHORT) ? 1 : 2;
uptr index_ptr = index_offset;
m_scratch_buffer.resize(draw_count * 16);
GLsizei *counts = reinterpret_cast<GLsizei*>(m_scratch_buffer.data());
const GLvoid** offsets = utils::bless<const GLvoid*>(counts + draw_count);
int dst_index = 0;
for (const auto &range : subranges)
{
const auto index_size = get_index_count(draw_call.primitive, range.count);
counts[dst_index] = index_size;
offsets[dst_index++] = reinterpret_cast<const GLvoid*>(index_ptr);
index_ptr += (index_size << type_scale);
}
glMultiDrawElements(draw_mode, counts, index_type, offsets, static_cast<GLsizei>(draw_count));
}
}
m_frame_stats.draw_exec_time += m_profiler.duration();
}
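// Hedged sketch, not part of the renderer: how back-to-back subranges are
// packed into the firsts/counts arrays consumed by glMultiDrawArrays in the
// non-indexed path above. 'example_subrange' is a hypothetical stand-in for
// the draw clause range type; <vector> and the GL typedefs are assumed visible.
namespace
{
	struct example_subrange { u32 count; };

	[[maybe_unused]] void pack_multidraw_args(const std::vector<example_subrange>& subranges,
		std::vector<GLint>& firsts, std::vector<GLsizei>& counts)
	{
		u32 first = 0;
		for (const auto& range : subranges)
		{
			firsts.push_back(static_cast<GLint>(first));         // first vertex of this subdraw
			counts.push_back(static_cast<GLsizei>(range.count)); // number of vertices in it
			first += range.count; // subranges sit consecutively in the vertex upload
		}
	}
}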
void GLGSRender::begin()
{
// Save shader state now before prefetch and loading happens
m_interpreter_state = (m_graphics_state.load() & rsx::pipeline_state::invalidate_pipeline_bits);
rsx::thread::begin();
if (skip_current_frame || cond_render_ctrl.disable_rendering())
{
return;
}
init_buffers(rsx::framebuffer_creation_context::context_draw);
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
// Shaders need to be reloaded.
m_program = nullptr;
}
}
void GLGSRender::end()
{
m_profiler.start();
if (skip_current_frame || !m_graphics_state.test(rsx::rtt_config_valid) || cond_render_ctrl.disable_rendering())
{
execute_nop_draw();
rsx::thread::end();
return;
}
if (m_graphics_state & (rsx::pipeline_state::fragment_program_ucode_dirty | rsx::pipeline_state::vertex_program_ucode_dirty))
{
analyse_current_rsx_pipeline();
}
m_frame_stats.setup_time += m_profiler.duration();
// Active texture environment is used to decode shaders
load_texture_env();
m_frame_stats.textures_upload_time += m_profiler.duration();
// NOTE: Due to common OpenGL driver architecture, vertex data has to be uploaded as far away from the draw as possible
// TODO: Implement shaders cache prediction to avoid uploading vertex data if draw is going to skip
if (!load_program())
{
// Program is not ready, skip drawing this
std::this_thread::yield();
execute_nop_draw();
// m_rtts.on_write(); - breaks games for obvious reasons
rsx::thread::end();
return;
}
// Load program execution environment
load_program_env();
m_frame_stats.setup_time += m_profiler.duration();
bind_texture_env();
m_gl_texture_cache.release_uncached_temporary_subresources();
m_frame_stats.textures_upload_time += m_profiler.duration();
gl::command_context cmd{ gl_state };
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil)) ds->write_barrier(cmd);
for (auto &rtt : m_rtts.m_bound_render_targets)
{
if (auto surface = std::get<1>(rtt))
{
surface->write_barrier(cmd);
}
}
update_draw_state();
if (g_cfg.video.debug_output)
{
m_program->validate();
}
rsx::method_registers.current_draw_clause.begin();
u32 subdraw = 0u;
do
{
emit_geometry(subdraw++);
}
while (rsx::method_registers.current_draw_clause.next());
m_rtts.on_write(m_framebuffer_layout.color_write_enabled, m_framebuffer_layout.zeta_write_enabled);
m_attrib_ring_buffer->notify();
m_index_ring_buffer->notify();
m_fragment_env_buffer->notify();
m_vertex_env_buffer->notify();
m_texture_parameters_buffer->notify();
m_vertex_layout_buffer->notify();
m_fragment_constants_buffer->notify();
m_transform_constants_buffer->notify();
m_frame_stats.setup_time += m_profiler.duration();
rsx::thread::end();
}
| 26,382 | C++ | .cpp | 672 | 35.901786 | 152 | 0.710731 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 5,499 | fsr_pass.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/upscalers/fsr1/fsr_pass.cpp |
#include "stdafx.h"
#include "../fsr_pass.h"
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-qualifiers"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wunused-function"
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wignored-qualifiers"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#define A_CPU 1
#include "3rdparty/GPUOpen/include/ffx_a.h"
#include "3rdparty/GPUOpen/include/ffx_fsr1.h"
#undef A_CPU
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(__clang__)
#pragma clang diagnostic pop
#endif
namespace gl
{
namespace FidelityFX
{
fsr_pass::fsr_pass(const std::string& config_definitions, u32 push_constants_size)
{
// Just use AMD-provided source with minimal modification
const char* shader_core =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ubershader.glsl"
;
// Replacements
const char* ffx_a_contents =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ffx_a_flattened.inc"
;
const char* ffx_fsr_contents =
#include "Emu/RSX/Program/Upscalers/FSR1/fsr_ffx_fsr1_flattened.inc"
;
const std::pair<std::string_view, std::string> replacement_table[] =
{
{ "%FFX_DEFINITIONS%", config_definitions },
{ "%FFX_A_IMPORT%", ffx_a_contents },
{ "%FFX_FSR_IMPORT%", ffx_fsr_contents },
{ "layout(set=0,", "layout(" },
{ "%push_block%", fmt::format("binding=%d, std140", GL_COMPUTE_BUFFER_SLOT(0)) }
};
m_src = shader_core;
m_src = fmt::replace_all(m_src, replacement_table);
// Fill with 0 to avoid sending incomplete/unused variables to the GPU
m_constants_buf.resize(utils::rounded_div(push_constants_size, 4), 0);
create();
m_sampler.create();
m_sampler.apply_defaults(GL_LINEAR);
}
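// Hedged sketch of the substitution step above: a stand-alone replace-all over
// a (needle, replacement) table, assumed to behave like the fmt::replace_all
// helper used here. Only std::string facilities are used; <string>,
// <string_view>, <utility> and <initializer_list> are assumed included via stdafx.h.
namespace
{
	[[maybe_unused]] std::string replace_all_sketch(std::string text,
		std::initializer_list<std::pair<std::string_view, std::string_view>> table)
	{
		for (const auto& [needle, replacement] : table)
		{
			// Advance past each replacement so a needle occurring inside its own
			// replacement cannot cause an infinite loop
			for (std::size_t pos = 0; (pos = text.find(needle, pos)) != std::string::npos; pos += replacement.length())
			{
				text.replace(pos, needle.length(), replacement);
			}
		}
		return text;
	}
}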
fsr_pass::~fsr_pass()
{
m_ubo.remove();
m_sampler.remove();
}
void fsr_pass::bind_resources()
{
// Upload the push constants and bind the uniform buffer; image slots are assigned once on first use
const u32 push_buffer_size = ::size32(m_constants_buf) * sizeof(m_constants_buf[0]);
if (!m_ubo)
{
ensure(compiled);
m_ubo.create(gl::buffer::target::uniform, push_buffer_size, nullptr, gl::buffer::memory_type::local, gl::buffer::usage::dynamic_update);
// Statically bind the image sources
m_program.uniforms["InputTexture"] = GL_TEMP_IMAGE_SLOT(0);
m_program.uniforms["OutputTexture"] = GL_COMPUTE_IMAGE_SLOT(0);
}
m_ubo.sub_data(0, push_buffer_size, m_constants_buf.data());
m_ubo.bind_range(GL_COMPUTE_BUFFER_SLOT(0), 0, push_buffer_size);
}
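// For reference, the same lazy-create/update/bind sequence in raw GL 4.5 DSA
// calls (hypothetical stand-alone sketch, not this codebase's wrappers):
//   if (!ubo) { glCreateBuffers(1, &ubo); glNamedBufferData(ubo, size, nullptr, GL_DYNAMIC_DRAW); }
//   glNamedBufferSubData(ubo, 0, size, data);
//   glBindBufferRange(GL_UNIFORM_BUFFER, binding, ubo, 0, size);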
void fsr_pass::run(gl::command_context& cmd, gl::texture* src, gl::texture* dst, const size2u& input_size, const size2u& output_size)
{
m_input_image = src;
m_output_image = dst;
m_input_size = input_size;
m_output_size = output_size;
configure();
saved_sampler_state saved(GL_TEMP_IMAGE_SLOT(0), m_sampler);
cmd->bind_texture(GL_TEMP_IMAGE_SLOT(0), GL_TEXTURE_2D, src->id());
glBindImageTexture(GL_COMPUTE_IMAGE_SLOT(0), dst->id(), 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8);
constexpr auto wg_size = 16;
const auto invocations_x = utils::aligned_div(output_size.width, wg_size);
const auto invocations_y = utils::aligned_div(output_size.height, wg_size);
ensure(invocations_x == (output_size.width + (wg_size - 1)) / wg_size);
ensure(invocations_y == (output_size.height + (wg_size - 1)) / wg_size);
compute_task::run(cmd, invocations_x, invocations_y);
}
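// Worked example of the dispatch math above: for a 1920x1080 output and a
// 16x16 workgroup, aligned_div gives ceil(1920/16) x ceil(1080/16) = 120 x 68
// groups; the last group row covers rows 1072..1087, of which only 1072..1079
// contain real output.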
easu_pass::easu_pass()
: fsr_pass(
"#define SAMPLE_EASU 1\n"
"#define SAMPLE_RCAS 0\n"
"#define SAMPLE_BILINEAR 0\n"
"#define SAMPLE_SLOW_FALLBACK 1",
80 // 5*VEC4
)
{}
void easu_pass::configure()
{
// NOTE: Configuration vector 4 is unused as we do not support HDR natively
auto con0 = &m_constants_buf[0];
auto con1 = &m_constants_buf[4];
auto con2 = &m_constants_buf[8];
auto con3 = &m_constants_buf[12];
FsrEasuCon(con0, con1, con2, con3,
static_cast<f32>(m_input_size.width), static_cast<f32>(m_input_size.height), // Incoming viewport size to upscale (actual size)
static_cast<f32>(m_input_image->width()), static_cast<f32>(m_input_image->height()), // Size of the raw image to upscale (in case viewport does not cover it all)
static_cast<f32>(m_output_size.width), static_cast<f32>(m_output_size.height)); // Size of output viewport (target size)
}
rcas_pass::rcas_pass()
: fsr_pass(
"#define SAMPLE_RCAS 1\n"
"#define SAMPLE_EASU 0\n"
"#define SAMPLE_BILINEAR 0\n"
"#define SAMPLE_SLOW_FALLBACK 1",
32 // 2*VEC4
)
{}
void rcas_pass::configure()
{
// 0 is actually the sharpest with 2 being the chosen limit. Each progressive unit 'halves' the sharpening intensity.
auto cas_attenuation = 2.f - (g_cfg.video.vk.rcas_sharpening_intensity / 50.f);
FsrRcasCon(&m_constants_buf[0], cas_attenuation);
}
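// Worked example of the mapping above, assuming the intensity slider runs
// 0..100: intensity 100 -> attenuation 0.0 (sharpest), 50 -> 1.0, and
// 0 -> 2.0 (the stated limit, i.e. the softest setting).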
}
fsr_upscale_pass::~fsr_upscale_pass()
{
dispose_images();
}
void fsr_upscale_pass::dispose_images()
{
m_output_left.reset();
m_output_right.reset();
m_intermediate_data.reset();
m_flip_fbo.remove();
}
void fsr_upscale_pass::initialize_image(u32 output_w, u32 output_h, rsx::flags32_t mode)
{
dispose_images();
const auto initialize_image_impl = [&]()
{
return std::make_unique<gl::viewable_image>(
GL_TEXTURE_2D,
output_w, output_h, 1, 1,
GL_RGBA8, RSX_FORMAT_CLASS_COLOR);
};
if (mode & UPSCALE_LEFT_VIEW)
{
m_output_left = initialize_image_impl();
}
if (mode & UPSCALE_RIGHT_VIEW)
{
m_output_right = initialize_image_impl();
}
m_intermediate_data = initialize_image_impl();
}
gl::texture* fsr_upscale_pass::scale_output(
gl::command_context& cmd,
gl::texture* src,
const areai& src_region,
const areai& dst_region,
gl::flags32_t mode)
{
size2u input_size, output_size;
input_size.width = src_region.width();
input_size.height = src_region.height();
output_size.width = dst_region.width();
output_size.height = dst_region.height();
auto src_image = src;
auto input_region = src_region;
if (input_size.width < output_size.width && input_size.height < output_size.height)
{
// Cannot upscale both LEFT and RIGHT images at the same time.
// Default maps to LEFT for simplicity
ensure((mode & (UPSCALE_LEFT_VIEW | UPSCALE_RIGHT_VIEW)) != (UPSCALE_LEFT_VIEW | UPSCALE_RIGHT_VIEW));
auto& m_output_data = (mode & UPSCALE_LEFT_VIEW) ? m_output_left : m_output_right;
if (!m_output_data || m_output_data->width() != output_size.width || m_output_data->height() != output_size.height)
{
initialize_image(output_size.width, output_size.height, mode);
}
if (m_output_data)
{
// Execute the pass here
auto cs_easu_task = gl::get_compute_task<gl::FidelityFX::easu_pass>();
auto cs_rcas_task = gl::get_compute_task<gl::FidelityFX::rcas_pass>();
// EASU
cs_easu_task->run(cmd, src, m_intermediate_data.get(), input_size, output_size);
// RCAS
cs_rcas_task->run(cmd, m_intermediate_data.get(), m_output_data.get(), input_size, output_size);
// Swap input for FSR target
src_image = m_output_data.get();
input_region.x1 = 0;
input_region.x2 = src_image->width();
input_region.y1 = 0;
input_region.y2 = src_image->height();
}
}
if (mode & UPSCALE_AND_COMMIT)
{
m_flip_fbo.recreate();
m_flip_fbo.bind();
m_flip_fbo.color = src_image->id();
m_flip_fbo.read_buffer(m_flip_fbo.color);
m_flip_fbo.draw_buffer(m_flip_fbo.color);
m_flip_fbo.blit(gl::screen, input_region, dst_region, gl::buffers::color, gl::filter::linear);
return nullptr;
}
return src_image;
}
}
| 7,989 | C++ | .cpp | 214 | 32.443925 | 170 | 0.665495 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,500 | program.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/program.cpp |
#include "stdafx.h"
#include "program.h"
#include "state_tracker.hpp"
#include "Emu/system_config.h"
namespace gl
{
namespace glsl
{
void patch_macros_INTEL(std::string& source)
{
auto read_token = [&source](size_t start) -> std::tuple<size_t, size_t>
{
size_t string_begin = std::string::npos, i = start;
for (size_t count = 0; i < source.length(); ++i)
{
const auto& c = source[i];
const auto is_space = std::isspace(c);
if (string_begin == std::string::npos)
{
if (c == '\n') break;
if (is_space) continue;
string_begin = i;
}
if (is_space)
{
if (!count) break;
}
else if (c == '(')
{
count++;
}
else if (c == ')')
{
count--;
}
}
return std::make_tuple(string_begin, i - 1);
};
auto is_exempt = [&source](const std::string_view& token) -> bool
{
const char* handled_keywords[] =
{
"SSBO_LOCATION(x)",
"UBO_LOCATION(x)",
"IMAGE_LOCATION(x)"
};
for (const auto& keyword : handled_keywords)
{
if (token.starts_with(keyword))
{
return false;
}
}
return true;
};
size_t prev_loc = 0;
while (true)
{
// Find macro define blocks and remove the outer-most brackets around the expression part
const auto next_loc = source.find("#define", prev_loc);
if (next_loc == std::string::npos)
{
break;
}
prev_loc = next_loc + 1;
const auto [name_start, name_end] = read_token(next_loc + ("#define"sv).length());
if (name_start == std::string::npos)
{
break;
}
const auto macro_name = std::string_view(source.data() + name_start, (name_end - name_start) + 1);
if (is_exempt(macro_name))
{
continue;
}
const auto [expr_start, expr_end] = read_token(name_end + 1);
if (expr_start == std::string::npos)
{
continue;
}
if (source[expr_start] == '(' && source[expr_end] == ')')
{
rsx_log.notice("[Compiler warning] We'll remove brackets around the expression named '%s'. Add it to exclusion list if this is not desired.", macro_name);
source[expr_start] = ' ';
source[expr_end] = ' ';
}
}
}
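// Example of the patch above: only macros matching the handled_keywords list
// are touched, so a hypothetical
//     #define UBO_LOCATION(x) (x + 10)
// becomes
//     #define UBO_LOCATION(x)  x + 10
// with the outermost parentheses of the expression blanked out, sidestepping
// the broken macro expansion seen in some Intel GLSL compilers.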
void shader::precompile()
{
if (gl::get_driver_caps().vendor_INTEL)
{
// Workaround for broken macro expansion.
patch_macros_INTEL(source);
}
const char* str = source.c_str();
const GLint length = ::narrow<GLint>(source.length());
if (g_cfg.video.log_programs)
{
std::string base_name;
switch (type)
{
case ::glsl::program_domain::glsl_vertex_program:
base_name = "shaderlog/VertexProgram";
break;
case ::glsl::program_domain::glsl_fragment_program:
base_name = "shaderlog/FragmentProgram";
break;
case ::glsl::program_domain::glsl_compute_program:
base_name = "shaderlog/ComputeProgram";
break;
}
fs::write_file(fs::get_cache_dir() + base_name + std::to_string(m_id) + ".glsl", fs::rewrite, str, length);
}
glShaderSource(m_id, 1, &str, &length);
m_init_fence.create();
flush_command_queue(m_init_fence);
}
void shader::create(::glsl::program_domain type_, const std::string & src)
{
type = type_;
source = src;
GLenum shader_type{};
switch (type)
{
case ::glsl::program_domain::glsl_vertex_program:
shader_type = GL_VERTEX_SHADER;
break;
case ::glsl::program_domain::glsl_fragment_program:
shader_type = GL_FRAGMENT_SHADER;
break;
case ::glsl::program_domain::glsl_compute_program:
shader_type = GL_COMPUTE_SHADER;
break;
default:
rsx_log.fatal("gl::glsl::shader::compile(): Unhandled shader type (%d)", +type_);
return;
}
m_id = glCreateShader(shader_type);
precompile();
}
shader& shader::compile()
{
std::lock_guard lock(m_compile_lock);
if (m_is_compiled)
{
// Another thread compiled this already
return *this;
}
ensure(!m_init_fence.is_empty()); // Do not attempt to compile a shader_view!!
m_init_fence.server_wait_sync();
glCompileShader(m_id);
GLint status = GL_FALSE;
glGetShaderiv(m_id, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE)
{
GLint length = 0;
glGetShaderiv(m_id, GL_INFO_LOG_LENGTH, &length);
std::string error_msg;
if (length)
{
std::unique_ptr<GLchar[]> buf(new char[length + 1]);
glGetShaderInfoLog(m_id, length, nullptr, buf.get());
error_msg = buf.get();
}
rsx_log.fatal("Compilation failed: %s\nsource: %s", error_msg, source);
}
m_compiled_fence.create();
flush_command_queue(m_compiled_fence);
m_is_compiled = true;
return *this;
}
bool program::uniforms_t::has_location(const std::string & name, int* location)
{
auto found = locations.find(name);
if (found != locations.end())
{
if (location)
{
*location = found->second;
}
return (found->second >= 0);
}
auto result = glGetUniformLocation(m_program.id(), name.c_str());
locations[name] = result;
if (location)
{
*location = result;
}
return (result >= 0);
}
GLint program::uniforms_t::location(const std::string& name)
{
auto found = locations.find(name);
if (found != locations.end())
{
if (found->second >= 0)
{
return found->second;
}
else
{
rsx_log.fatal("%s not found.", name);
return -1;
}
}
auto result = glGetUniformLocation(m_program.id(), name.c_str());
if (result < 0)
{
rsx_log.fatal("%s not found.", name);
return result;
}
locations[name] = result;
return result;
}
void program::link(std::function<void(program*)> init_func)
{
glLinkProgram(m_id);
GLint status = GL_FALSE;
glGetProgramiv(m_id, GL_LINK_STATUS, &status);
if (status == GL_FALSE)
{
GLint length = 0;
glGetProgramiv(m_id, GL_INFO_LOG_LENGTH, &length);
std::string error_msg;
if (length)
{
std::unique_ptr<GLchar[]> buf(new char[length + 1]);
glGetProgramInfoLog(m_id, length, nullptr, buf.get());
error_msg = buf.get();
}
rsx_log.fatal("Linkage failed: %s", error_msg);
}
else
{
if (init_func)
{
init_func(this);
}
m_fence.create();
flush_command_queue(m_fence);
}
}
void program::validate()
{
glValidateProgram(m_id);
GLint status = GL_FALSE;
glGetProgramiv(m_id, GL_VALIDATE_STATUS, &status);
if (status == GL_FALSE)
{
GLint length = 0;
glGetProgramiv(m_id, GL_INFO_LOG_LENGTH, &length);
std::string error_msg;
if (length)
{
std::unique_ptr<GLchar[]> buf(new char[length + 1]);
glGetProgramInfoLog(m_id, length, nullptr, buf.get());
error_msg = buf.get();
}
rsx_log.error("Validation failed: %s", error_msg.c_str());
}
}
}
}
| 7,185 | C++ | .cpp | 264 | 21.215909 | 160 | 0.587406 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,501 | fbo.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/fbo.cpp |
#include "stdafx.h"
#include "fbo.h"
#include "buffer_object.h"
#include "vao.hpp"
#include "Emu/RSX/Common/simple_array.hpp"
namespace gl
{
const fbo screen{};
void fbo::create()
{
glGenFramebuffers(1, &m_id);
}
void fbo::bind() const
{
glBindFramebuffer(GL_FRAMEBUFFER, m_id);
}
void fbo::blit(const fbo& dst, areai src_area, areai dst_area, buffers buffers_, filter filter_) const
{
bind_as(target::read_frame_buffer);
dst.bind_as(target::draw_frame_buffer);
glBlitFramebuffer(
src_area.x1, src_area.y1, src_area.x2, src_area.y2,
dst_area.x1, dst_area.y1, dst_area.x2, dst_area.y2,
static_cast<GLbitfield>(buffers_), static_cast<GLenum>(filter_));
}
void fbo::bind_as(target target_) const
{
glBindFramebuffer(static_cast<int>(target_), id());
}
void fbo::remove()
{
if (m_id != GL_NONE)
{
glDeleteFramebuffers(1, &m_id);
m_id = GL_NONE;
}
}
bool fbo::created() const
{
return m_id != GL_NONE;
}
bool fbo::check() const
{
GLenum status = DSA_CALL2_RET(CheckNamedFramebufferStatus, m_id, GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
rsx_log.error("FBO check failed: 0x%04x", status);
return false;
}
return true;
}
void fbo::recreate()
{
if (created())
remove();
create();
}
void fbo::draw_buffer(const attachment& buffer) const
{
GLenum buf = buffer.id();
DSA_CALL3(NamedFramebufferDrawBuffers, FramebufferDrawBuffers, m_id, 1, &buf);
}
void fbo::draw_buffer(swapchain_buffer buffer) const
{
GLenum buf = static_cast<GLenum>(buffer);
DSA_CALL3(NamedFramebufferDrawBuffers, FramebufferDrawBuffers, m_id, 1, &buf);
}
void fbo::draw_buffers(const std::initializer_list<attachment>& indexes) const
{
rsx::simple_array<GLenum> ids;
ids.reserve(::size32(indexes));
for (auto& index : indexes)
ids.push_back(index.id());
DSA_CALL3(NamedFramebufferDrawBuffers, FramebufferDrawBuffers, m_id, static_cast<GLsizei>(ids.size()), ids.data());
}
void fbo::read_buffer(const attachment& buffer) const
{
DSA_CALL3(NamedFramebufferReadBuffer, FramebufferReadBuffer, m_id, buffer.id());
}
void fbo::read_buffer(swapchain_buffer buffer) const
{
GLenum buf = static_cast<GLenum>(buffer);
DSA_CALL3(NamedFramebufferReadBuffer, FramebufferReadBuffer, m_id, buf);
}
void fbo::draw_arrays(GLenum mode, GLsizei count, GLint first) const
{
save_binding_state save(*this);
glDrawArrays(mode, first, count);
}
void fbo::draw_arrays(const buffer& buffer, GLenum mode, GLsizei count, GLint first) const
{
buffer.bind(buffer::target::array);
draw_arrays(mode, count, first);
}
void fbo::draw_arrays(const vao& buffer, GLenum mode, GLsizei count, GLint first) const
{
buffer.bind();
draw_arrays(mode, count, first);
}
void fbo::draw_elements(GLenum mode, GLsizei count, indices_type type, const GLvoid* indices) const
{
save_binding_state save(*this);
glDrawElements(mode, count, static_cast<GLenum>(type), indices);
}
void fbo::draw_elements(const buffer& buffer, GLenum mode, GLsizei count, indices_type type, const GLvoid* indices) const
{
buffer.bind(buffer::target::array);
glDrawElements(mode, count, static_cast<GLenum>(type), indices);
}
void fbo::draw_elements(GLenum mode, GLsizei count, indices_type type, const buffer& indices, usz indices_buffer_offset) const
{
indices.bind(buffer::target::element_array);
glDrawElements(mode, count, static_cast<GLenum>(type), reinterpret_cast<GLvoid*>(indices_buffer_offset));
}
void fbo::draw_elements(const buffer& buffer_, GLenum mode, GLsizei count, indices_type type, const buffer& indices, usz indices_buffer_offset) const
{
buffer_.bind(buffer::target::array);
draw_elements(mode, count, type, indices, indices_buffer_offset);
}
void fbo::draw_elements(GLenum mode, GLsizei count, const GLubyte* indices) const
{
draw_elements(mode, count, indices_type::ubyte, indices);
}
void fbo::draw_elements(const buffer& buffer, GLenum mode, GLsizei count, const GLubyte* indices) const
{
draw_elements(buffer, mode, count, indices_type::ubyte, indices);
}
void fbo::draw_elements(GLenum mode, GLsizei count, const GLushort* indices) const
{
draw_elements(mode, count, indices_type::ushort, indices);
}
void fbo::draw_elements(const buffer& buffer, GLenum mode, GLsizei count, const GLushort* indices) const
{
draw_elements(buffer, mode, count, indices_type::ushort, indices);
}
void fbo::draw_elements(GLenum mode, GLsizei count, const GLuint* indices) const
{
draw_elements(mode, count, indices_type::uint, indices);
}
void fbo::draw_elements(const buffer& buffer, GLenum mode, GLsizei count, const GLuint* indices) const
{
draw_elements(buffer, mode, count, indices_type::uint, indices);
}
void fbo::clear(buffers buffers_) const
{
save_binding_state save(*this);
glClear(static_cast<GLbitfield>(buffers_));
}
void fbo::copy_from(const void* pixels, const sizei& size, gl::texture::format format_, gl::texture::type type_, class pixel_unpack_settings pixel_settings) const
{
save_binding_state save(*this);
pixel_settings.apply();
glDrawPixels(size.width, size.height, static_cast<GLenum>(format_), static_cast<GLenum>(type_), pixels);
}
void fbo::copy_from(const buffer& buf, const sizei& size, gl::texture::format format_, gl::texture::type type_, class pixel_unpack_settings pixel_settings) const
{
save_binding_state save(*this);
buffer::save_binding_state save_buffer(buffer::target::pixel_unpack, buf);
pixel_settings.apply();
glDrawPixels(size.width, size.height, static_cast<GLenum>(format_), static_cast<GLenum>(type_), nullptr);
}
void fbo::copy_to(void* pixels, coordi coord, gl::texture::format format_, gl::texture::type type_, class pixel_pack_settings pixel_settings) const
{
save_binding_state save(*this);
pixel_settings.apply();
glReadPixels(coord.x, coord.y, coord.width, coord.height, static_cast<GLenum>(format_), static_cast<GLenum>(type_), pixels);
}
void fbo::copy_to(const buffer& buf, coordi coord, gl::texture::format format_, gl::texture::type type_, class pixel_pack_settings pixel_settings) const
{
save_binding_state save(*this);
buffer::save_binding_state save_buffer(buffer::target::pixel_pack, buf);
pixel_settings.apply();
glReadPixels(coord.x, coord.y, coord.width, coord.height, static_cast<GLenum>(format_), static_cast<GLenum>(type_), nullptr);
}
fbo fbo::get_bound_draw_buffer()
{
GLint value;
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &value);
return{ static_cast<GLuint>(value) };
}
fbo fbo::get_bound_read_buffer()
{
GLint value;
glGetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &value);
return{ static_cast<GLuint>(value) };
}
fbo fbo::get_bound_buffer()
{
GLint value;
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &value);
return{ static_cast<GLuint>(value) };
}
GLuint fbo::id() const
{
return m_id;
}
void fbo::set_id(GLuint id)
{
m_id = id;
}
void fbo::set_extents(const size2i& extents)
{
m_size = extents;
}
size2i fbo::get_extents() const
{
return m_size;
}
bool fbo::matches(const std::array<GLuint, 4>& color_targets, GLuint depth_stencil_target) const
{
for (u32 index = 0; index < 4; ++index)
{
if (color[index].resource_id() != color_targets[index])
{
return false;
}
}
const auto depth_resource = depth.resource_id() | depth_stencil.resource_id();
return (depth_resource == depth_stencil_target);
}
bool fbo::references_any(const std::vector<GLuint>& resources) const
{
return std::any_of(m_resource_bindings.cbegin(), m_resource_bindings.cend(), [&resources](const auto& e)
{
return std::find(resources.cbegin(), resources.cend(), e.second) != resources.cend();
});
}
}
| 8,016 | C++ | .cpp | 228 | 31.232456 | 164 | 0.706609 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,502 | ring_buffer.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/ring_buffer.cpp |
#include "stdafx.h"
#include "ring_buffer.h"
namespace gl
{
void ring_buffer::recreate(GLsizeiptr size, const void* data)
{
if (m_id)
{
m_fence.wait_for_signal();
remove();
}
buffer::create();
save_binding_state save(current_target(), *this);
GLbitfield buffer_storage_flags = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
if (gl::get_driver_caps().vendor_MESA) buffer_storage_flags |= GL_CLIENT_STORAGE_BIT;
DSA_CALL2(NamedBufferStorage, m_id, size, data, buffer_storage_flags);
m_memory_mapping = DSA_CALL2_RET(MapNamedBufferRange, m_id, 0, size, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
ensure(m_memory_mapping != nullptr);
m_data_loc = 0;
m_size = ::narrow<u32>(size);
m_memory_type = memory_type::host_visible;
}
void ring_buffer::create(target target_, GLsizeiptr size, const void* data_)
{
m_target = target_;
recreate(size, data_);
}
std::pair<void*, u32> ring_buffer::alloc_from_heap(u32 alloc_size, u16 alignment)
{
u32 offset = m_data_loc;
if (m_data_loc) offset = utils::align(offset, alignment);
if ((offset + alloc_size) > m_size)
{
if (!m_fence.is_empty())
{
m_fence.wait_for_signal();
}
else
{
rsx_log.error("OOM Error: Ring buffer was likely being used without notify() being called");
glFinish();
}
m_data_loc = 0;
offset = 0;
}
//Align data loc to 256; allows some "guard" region so we don't trample our own data inadvertently
m_data_loc = utils::align(offset + alloc_size, 256);
return std::make_pair(static_cast<char*>(m_memory_mapping) + offset, offset);
}
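// Worked example of the allocator above: with m_data_loc = 300 and a 100-byte
// request at alignment 256, the caller receives offset align(300, 256) = 512,
// and m_data_loc advances to align(512 + 100, 256) = 768, leaving a guard gap
// before the next allocation.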
void ring_buffer::remove()
{
if (m_memory_mapping)
{
buffer::unmap();
m_memory_mapping = nullptr;
m_data_loc = 0;
m_size = 0;
}
buffer::remove();
}
void ring_buffer::notify()
{
//Insert a fence once more than 25% of the buffer has been consumed
if (m_fence.is_empty() && (m_data_loc > (m_size >> 2)))
m_fence.reset();
}
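// Worked example of the heuristic above: for a 4 MiB ring, m_size >> 2 is
// 1 MiB, so the first notify() after more than 1 MiB has been consumed arms
// the fence; alloc_from_heap only waits on it if the write pointer wraps
// before the GPU has signalled.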
// Legacy ring buffer - used when ARB_buffer_storage is not available, OR when capturing with renderdoc
void legacy_ring_buffer::recreate(GLsizeiptr size, const void* data)
{
if (m_id)
remove();
buffer::create();
buffer::data(size, data, GL_DYNAMIC_DRAW);
m_memory_type = memory_type::host_visible;
m_memory_mapping = nullptr;
m_data_loc = 0;
m_size = ::narrow<u32>(size);
}
void legacy_ring_buffer::create(target target_, GLsizeiptr size, const void* data_)
{
m_target = target_;
recreate(size, data_);
}
void legacy_ring_buffer::reserve_storage_on_heap(u32 alloc_size)
{
ensure(m_memory_mapping == nullptr);
u32 offset = m_data_loc;
if (m_data_loc) offset = utils::align(offset, 256);
const u32 block_size = utils::align(alloc_size + 16, 256); //Overallocate just in case we need to realign base
if ((offset + block_size) > m_size)
{
buffer::data(m_size, nullptr, GL_DYNAMIC_DRAW);
m_data_loc = 0;
}
m_memory_mapping = DSA_CALL2_RET(MapNamedBufferRange, m_id, m_data_loc, block_size, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
m_mapped_bytes = block_size;
m_mapping_offset = m_data_loc;
m_alignment_offset = 0;
//When using debugging tools, the mapped base might not be aligned as expected
const u64 mapped_address_base = reinterpret_cast<u64>(m_memory_mapping);
if (mapped_address_base & 0xF)
{
//Unaligned result was returned. We have to modify the base address a bit
//We lose some memory here, but the 16 byte overallocation above makes up for it
const u64 new_base = (mapped_address_base & ~0xF) + 16;
const u64 diff_bytes = new_base - mapped_address_base;
m_memory_mapping = reinterpret_cast<void*>(new_base);
m_mapped_bytes -= ::narrow<u32>(diff_bytes);
m_alignment_offset = ::narrow<u32>(diff_bytes);
}
ensure(m_mapped_bytes >= alloc_size);
}
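// Worked example of the realignment above: if the mapped base comes back as
// 0x7f0000001008 (low nibble 8, not 16-byte aligned), new_base becomes
// (0x7f0000001008 & ~0xF) + 16 = 0x7f0000001010 and diff_bytes = 8; the
// 16-byte overallocation folded into block_size absorbs the loss.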
std::pair<void*, u32> legacy_ring_buffer::alloc_from_heap(u32 alloc_size, u16 alignment)
{
u32 offset = m_data_loc;
if (m_data_loc) offset = utils::align(offset, alignment);
u32 padding = (offset - m_data_loc);
u32 real_size = utils::align(padding + alloc_size, alignment); //Ensures we leave the loc pointer aligned after we exit
if (real_size > m_mapped_bytes)
{
//Missed allocation. We take a performance hit on doing this.
//Overallocate slightly for the next allocation if requested size is too small
unmap();
reserve_storage_on_heap(std::max(real_size, 4096U));
offset = m_data_loc;
if (m_data_loc) offset = utils::align(offset, alignment);
padding = (offset - m_data_loc);
real_size = utils::align(padding + alloc_size, alignment);
}
m_data_loc = offset + real_size;
m_mapped_bytes -= real_size;
u32 local_offset = (offset - m_mapping_offset);
return std::make_pair(static_cast<char*>(m_memory_mapping) + local_offset, offset + m_alignment_offset);
}
void legacy_ring_buffer::remove()
{
ring_buffer::remove();
m_mapped_bytes = 0;
}
void legacy_ring_buffer::unmap()
{
buffer::unmap();
m_memory_mapping = nullptr;
m_mapped_bytes = 0;
m_mapping_offset = 0;
}
// AMD persistent mapping workaround for driver-assisted flushing
void* transient_ring_buffer::map_internal(u32 offset, u32 length)
{
flush();
dirty = true;
return DSA_CALL2_RET(MapNamedBufferRange, m_id, offset, length, GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
}
void transient_ring_buffer::bind()
{
flush();
buffer::bind();
}
void transient_ring_buffer::recreate(GLsizeiptr size, const void* data)
{
if (m_id)
{
m_fence.wait_for_signal();
remove();
}
buffer::create();
save_binding_state save(current_target(), *this);
DSA_CALL2(NamedBufferStorage, m_id, size, data, GL_MAP_WRITE_BIT);
m_data_loc = 0;
m_size = ::narrow<u32>(size);
m_memory_type = memory_type::host_visible;
}
std::pair<void*, u32> transient_ring_buffer::alloc_from_heap(u32 alloc_size, u16 alignment)
{
ensure(m_memory_mapping == nullptr);
const auto allocation = ring_buffer::alloc_from_heap(alloc_size, alignment);
return { map_internal(allocation.second, alloc_size), allocation.second };
}
void transient_ring_buffer::flush()
{
if (dirty)
{
buffer::unmap();
dirty = false;
}
}
void transient_ring_buffer::unmap()
{
flush();
}
scratch_ring_buffer::~scratch_ring_buffer()
{
if (m_storage)
{
remove();
}
}
void scratch_ring_buffer::create(buffer::target target_, u64 size, u32 usage_flags)
{
if (m_storage)
{
remove();
}
m_storage.create(target_, size, nullptr, gl::buffer::memory_type::local, usage_flags);
}
void scratch_ring_buffer::remove()
{
if (m_storage)
{
m_storage.remove();
}
m_barriers.clear();
m_alloc_pointer = 0;
}
u32 scratch_ring_buffer::alloc(u32 size, u32 alignment)
{
u64 start = utils::align(m_alloc_pointer, alignment);
m_alloc_pointer = (start + size);
if (static_cast<GLsizeiptr>(m_alloc_pointer) > m_storage.size())
{
start = 0;
m_alloc_pointer = size;
}
pop_barrier(static_cast<u32>(start), size);
return static_cast<u32>(start);
}
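// Worked example of the wrap above: with a 1 MiB scratch buffer,
// m_alloc_pointer = 0xFFF00 and a 4 KiB request at alignment 256, start + size
// overruns the storage (0x100F00 > 0x100000), so the allocation restarts at
// offset 0 and pop_barrier() first waits out any fence still covering bytes 0..0xFFF.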
void scratch_ring_buffer::pop_barrier(u32 start, u32 length)
{
const auto range = utils::address_range::start_length(start, length);
m_barriers.erase(std::remove_if(m_barriers.begin(), m_barriers.end(), [&range](auto& barrier_)
{
if (barrier_.range.overlaps(range))
{
barrier_.signal.server_wait_sync();
barrier_.signal.destroy();
return true;
}
return false;
}), m_barriers.end());
}
void scratch_ring_buffer::push_barrier(u32 start, u32 length)
{
if (!length)
{
return;
}
barrier barrier_;
barrier_.range = utils::address_range::start_length(start, length);
barrier_.signal.create();
m_barriers.emplace_back(barrier_);
}
}
| 8,070 | C++ | .cpp | 252 | 27.65873 | 163 | 0.668484 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,503 | blitter.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/blitter.cpp |
#include "stdafx.h"
#include "blitter.h"
#include "state_tracker.hpp"
#include "../GLTexture.h" // TODO: This system also needs to be refactored
#include "../GLOverlays.h"
namespace gl
{
blitter* g_hw_blitter = nullptr;
void blitter::copy_image(gl::command_context&, const texture* src, const texture* dst, int src_level, int dst_level, const position3i& src_offset, const position3i& dst_offset, const size3i& size) const
{
ensure(src_level == 0);
// Typeless bypass for BGRA8
std::unique_ptr<gl::texture> temp_image;
const texture* real_src = src;
glCopyImageSubData(real_src->id(), static_cast<GLenum>(real_src->get_target()), src_level,
src_offset.x, src_offset.y, src_offset.z,
dst->id(), static_cast<GLenum>(dst->get_target()), dst_level,
dst_offset.x, dst_offset.y, dst_offset.z, size.width, size.height, size.depth);
}
void blitter::scale_image(gl::command_context& cmd, const texture* src, texture* dst, areai src_rect, areai dst_rect,
bool linear_interpolation, const rsx::typeless_xfer& xfer_info)
{
std::unique_ptr<texture> typeless_src;
std::unique_ptr<texture> typeless_dst;
const gl::texture* real_src = src;
const gl::texture* real_dst = dst;
// Optimization pass; check for pass-through data transfer
if (!xfer_info.flip_horizontal && !xfer_info.flip_vertical && src_rect.height() == dst_rect.height())
{
auto src_w = src_rect.width();
auto dst_w = dst_rect.width();
if (xfer_info.src_is_typeless) src_w = static_cast<int>(src_w * xfer_info.src_scaling_hint);
if (xfer_info.dst_is_typeless) dst_w = static_cast<int>(dst_w * xfer_info.dst_scaling_hint);
if (src_w == dst_w)
{
// Final dimensions are a match
if (xfer_info.src_is_typeless || xfer_info.dst_is_typeless)
{
const coord3i src_region = { { src_rect.x1, src_rect.y1, 0 }, { src_rect.width(), src_rect.height(), 1 } };
const coord3i dst_region = { { dst_rect.x1, dst_rect.y1, 0 }, { dst_rect.width(), dst_rect.height(), 1 } };
gl::copy_typeless(cmd, dst, src, static_cast<coord3u>(dst_region), static_cast<coord3u>(src_region));
}
else
{
copy_image(cmd, src, dst, 0, 0, position3i{ src_rect.x1, src_rect.y1, 0u }, position3i{ dst_rect.x1, dst_rect.y1, 0 }, size3i{ src_rect.width(), src_rect.height(), 1 });
}
return;
}
}
if (xfer_info.src_is_typeless)
{
const auto internal_fmt = xfer_info.src_native_format_override ?
GLenum(xfer_info.src_native_format_override) :
get_sized_internal_format(xfer_info.src_gcm_format);
if (static_cast<gl::texture::internal_format>(internal_fmt) != src->get_internal_format())
{
const u16 internal_width = static_cast<u16>(src->width() * xfer_info.src_scaling_hint);
typeless_src = std::make_unique<texture>(GL_TEXTURE_2D, internal_width, src->height(), 1, 1, internal_fmt);
copy_typeless(cmd, typeless_src.get(), src);
real_src = typeless_src.get();
src_rect.x1 = static_cast<u16>(src_rect.x1 * xfer_info.src_scaling_hint);
src_rect.x2 = static_cast<u16>(src_rect.x2 * xfer_info.src_scaling_hint);
}
}
if (xfer_info.dst_is_typeless)
{
const auto internal_fmt = xfer_info.dst_native_format_override ?
GLenum(xfer_info.dst_native_format_override) :
get_sized_internal_format(xfer_info.dst_gcm_format);
if (static_cast<gl::texture::internal_format>(internal_fmt) != dst->get_internal_format())
{
const auto internal_width = static_cast<u16>(dst->width() * xfer_info.dst_scaling_hint);
typeless_dst = std::make_unique<texture>(GL_TEXTURE_2D, internal_width, dst->height(), 1, 1, internal_fmt);
copy_typeless(cmd, typeless_dst.get(), dst);
real_dst = typeless_dst.get();
dst_rect.x1 = static_cast<u16>(dst_rect.x1 * xfer_info.dst_scaling_hint);
dst_rect.x2 = static_cast<u16>(dst_rect.x2 * xfer_info.dst_scaling_hint);
}
}
ensure(real_src->aspect() == real_dst->aspect());
if (xfer_info.flip_horizontal)
{
src_rect.flip_horizontal();
}
if (xfer_info.flip_vertical)
{
src_rect.flip_vertical();
}
if (src_rect.width() == dst_rect.width() &&
src_rect.height() == dst_rect.height() &&
!src_rect.is_flipped() && !dst_rect.is_flipped())
{
copy_image(cmd, real_src, real_dst, 0, 0, position3i{ src_rect.x1, src_rect.y1, 0 }, position3i{ dst_rect.x1, dst_rect.y1, 0 }, size3i{ src_rect.width(), src_rect.height(), 1 });
}
else
{
const bool is_depth_copy = (real_src->aspect() != image_aspect::color);
const filter interp = (linear_interpolation && !is_depth_copy) ? filter::linear : filter::nearest;
gl::fbo::attachment::type attachment;
gl::buffers target;
if (is_depth_copy)
{
if (real_dst->aspect() & gl::image_aspect::stencil)
{
attachment = fbo::attachment::type::depth_stencil;
target = gl::buffers::depth_stencil;
}
else
{
attachment = fbo::attachment::type::depth;
target = gl::buffers::depth;
}
}
else
{
attachment = fbo::attachment::type::color;
target = gl::buffers::color;
}
cmd->disable(GL_SCISSOR_TEST);
save_binding_state saved;
gl::fbo::attachment src_att{ blit_src, static_cast<fbo::attachment::type>(attachment) };
src_att = *real_src;
gl::fbo::attachment dst_att{ blit_dst, static_cast<fbo::attachment::type>(attachment) };
dst_att = *real_dst;
blit_src.blit(blit_dst, src_rect, dst_rect, target, interp);
// Release the attachments explicitly (not doing so causes glitches, e.g Journey Menu)
src_att = GL_NONE;
dst_att = GL_NONE;
}
if (xfer_info.dst_is_typeless)
{
// Transfer contents from typeless dst back to original dst
copy_typeless(cmd, dst, typeless_dst.get());
}
}
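// Worked example of the typeless handling above (format pairing illustrative):
// reinterpreting a 256-wide RGBA8 region as a 16-bit-per-texel format doubles
// the texel count per row, so a src_scaling_hint of 2.0 widens src_rect.x1/x2
// and the staging texture to 512 before the blit.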
void blitter::fast_clear_image(gl::command_context& cmd, const texture* dst, const color4f& color)
{
save_binding_state saved;
blit_dst.bind();
blit_dst.color[0] = *dst;
blit_dst.check();
cmd->clear_color(color);
cmd->color_maski(0, true, true, true, true);
glClear(GL_COLOR_BUFFER_BIT);
blit_dst.color[0] = GL_NONE;
}
void blitter::fast_clear_image(gl::command_context& cmd, const texture* dst, float /*depth*/, u8 /*stencil*/)
{
fbo::attachment::type attachment;
GLbitfield clear_mask;
switch (const auto fmt = dst->get_internal_format())
{
case texture::internal_format::depth16:
case texture::internal_format::depth32f:
clear_mask = GL_DEPTH_BUFFER_BIT;
attachment = fbo::attachment::type::depth;
break;
case texture::internal_format::depth24_stencil8:
case texture::internal_format::depth32f_stencil8:
clear_mask = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
attachment = fbo::attachment::type::depth_stencil;
break;
default:
fmt::throw_exception("Invalid texture passed to clear depth function, format=0x%x", static_cast<u32>(fmt));
}
save_binding_state saved;
fbo::attachment attach_point{ blit_dst, attachment };
blit_dst.bind();
attach_point = *dst;
blit_dst.check();
cmd->depth_mask(GL_TRUE);
cmd->stencil_mask(0xFF);
glClear(clear_mask);
attach_point = GL_NONE;
}
}
| 7,284 | C++ | .cpp | 176 | 36.323864 | 204 | 0.664818 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,504 | common.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/common.cpp |
#include "state_tracker.hpp"
#include "vao.hpp"
namespace gl
{
static thread_local bool s_tls_primary_context_thread = false;
static gl::driver_state* s_current_state = nullptr;
void set_primary_context_thread(bool value)
{
s_tls_primary_context_thread = value;
}
bool is_primary_context_thread()
{
return s_tls_primary_context_thread;
}
void set_command_context(gl::command_context& ctx)
{
s_current_state = ctx.operator->();
}
void set_command_context(gl::driver_state& ctx)
{
s_current_state = &ctx;
}
gl::command_context get_command_context()
{
return { *s_current_state };
}
attrib_t vao::operator[](u32 index) const noexcept
{
return attrib_t(index);
}
}
| 738 | C++ | .cpp | 31 | 20.322581 | 64 | 0.695527 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,505 | image.cpp | RPCS3_rpcs3/rpcs3/Emu/RSX/GL/glutils/image.cpp |
#include "stdafx.h"
#include "image.h"
#include "buffer_object.h"
#include "state_tracker.hpp"
#include "pixel_settings.hpp"
namespace gl
{
static GLenum sizedfmt_to_ifmt(GLenum sized)
{
switch (sized)
{
case GL_BGRA8:
return GL_RGBA8;
case GL_BGR5_A1:
return GL_RGB5_A1;
default:
return sized;
}
}
texture::texture(GLenum target, GLuint width, GLuint height, GLuint depth, GLuint mipmaps, GLenum sized_format, rsx::format_class format_class)
{
glGenTextures(1, &m_id);
// Must bind to initialize the new texture
gl::get_command_context()->bind_texture(GL_TEMP_IMAGE_SLOT(0), target, m_id, GL_TRUE);
const GLenum storage_fmt = sizedfmt_to_ifmt(sized_format);
switch (target)
{
default:
fmt::throw_exception("Invalid image target 0x%X", target);
case GL_TEXTURE_1D:
glTexStorage1D(target, mipmaps, storage_fmt, width);
height = depth = 1;
break;
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
glTexStorage2D(target, mipmaps, storage_fmt, width, height);
depth = 1;
break;
case GL_TEXTURE_3D:
case GL_TEXTURE_2D_ARRAY:
glTexStorage3D(target, mipmaps, storage_fmt, width, height, depth);
break;
case GL_TEXTURE_BUFFER:
break;
}
if (target != GL_TEXTURE_BUFFER)
{
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(target, GL_TEXTURE_WRAP_R, GL_REPEAT);
glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, mipmaps - 1);
m_width = width;
m_height = height;
m_depth = depth;
m_mipmaps = mipmaps;
m_aspect_flags = image_aspect::color;
switch (storage_fmt)
{
case GL_DEPTH_COMPONENT16:
{
m_pitch = width * 2;
m_aspect_flags = image_aspect::depth;
break;
}
case GL_DEPTH_COMPONENT32F:
{
m_pitch = width * 4;
m_aspect_flags = image_aspect::depth;
break;
}
case GL_DEPTH24_STENCIL8:
case GL_DEPTH32F_STENCIL8:
{
m_pitch = width * 4;
m_aspect_flags = image_aspect::depth | image_aspect::stencil;
break;
}
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
{
m_compressed = true;
m_pitch = utils::align(width, 4) / 2;
break;
}
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
{
m_compressed = true;
m_pitch = utils::align(width, 4);
break;
}
default:
{
GLenum query_target = (target == GL_TEXTURE_CUBE_MAP) ? GL_TEXTURE_CUBE_MAP_POSITIVE_X : target;
GLint r, g, b, a;
glGetTexLevelParameteriv(query_target, 0, GL_TEXTURE_RED_SIZE, &r);
glGetTexLevelParameteriv(query_target, 0, GL_TEXTURE_GREEN_SIZE, &g);
glGetTexLevelParameteriv(query_target, 0, GL_TEXTURE_BLUE_SIZE, &b);
glGetTexLevelParameteriv(query_target, 0, GL_TEXTURE_ALPHA_SIZE, &a);
m_pitch = width * (r + g + b + a) / 8;
break;
}
}
if (!m_pitch)
{
fmt::throw_exception("Unhandled GL format 0x%X", sized_format);
}
if (format_class == RSX_FORMAT_CLASS_UNDEFINED)
{
if (m_aspect_flags != image_aspect::color)
{
rsx_log.error("Undefined format class for depth texture is not allowed");
}
else
{
format_class = RSX_FORMAT_CLASS_COLOR;
}
}
}
m_target = static_cast<texture::target>(target);
m_internal_format = static_cast<internal_format>(sized_format);
m_component_layout = { GL_ALPHA, GL_RED, GL_GREEN, GL_BLUE };
m_format_class = format_class;
}
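// Worked examples of the pitch computation above: a 640-wide RGBA8 texture
// reports 8 bits for each of the four channel-size queries, giving
// 640 * 32 / 8 = 2560 bytes per row; a 640-wide DXT1 texture stores 4 bits
// per texel, giving a nominal row pitch of align(640, 4) / 2 = 320 bytes.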
texture::~texture()
{
gl::get_command_context()->unbind_texture(static_cast<GLenum>(m_target), m_id);
glDeleteTextures(1, &m_id);
m_id = GL_NONE;
}
void texture::copy_from(const void* src, texture::format format, texture::type type, int level, const coord3u region, const pixel_unpack_settings& pixel_settings)
{
pixel_settings.apply();
switch (const auto target_ = static_cast<GLenum>(m_target))
{
case GL_TEXTURE_1D:
{
DSA_CALL(TextureSubImage1D, m_id, GL_TEXTURE_1D, level, region.x, region.width, static_cast<GLenum>(format), static_cast<GLenum>(type), src);
break;
}
case GL_TEXTURE_2D:
{
DSA_CALL(TextureSubImage2D, m_id, GL_TEXTURE_2D, level, region.x, region.y, region.width, region.height, static_cast<GLenum>(format), static_cast<GLenum>(type), src);
break;
}
case GL_TEXTURE_3D:
case GL_TEXTURE_2D_ARRAY:
{
DSA_CALL(TextureSubImage3D, m_id, target_, level, region.x, region.y, region.z, region.width, region.height, region.depth, static_cast<GLenum>(format), static_cast<GLenum>(type), src);
break;
}
case GL_TEXTURE_CUBE_MAP:
{
if (get_driver_caps().ARB_dsa_supported)
{
glTextureSubImage3D(m_id, level, region.x, region.y, region.z, region.width, region.height, region.depth, static_cast<GLenum>(format), static_cast<GLenum>(type), src);
}
else
{
rsx_log.warning("Cubemap upload via texture::copy_from is halfplemented!");
auto ptr = static_cast<const u8*>(src);
const auto end = std::min(6u, region.z + region.depth);
for (unsigned face = region.z; face < end; ++face)
{
glTextureSubImage2DEXT(m_id, GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, level, region.x, region.y, region.width, region.height, static_cast<GLenum>(format), static_cast<GLenum>(type), ptr);
ptr += (region.width * region.height * 4); //TODO
}
}
break;
}
}
}
void texture::copy_from(buffer& buf, u32 gl_format_type, u32 offset, u32 length)
{
if (get_target() != target::textureBuffer)
fmt::throw_exception("OpenGL error: texture cannot copy from buffer");
DSA_CALL(TextureBufferRange, m_id, GL_TEXTURE_BUFFER, gl_format_type, buf.id(), offset, length);
}
void texture::copy_from(buffer_view& view)
{
copy_from(*view.value(), view.format(), view.offset(), view.range());
}
void texture::copy_to(void* dst, texture::format format, texture::type type, int level, const coord3u& region, const pixel_pack_settings& pixel_settings) const
{
pixel_settings.apply();
const auto& caps = get_driver_caps();
if (!region.x && !region.y && !region.z &&
region.width == m_width && region.height == m_height && region.depth == m_depth)
{
if (caps.ARB_dsa_supported)
glGetTextureImage(m_id, level, static_cast<GLenum>(format), static_cast<GLenum>(type), s32{ smax }, dst);
else
glGetTextureImageEXT(m_id, static_cast<GLenum>(m_target), level, static_cast<GLenum>(format), static_cast<GLenum>(type), dst);
}
else if (caps.ARB_dsa_supported)
{
glGetTextureSubImage(m_id, level, region.x, region.y, region.z, region.width, region.height, region.depth,
static_cast<GLenum>(format), static_cast<GLenum>(type), s32{ smax }, dst);
}
else
{
// Worst case scenario. For some reason, EXT_dsa does not have glGetTextureSubImage
const auto target_ = static_cast<GLenum>(m_target);
texture tmp{ target_, region.width, region.height, region.depth, 1, static_cast<GLenum>(m_internal_format) };
glCopyImageSubData(m_id, target_, level, region.x, region.y, region.z, tmp.id(), target_, 0, 0, 0, 0,
region.width, region.height, region.depth);
const coord3u region2 = { {0, 0, 0}, region.size };
tmp.copy_to(dst, format, type, 0, region2, pixel_settings);
}
}
void texture_view::create(texture* data, GLenum target, GLenum sized_format, const subresource_range& range, const GLenum* argb_swizzle)
{
m_target = target;
m_format = sized_format;
m_view_format = sizedfmt_to_ifmt(sized_format);
m_image_data = data;
m_aspect_flags = range.aspect_mask & data->aspect();
ensure(m_aspect_flags);
glGenTextures(1, &m_id);
glTextureView(m_id, target, data->id(), m_view_format, range.min_level, range.num_levels, range.min_layer, range.num_layers);
if (argb_swizzle)
{
component_swizzle[0] = argb_swizzle[1];
component_swizzle[1] = argb_swizzle[2];
component_swizzle[2] = argb_swizzle[3];
component_swizzle[3] = argb_swizzle[0];
DSA_CALL(TextureParameteriv, m_id, m_target, GL_TEXTURE_SWIZZLE_RGBA, reinterpret_cast<GLint*>(component_swizzle));
}
else
{
component_swizzle[0] = GL_RED;
component_swizzle[1] = GL_GREEN;
component_swizzle[2] = GL_BLUE;
component_swizzle[3] = GL_ALPHA;
}
if (range.aspect_mask & image_aspect::stencil)
{
constexpr u32 depth_stencil_mask = (image_aspect::depth | image_aspect::stencil);
ensure((range.aspect_mask & depth_stencil_mask) != depth_stencil_mask); // "Invalid aspect mask combination"
DSA_CALL(TextureParameteri, m_id, m_target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX);
}
}
texture_view::~texture_view()
{
if (m_id)
{
gl::get_command_context()->unbind_texture(static_cast<GLenum>(m_target), m_id);
glDeleteTextures(1, &m_id);
m_id = GL_NONE;
}
}
void texture_view::bind(gl::command_context& cmd, GLuint layer) const
{
cmd->bind_texture(layer, m_target, m_id);
}
nil_texture_view::nil_texture_view(texture* data)
{
m_id = data->id();
m_target = static_cast<GLenum>(data->get_target());
m_format = static_cast<GLenum>(data->get_internal_format());
m_view_format = sizedfmt_to_ifmt(m_format);
m_aspect_flags = data->aspect();
m_image_data = data;
component_swizzle[0] = GL_RED;
component_swizzle[1] = GL_GREEN;
component_swizzle[2] = GL_BLUE;
component_swizzle[3] = GL_ALPHA;
}
nil_texture_view::~nil_texture_view()
{
m_id = GL_NONE;
}
texture_view* viewable_image::get_view(const rsx::texture_channel_remap_t& remap_, GLenum aspect_flags)
{
auto remap = remap_;
const u64 view_aspect = static_cast<u64>(aspect_flags) & aspect();
ensure(view_aspect);
const u64 key = static_cast<u64>(remap.encoded) | (view_aspect << 32);
if (auto found = views.find(key);
found != views.end())
{
ensure(found->second.get() != nullptr);
return found->second.get();
}
std::array<GLenum, 4> mapping;
GLenum* swizzle = nullptr;
if (remap.encoded != GL_REMAP_IDENTITY)
{
mapping = apply_swizzle_remap(get_native_component_layout(), remap);
swizzle = mapping.data();
}
auto view = std::make_unique<texture_view>(this, swizzle, aspect_flags);
auto result = view.get();
views.emplace(key, std::move(view));
return result;
}
void viewable_image::set_native_component_layout(const std::array<GLenum, 4>& layout)
{
if (m_component_layout[0] != layout[0] ||
m_component_layout[1] != layout[1] ||
m_component_layout[2] != layout[2] ||
m_component_layout[3] != layout[3])
{
texture::set_native_component_layout(layout);
views.clear();
}
}
}
| 10,987 | C++ | .cpp | 310 | 30.741935 | 191 | 0.669214 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |