id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
753,179
|
bytes.cpp
|
NovaMods_nova-renderer/src/util/bytes.cpp
|
#include "nova_renderer/util/bytes.hpp"
#include <ostream>
namespace nova::mem {
    // Stream-output operators for the byte-quantity wrapper types. Each
    // writes the raw count followed by its unit suffix so quantities read
    // naturally in log messages (e.g. "1024b", "4kb").

    std::ostream& operator<<(std::ostream& os, const Bytes b) { return os << b.b_count() << "b"; }

    std::ostream& operator<<(std::ostream& os, const KBytes b) { return os << b.k_count() << "kb"; }

    std::ostream& operator<<(std::ostream& os, const MBytes b) { return os << b.m_count() << "mb"; }

    std::ostream& operator<<(std::ostream& os, const GBytes b) { return os << b.g_count() << "gb"; }
} // namespace nova::mem
| 609
|
C++
|
.cpp
| 20
| 24.4
| 64
| 0.546233
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,180
|
result.cpp
|
NovaMods_nova-renderer/src/util/result.cpp
|
#include "nova_renderer/util/result.hpp"
namespace ntl {
    // An error carrying a human-readable message and an optional pointer to
    // the error that caused it, forming a printable cause chain.

    NovaError::NovaError(std::string message) : message(std::move(message)) {}

    NovaError::NovaError(std::string message, NovaError* cause) : message(std::move(message)), cause(cause) {}

    std::string NovaError::to_string() const {
        // Fix: std::string has no static `format` member (the original
        // `std::string::format(...)` call cannot compile). Build the chained
        // message with plain concatenation instead; the output text is the
        // same "<message>\nCaused by: <cause>" shape the format string produced.
        if(cause) {
            return message + "\nCaused by: " + cause->to_string();
        } else {
            return message;
        }
    }
} // namespace ntl
| 487
|
C++
|
.cpp
| 12
| 34
| 110
| 0.629237
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,181
|
utils.cpp
|
NovaMods_nova-renderer/src/util/utils.cpp
|
#include "nova_renderer/util/utils.hpp"
#include <sstream>
namespace nova::renderer {
// taken from https://www.fluentcpp.com/2017/04/21/how-to-split-a-string-in-c/
std::pmr::vector<std::string> split(const std::string& s, char delim) {
    // Splits `s` on every occurrence of `delim`; the delimiter itself is not
    // included in the returned tokens. An empty input yields an empty vector.
    // (Adapted from https://www.fluentcpp.com/2017/04/21/how-to-split-a-string-in-c/)
    std::pmr::vector<std::string> tokens;
    std::string token;
    // Fix: pass the string directly — the old `s.c_str()` / `token.c_str()`
    // round-trips forced an extra copy and length re-scan for every token.
    std::istringstream tokenStream(s);
    while(std::getline(tokenStream, token, delim)) {
        tokens.push_back(token);
    }
    return tokens;
}
std::string join(const std::pmr::vector<std::string>& strings, const std::string& joiner = ", ") {
    // Concatenates `strings`, inserting `joiner` between consecutive
    // elements (never after the last one). Empty input yields "".
    std::stringstream ss;
    for(size_t i = 0; i < strings.size(); i++) {
        // Fix: stream the std::string directly — the old `.c_str()` calls
        // forced a re-scan for the terminator on every element.
        ss << strings[i];
        if(i < strings.size() - 1) {
            ss << joiner;
        }
    }
    // Fix: return ss.str() directly instead of copying through `.c_str()`.
    return ss.str();
}
std::string print_color(unsigned int color) {
    // Formats a color packed as 0xRRGGBBAA into a "(r, g, b, a)" string with
    // each channel printed as a decimal 0-255 value.
    auto red = color >> 24;
    auto green = (color >> 16) & 0xFF;
    auto blue = (color >> 8) & 0xFF;
    auto alpha = color & 0xFF;

    std::stringstream str;
    str << "(" << red << ", " << green << ", " << blue << ", " << alpha << ")";
    // Fix: return the string directly — the old `.c_str()` forced an extra copy.
    return str.str();
}
std::string print_array(int* const data, int size) {
    // Renders `size` ints as a space-separated string; note that a trailing
    // space follows the last element (preserved for compatibility).
    std::stringstream ss;
    for(int i = 0; i < size; i++) {
        ss << data[i] << " ";
    }
    // Fix: return the string directly — the old `.c_str()` forced an extra copy.
    return ss.str();
}
bool ends_with(const std::string& string, const std::string& ending) {
    // True when `string` is at least as long as `ending` and its final
    // characters match `ending` exactly. An empty `ending` always matches.
    if(string.length() < ending.length()) {
        return false;
    }
    const auto suffix_start = string.length() - ending.length();
    return string.compare(suffix_start, ending.length(), ending) == 0;
}
} // namespace nova::renderer
| 1,747
|
C++
|
.cpp
| 46
| 29.695652
| 102
| 0.533136
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,182
|
renderdoc.cpp
|
NovaMods_nova-renderer/src/debugging/renderdoc.cpp
|
#include "renderdoc.hpp"
#include <rx/core/log.h>
#include "nova_renderer/util/platform.hpp"
#include "nova_renderer/util/utils.hpp"
#if defined(NOVA_WINDOWS)
#include "nova_renderer/util/windows.hpp"
#include "../util/windows_utils.hpp"
#elif defined(NOVA_LINUX)
#include <dlfcn.h>
#include "../util/linux_utils.hpp"
#endif
namespace nova::renderer {
    RX_LOG("RenderDoc", logger);

    // Loads the RenderDoc in-application API (version 1.3.0) from the dynamic
    // library at `renderdoc_dll_path`.
    //
    // Returns a pointer to RenderDoc's API function table on success, or an
    // error describing which load step failed. The table is owned by the
    // loaded library; the caller must not free it.
    ntl::Result<RENDERDOC_API_1_3_0*> load_renderdoc(const std::string& renderdoc_dll_path) {
#if defined(NOVA_WINDOWS)
        using Hinstance = HINSTANCE__* const;
        Hinstance renderdoc_dll = LoadLibrary(renderdoc_dll_path.data());
        if(!renderdoc_dll) {
            const std::string error = get_last_windows_error();
            return ntl::Result<RENDERDOC_API_1_3_0*>(MAKE_ERROR("Could not load RenderDoc. Error: %s", error));
        }

        logger->debug("Loaded RenderDoc DLL from %s", renderdoc_dll_path);

        const auto get_api = reinterpret_cast<pRENDERDOC_GetAPI>(GetProcAddress(renderdoc_dll, "RENDERDOC_GetAPI"));
        if(!get_api) {
            const std::string error = get_last_windows_error();
            return ntl::Result<RENDERDOC_API_1_3_0*>(MAKE_ERROR("Could not load RenderDoc DLL. Error: %s", error));
        }

#elif defined(NOVA_LINUX)
        void* renderdoc_so = dlopen(renderdoc_dll_path.data(), RTLD_NOW);
        if(renderdoc_so == nullptr) {
            // Try to load system-wide version of renderdoc
            renderdoc_so = dlopen("librenderdoc.so", RTLD_NOW);
            if(renderdoc_so == nullptr) {
                return ntl::Result<RENDERDOC_API_1_3_0*>(MAKE_ERROR("Could not load RenderDoc DLL. Error: %s", dlerror()));
            }
        }

        const auto get_api = reinterpret_cast<pRENDERDOC_GetAPI>(dlsym(renderdoc_so, "RENDERDOC_GetAPI"));
        if(get_api == nullptr) {
            return ntl::Result<RENDERDOC_API_1_3_0*>(MAKE_ERROR("Could not find the RenderDoc API loading function. Error: %s", dlerror()));
        }
#endif

        // NOTE(review): if neither NOVA_WINDOWS nor NOVA_LINUX is defined,
        // `get_api` is never declared and this will not compile — presumably
        // the build system guarantees one of the two is set; confirm.
        RENDERDOC_API_1_3_0* api;
        // RENDERDOC_GetAPI returns 1 on success per the RenderDoc API contract.
        const int32_t ret = get_api(eRENDERDOC_API_Version_1_3_0, reinterpret_cast<void**>(&api));
        if(ret != 1) {
            logger->error("Could not load RenderDoc API");

            return ntl::Result<RENDERDOC_API_1_3_0*>(MAKE_ERROR("Could not load RenderDoc API. Error code %d", ret));
        }

        logger->debug("Loaded RenderDoc 1.3 API");
        return ntl::Result(api);
    }
} // namespace nova::renderer
| 2,473
|
C++
|
.cpp
| 51
| 40.921569
| 140
| 0.643955
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,183
|
rhi_types.cpp
|
NovaMods_nova-renderer/src/rhi/rhi_types.cpp
|
#include "nova_renderer/rhi/rhi_types.hpp"
namespace nova::renderer::rhi {
// Two binding descriptions are equal when they name the same set/binding
// slot with the same descriptor count and descriptor type.
bool RhiResourceBindingDescription::operator==(const RhiResourceBindingDescription& other) {
    return set == other.set && binding == other.binding && count == other.count && type == other.type;
}

bool RhiResourceBindingDescription::operator!=(const RhiResourceBindingDescription& other) { return !(*this == other); }

// Default barrier: zero-initializes the buffer barrier member; callers fill
// in the fields relevant to the resource being barriered.
RhiResourceBarrier::RhiResourceBarrier() : buffer_memory_barrier{0, 0} {};
uint32_t RhiPipelineInterface::get_num_descriptors_of_type(const DescriptorType type) const {
    // Counts how many of this interface's bindings use the requested
    // descriptor type.
    uint32_t matching_bindings = 0;
    for(const auto& [name, desc] : bindings) {
        if(desc.type == type) {
            matching_bindings += 1;
        }
    }
    return matching_bindings;
}
// NOTE(review): despite the `|=` name this does NOT modify `lhs` — it simply
// returns the OR of the two stage masks, matching the by-value signature
// declared in the header. Callers must assign the result themselves.
ShaderStage operator|=(const ShaderStage lhs, const ShaderStage rhs) {
    return static_cast<ShaderStage>(static_cast<uint32_t>(lhs) | static_cast<uint32_t>(rhs));
}
bool is_depth_format(const PixelFormat format) {
    // Only the two depth(-stencil) formats report true; every color format —
    // and any unrecognized value — is treated as non-depth, exactly as the
    // enumerated color cases did before.
    switch(format) {
        case PixelFormat::Depth32:
        case PixelFormat::Depth24Stencil8:
            return true;

        default:
            return false;
    }
}
uint32_t get_byte_size(const VertexFieldFormat format) {
    // Size in bytes of one vertex attribute of the given format. Anything
    // unrecognized is treated as the largest format (16 bytes), matching the
    // original switch's default.
    if(format == VertexFieldFormat::Uint) {
        return 4;
    }
    if(format == VertexFieldFormat::Float2) {
        return 8;
    }
    if(format == VertexFieldFormat::Float3) {
        return 12;
    }
    // Float4 and any unknown format
    return 16;
}
std::string descriptor_type_to_string(DescriptorType type) {
    // Human-readable name for a descriptor type, used in logs and error
    // messages. Unrecognized values map to "Unknown".
    if(type == DescriptorType::CombinedImageSampler) {
        return "CombinedImageSampler";
    }
    if(type == DescriptorType::UniformBuffer) {
        return "UniformBuffer";
    }
    if(type == DescriptorType::StorageBuffer) {
        return "StorageBuffer";
    }
    if(type == DescriptorType::Texture) {
        return "Texture";
    }
    if(type == DescriptorType::Sampler) {
        return "Sampler";
    }
    return "Unknown";
}
} // namespace nova::renderer::rhi
| 2,588
|
C++
|
.cpp
| 66
| 27.787879
| 124
| 0.585532
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,184
|
swapchain.cpp
|
NovaMods_nova-renderer/src/rhi/swapchain.cpp
|
#include "nova_renderer/rhi/swapchain.hpp"
namespace nova::renderer::rhi {
    // Records the image count and pixel dimensions; the per-frame resource
    // arrays are filled in elsewhere.
    Swapchain::Swapchain(const uint32_t num_images, const glm::uvec2& size) : num_images(num_images), size(size) {}

    // Per-frame accessors. Each returns the resource associated with the
    // given swapchain image index; no bounds checking is performed.

    RhiFramebuffer* Swapchain::get_framebuffer(const uint32_t frame_idx) const {
        return framebuffers[frame_idx];
    }

    RhiImage* Swapchain::get_image(const uint32_t frame_idx) const {
        return swapchain_images[frame_idx];
    }

    RhiFence* Swapchain::get_fence(const uint32_t frame_idx) const {
        return fences[frame_idx];
    }

    glm::uvec2 Swapchain::get_size() const {
        return size;
    }
} // namespace nova::renderer::rhi
| 609
|
C++
|
.cpp
| 8
| 72.125
| 115
| 0.737018
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,185
|
render_device.cpp
|
NovaMods_nova-renderer/src/rhi/render_device.cpp
|
#include "nova_renderer/rhi/render_device.hpp"
#include "vulkan/vulkan_render_device.hpp"
namespace nova::renderer::rhi {
    // Accessor for the swapchain created by the concrete backend.
    Swapchain* RenderDevice::get_swapchain() const { return swapchain; }

    // Caches references to the settings and window, and seeds the swapchain
    // size from the configured window width/height.
    RenderDevice::RenderDevice(NovaSettingsAccessManager& settings, NovaWindow& window)
        : settings(settings),
          window(window),
          swapchain_size(settings.settings.window.width, settings.settings.window.height) {}

    // Factory for the engine's render device. Vulkan is currently the only
    // backend, so this always constructs a VulkanRenderDevice.
    std::unique_ptr<RenderDevice> create_render_device(NovaSettingsAccessManager& settings, NovaWindow& window) {
        return std::make_unique<VulkanRenderDevice>(settings, window);
    }
} // namespace nova::renderer::rhi
| 662
|
C++
|
.cpp
| 12
| 49.5
| 113
| 0.747678
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,186
|
vulkan_resource_binder.cpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_resource_binder.cpp
|
#include "vulkan_resource_binder.hpp"
#include <rx/core/algorithm/max.h>
#include <rx/core/log.h>
#include "nova_renderer/rhi/rhi_types.hpp"
#include "../../renderer/pipeline_reflection.hpp"
#include "vulkan_render_device.hpp"
#include "vulkan_utils.hpp"
namespace nova::renderer::rhi {
RX_LOG("VulkanResourceBinder", logger);
template <typename ResourceType>
void bind_resource_array(const std::string& binding_name,
const std::vector<ResourceType*>& resources,
std::unordered_map<std::string, std::vector<ResourceType*>>& bound_resources);
// Takes ownership of the descriptor sets and binding metadata. The device,
// pipeline layout, and allocator are borrowed and must outlive this binder.
// NOTE(review): the `bound_*{&allocator}` member initializers are not valid
// std::unordered_map constructions — this looks like residue of a migration
// from rex containers; confirm these are project-local aliases.
VulkanResourceBinder::VulkanResourceBinder(VulkanRenderDevice& device,
                                           std::unordered_map<std::string, RhiResourceBindingDescription> bindings,
                                           std::vector<vk::DescriptorSet> sets,
                                           vk::PipelineLayout layout,
                                           rx::memory::allocator& allocator)
    : render_device{&device},
      allocator{&allocator},
      layout{layout},
      sets{std::move(sets)},
      bindings{std::move(bindings)},
      bound_images{&allocator},
      bound_buffers{&allocator},
      bound_samplers{&allocator} {}
// Single-resource overloads: wrap the resource in a one-element array and
// defer to the *_array versions.
// NOTE(review): `{allocator, std::array{...}}` is not a std::vector
// constructor — presumably a project-local container alias; confirm.
void VulkanResourceBinder::bind_image(const std::string& binding_name, RhiImage* image) {
    bind_image_array(binding_name, {allocator, std::array{image}});
}

void VulkanResourceBinder::bind_buffer(const std::string& binding_name, RhiBuffer* buffer) {
    bind_buffer_array(binding_name, {allocator, std::array{buffer}});
}

void VulkanResourceBinder::bind_sampler(const std::string& binding_name, RhiSampler* sampler) {
    bind_sampler_array(binding_name, {allocator, std::array{sampler}});
}

// The *_array methods only record the bindings and mark the sets dirty; the
// actual descriptor writes are deferred until get_sets() flushes them.

void VulkanResourceBinder::bind_image_array(const std::string& binding_name, const std::vector<RhiImage*>& images) {
    bind_resource_array(binding_name, images, bound_images);

    dirty = true;
}

void VulkanResourceBinder::bind_buffer_array(const std::string& binding_name, const std::vector<RhiBuffer*>& buffers) {
#if NOVA_DEBUG
    // Debug-only check: flag buffers too large to be bound as uniform
    // buffers on this GPU (they are still recorded).
    buffers.each_fwd([&](const RhiBuffer* buffer) {
        if(buffer->size > render_device->gpu.props.limits.maxUniformBufferRange) {
            logger->error("Cannot bind a buffer with a size greater than %u", render_device->gpu.props.limits.maxUniformBufferRange);
        }
    });
#endif

    bind_resource_array(binding_name, buffers, bound_buffers);

    dirty = true;
}

void VulkanResourceBinder::bind_sampler_array(const std::string& binding_name, const std::vector<RhiSampler*>& samplers) {
    bind_resource_array(binding_name, samplers, bound_samplers);

    dirty = true;
}
// The pipeline layout these descriptor sets were allocated against.
vk::PipelineLayout VulkanResourceBinder::get_layout() const { return layout; }

// Returns the descriptor sets, flushing any pending binding changes first.
// NOTE(review): `dirty` is never cleared anywhere visible, so every call
// after a bind re-runs update_all_descriptors — confirm whether a
// `dirty = false` is missing here.
const std::vector<vk::DescriptorSet>& VulkanResourceBinder::get_sets() {
    if(dirty) {
        update_all_descriptors();
    }

    return sets;
}
// Flushes every recorded image/sampler/buffer binding into the descriptor
// sets with one updateDescriptorSets call.
// NOTE(review): `each_pair`, `.last()`, pointer-returning `bindings.find`,
// and allocator-taking vector constructors are not std container APIs —
// this looks like a part-finished rex→std migration; confirm the aliases.
void VulkanResourceBinder::update_all_descriptors() {
    std::vector<vk::WriteDescriptorSet> writes{allocator};
    writes.reserve(bound_images.size() + bound_samplers.size() + bound_buffers.size());

    // These reserves are load-bearing: each write stores a pointer into the
    // outer vectors' elements, so they must never reallocate while writes
    // are still being accumulated.
    std::vector<std::vector<vk::DescriptorImageInfo>> all_image_infos{allocator};
    all_image_infos.reserve(bound_images.size() + bound_samplers.size());
    std::vector < std::vector<vk::DescriptorBufferInfo>> all_buffer_infos{allocator};
    all_buffer_infos.reserve(bound_buffers.size());

    // Sampled images → eSampledImage writes.
    bound_images.each_pair([&](const std::string& name, const std::vector<RhiImage*>& images) {
        const auto& binding = *bindings.find(name);
        const auto set = sets[binding.set];
        std::vector<vk::DescriptorImageInfo> image_infos{allocator};
        image_infos.reserve(images.size());
        images.each_fwd([&](const RhiImage* image) {
            const auto* vk_image = static_cast<const VulkanImage*>(image);
            auto image_info = vk::DescriptorImageInfo()
                                  .setImageView(vk_image->image_view)
                                  .setImageLayout(vk::ImageLayout::eShaderReadOnlyOptimal);
            image_infos.push_back(std::move(image_info));
        });
        all_image_infos.push_back(std::move(image_infos));
        auto write = vk::WriteDescriptorSet()
                         .setDstSet(set)
                         .setDstBinding(binding.binding)
                         .setDstArrayElement(0)
                         .setDescriptorCount(static_cast<uint32_t>(images.size()))
                         .setDescriptorType(vk::DescriptorType::eSampledImage)
                         .setPImageInfo(all_image_infos.last().data());
        writes.push_back(std::move(write));
    });

    // Samplers reuse DescriptorImageInfo storage (only .sampler is set).
    bound_samplers.each_pair([&](const std::string& name, const std::vector<RhiSampler*>& samplers) {
        const auto& binding = *bindings.find(name);
        const auto set = sets[binding.set];
        std::vector<vk::DescriptorImageInfo> sampler_infos{allocator};
        sampler_infos.reserve(samplers.size());
        samplers.each_fwd([&](const RhiSampler* sampler) {
            const auto* vk_sampler = static_cast<const VulkanSampler*>(sampler);
            auto sampler_info = vk::DescriptorImageInfo().setSampler(vk_sampler->sampler);
            sampler_infos.push_back(std::move(sampler_info));
        });
        all_image_infos.push_back(std::move(sampler_infos));
        auto write = vk::WriteDescriptorSet()
                         .setDstSet(set)
                         .setDstBinding(binding.binding)
                         .setDstArrayElement(0)
                         .setDescriptorCount(static_cast<uint32_t>(samplers.size()))
                         .setDescriptorType(vk::DescriptorType::eSampler)
                         .setPImageInfo(all_image_infos.last().data());
        writes.push_back(std::move(write));
    });

    // Buffers are always written as uniform buffers here.
    bound_buffers.each_pair([&](const std::string& name, const std::vector<RhiBuffer*>& buffers) {
        const auto& binding = *bindings.find(name);
        const auto set = sets[binding.set];
        std::vector<vk::DescriptorBufferInfo> buffer_infos{allocator};
        buffer_infos.reserve(buffers.size());
        buffers.each_fwd([&](const RhiBuffer* buffer) {
            const auto* vk_buffer = static_cast<const VulkanBuffer*>(buffer);
            auto buffer_info = vk::DescriptorBufferInfo().setBuffer(vk_buffer->buffer).setOffset(0).setRange(vk_buffer->size.b_count());
            buffer_infos.push_back(std::move(buffer_info));
        });
        all_buffer_infos.push_back(std::move(buffer_infos));
        auto write = vk::WriteDescriptorSet()
                         .setDstSet(set)
                         .setDstBinding(binding.binding)
                         .setDstArrayElement(0)
                         .setDescriptorCount(static_cast<uint32_t>(buffers.size()))
                         .setDescriptorType(vk::DescriptorType::eUniformBuffer)
                         .setPBufferInfo(all_buffer_infos.last().data());
        writes.push_back(std::move(write));
    });

    render_device->device.updateDescriptorSets(static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
template <typename ResourceType>
// Records (or replaces) the resource list bound under `binding_name`.
// Fix: the original used `auto* bound_data = bound_resources.find(...)` and a
// two-argument `insert(key, value)` — neither exists on std::unordered_map
// (find returns an iterator, not a pointer). insert_or_assign performs the
// same "replace if present, insert otherwise" logic with a single lookup.
void bind_resource_array(const std::string& binding_name,
                         const std::vector<ResourceType*>& resources,
                         std::unordered_map<std::string, std::vector<ResourceType*>>& bound_resources) {
    bound_resources.insert_or_assign(binding_name, resources);
}
} // namespace nova::renderer::rhi
| 8,162
|
C++
|
.cpp
| 143
| 42.79021
| 140
| 0.596942
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,187
|
vulkan_command_list.cpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_command_list.cpp
|
#include "vulkan_command_list.hpp"
#include <Tracy.hpp>
#include <rx/core/log.h>
#include <string.h>
#include <vk_mem_alloc.h>
#include "nova_renderer/camera.hpp"
#include "nova_renderer/constants.hpp"
#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "vk_structs.hpp"
#include "vulkan_render_device.hpp"
#include "vulkan_resource_binder.hpp"
#include "vulkan_utils.hpp"
namespace nova::renderer::rhi {
RX_LOG("vk::CmdLst", logger);
// Maps Nova's index-size enum onto the Vulkan-Hpp equivalent. Anything that
// is not Uint16 (including unknown values) maps to 32-bit indices.
vk::IndexType to_vk_index_type(const IndexType index_type) {
    switch(index_type) {
        case IndexType::Uint16:
            // Fix: return the vk:: enum-class values — the C constants
            // VK_INDEX_TYPE_UINT16/32 do not implicitly convert to the
            // declared vk::IndexType return type.
            return vk::IndexType::eUint16;

        case IndexType::Uint32:
            [[fallthrough]];
        default:
            return vk::IndexType::eUint32;
    }
}
// Wraps an already-allocated command buffer and immediately begins recording
// it as a one-time-submit buffer; the matching end/submit happens elsewhere.
// NOTE(review): raw VK_* constants are assigned into vulkan-hpp structs
// (sType/flags here and throughout this file) — looks like a part-finished
// C→C++ API migration; confirm these assignments compile as intended.
VulkanRenderCommandList::VulkanRenderCommandList(vk::CommandBuffer cmds,
                                                 VulkanRenderDevice& render_device,
                                                 rx::memory::allocator& allocator)
    : cmds(cmds), device(render_device), allocator(allocator), descriptor_sets{&allocator} {
    ZoneScoped; // TODO: Put this begin info in the constructor parameters
    vk::CommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(cmds, &begin_info);
}

// Labels this command buffer so it shows up by name in validation-layer
// messages and graphics debuggers. `name` only needs to outlive the
// vkSetDebugUtilsObjectNameEXT call below.
void VulkanRenderCommandList::set_debug_name(const std::string& name) {
    vk::DebugUtilsObjectNameInfoEXT vk_name{};
    vk_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
    vk_name.objectType = VK_OBJECT_TYPE_COMMAND_BUFFER;
    vk_name.objectHandle = reinterpret_cast<uint64_t>(cmds);
    vk_name.pObjectName = name.data();
    device.vkSetDebugUtilsObjectNameEXT(device.device, &vk_name);
}
// Allocates the next "standard" descriptor set from the device and fills it
// with the material-model resources: camera buffer (binding 0), material
// buffer (binding 1), point/bilinear/trilinear samplers (bindings 2-4), and
// the texture array (binding 5), then binds the set for graphics. The set is
// remembered so cleanup_resources() can return it to the device later.
void VulkanRenderCommandList::bind_material_resources(RhiBuffer* camera_buffer,
                                                      RhiBuffer* material_buffer,
                                                      RhiSampler* point_sampler,
                                                      RhiSampler* bilinear_sampler,
                                                      RhiSampler* trilinear_sampler,
                                                      const std::vector<RhiImage*>& textures,
                                                      rx::memory::allocator& allocator) {
    const auto set = device.get_next_standard_descriptor_set();

    const auto* vk_camera_buffer = static_cast<VulkanBuffer*>(camera_buffer);
    const auto camera_buffer_write = vk::DescriptorBufferInfo()
                                         .setOffset(0)
                                         .setRange(vk_camera_buffer->size.b_count())
                                         .setBuffer(vk_camera_buffer->buffer);
    // Buffers too large for a uniform binding fall back to storage buffers.
    const auto camera_buffer_descriptor_type = vk_camera_buffer->size < device.gpu.props.limits.maxUniformBufferRange ?
                                                   vk::DescriptorType::eUniformBuffer :
                                                   vk::DescriptorType::eStorageBuffer;

    const auto* vk_material_buffer = static_cast<VulkanBuffer*>(material_buffer);
    const auto material_buffer_write = vk::DescriptorBufferInfo()
                                           .setOffset(0)
                                           .setRange(material_buffer->size.b_count())
                                           .setBuffer(vk_material_buffer->buffer);
    const auto material_buffer_descriptor_type = material_buffer->size < device.gpu.props.limits.maxUniformBufferRange ?
                                                     vk::DescriptorType::eUniformBuffer :
                                                     vk::DescriptorType::eStorageBuffer;

    const auto* vk_point_sampler = static_cast<VulkanSampler*>(point_sampler);
    const auto point_sampler_write = vk::DescriptorImageInfo().setSampler(vk_point_sampler->sampler);

    const auto* vk_bilinear_sampler = static_cast<VulkanSampler*>(bilinear_sampler);
    const auto bilinear_sampler_write = vk::DescriptorImageInfo().setSampler(vk_bilinear_sampler->sampler);

    const auto* vk_trilinear_sampler = static_cast<VulkanSampler*>(trilinear_sampler);
    const auto trilinear_sampler_write = vk::DescriptorImageInfo().setSampler(vk_trilinear_sampler->sampler);

    // NOTE(review): `{&allocator}` construction and `each_fwd` are not
    // std::vector APIs — presumably project-local container aliases; confirm.
    std::vector<vk::DescriptorImageInfo> vk_textures{&allocator};
    vk_textures.reserve(textures.size());
    textures.each_fwd([&](const RhiImage* image) {
        const auto* vk_image = static_cast<const VulkanImage*>(image);
        vk_textures.emplace_back(
            vk::DescriptorImageInfo().setImageView(vk_image->image_view).setImageLayout(vk::ImageLayout::eShaderReadOnlyOptimal));
    });

    // The info structs above are stack locals; this is safe only because
    // updateDescriptorSets consumes them before this function returns.
    const auto writes = std::array{
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(0)
            .setDstArrayElement(0)
            .setDescriptorCount(1)
            .setDescriptorType(camera_buffer_descriptor_type)
            .setPBufferInfo(&camera_buffer_write),
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(1)
            .setDstArrayElement(0)
            .setDescriptorCount(1)
            .setDescriptorType(material_buffer_descriptor_type)
            .setPBufferInfo(&material_buffer_write),
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(2)
            .setDstArrayElement(0)
            .setDescriptorCount(1)
            .setDescriptorType(vk::DescriptorType::eSampler)
            .setPImageInfo(&point_sampler_write),
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(3)
            .setDstArrayElement(0)
            .setDescriptorCount(1)
            .setDescriptorType(vk::DescriptorType::eSampler)
            .setPImageInfo(&bilinear_sampler_write),
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(4)
            .setDstArrayElement(0)
            .setDescriptorCount(1)
            .setDescriptorType(vk::DescriptorType::eSampler)
            .setPImageInfo(&trilinear_sampler_write),
        vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(5)
            .setDstArrayElement(0)
            .setDescriptorCount(static_cast<uint32_t>(vk_textures.size()))
            .setDescriptorType(vk::DescriptorType::eSampledImage)
            .setPImageInfo(vk_textures.data()),
    };

    device.device.updateDescriptorSets(static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);

    vkCmdBindDescriptorSets(cmds,
                            VK_PIPELINE_BIND_POINT_GRAPHICS,
                            device.standard_pipeline_layout,
                            0,
                            1,
                            reinterpret_cast<const vk::DescriptorSet*>(&set),
                            0,
                            nullptr);

    descriptor_sets.emplace_back(set);
}
// Binds every descriptor set owned by `binder` — flushing its pending
// updates via get_sets() — starting at set index 0 of the binder's own
// pipeline layout (not the device's standard layout).
void VulkanRenderCommandList::bind_resources(RhiResourceBinder& binder) {
    auto& vk_binder = static_cast<VulkanResourceBinder&>(binder);
    const auto& sets = vk_binder.get_sets();
    const auto& layout = vk_binder.get_layout();
    vkCmdBindDescriptorSets(cmds,
                            VK_PIPELINE_BIND_POINT_GRAPHICS,
                            layout,
                            0,
                            static_cast<uint32_t>(sets.size()),
                            reinterpret_cast<const vk::DescriptorSet*>(sets.data()),
                            0,
                            nullptr);
}
// Translates the RHI barrier descriptions into Vulkan image/buffer memory
// barriers and records them as a single pipeline barrier between the two
// stage masks. No global memory barriers are emitted.
void VulkanRenderCommandList::resource_barriers(const PipelineStage stages_before_barrier,
                                                const PipelineStage stages_after_barrier,
                                                const std::vector<RhiResourceBarrier>& barriers) {
    ZoneScoped;
    std::vector<vk::BufferMemoryBarrier> buffer_barriers{&allocator};
    buffer_barriers.reserve(barriers.size());

    std::vector<vk::ImageMemoryBarrier> image_barriers{&allocator};
    image_barriers.reserve(barriers.size());

    barriers.each_fwd([&](const RhiResourceBarrier& barrier) {
        switch(barrier.resource_to_barrier->type) {
            case ResourceType::Image: {
                const auto* image = static_cast<VulkanImage*>(barrier.resource_to_barrier);

                vk::ImageMemoryBarrier image_barrier = {};
                image_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
                image_barrier.srcAccessMask = to_vk_access_flags(barrier.access_before_barrier);
                image_barrier.dstAccessMask = to_vk_access_flags(barrier.access_after_barrier);
                image_barrier.oldLayout = to_vk_image_layout(barrier.old_state);
                image_barrier.newLayout = to_vk_image_layout(barrier.new_state);
                image_barrier.srcQueueFamilyIndex = device.get_queue_family_index(barrier.source_queue);
                image_barrier.dstQueueFamilyIndex = device.get_queue_family_index(barrier.destination_queue);
                image_barrier.image = image->image;
                image_barrier.subresourceRange.aspectMask = static_cast<vk::ImageAspectFlags>(barrier.image_memory_barrier.aspect);
                // Only the first mip/layer is covered by this barrier.
                image_barrier.subresourceRange.baseMipLevel = 0; // TODO: Something smarter with mips
                image_barrier.subresourceRange.levelCount = 1;
                image_barrier.subresourceRange.baseArrayLayer = 0;
                image_barrier.subresourceRange.layerCount = 1;

                image_barriers.push_back(image_barrier);
            } break;

            case ResourceType::Buffer: {
                const auto* buffer = static_cast<VulkanBuffer*>(barrier.resource_to_barrier);

                vk::BufferMemoryBarrier buffer_barrier = {};
                buffer_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
                buffer_barrier.srcAccessMask = to_vk_access_flags(barrier.access_before_barrier);
                buffer_barrier.dstAccessMask = to_vk_access_flags(barrier.access_after_barrier);
                buffer_barrier.srcQueueFamilyIndex = device.get_queue_family_index(barrier.source_queue);
                buffer_barrier.dstQueueFamilyIndex = device.get_queue_family_index(barrier.destination_queue);
                buffer_barrier.buffer = buffer->buffer;
                buffer_barrier.offset = barrier.buffer_memory_barrier.offset.b_count();
                buffer_barrier.size = barrier.buffer_memory_barrier.size.b_count();

                buffer_barriers.push_back(buffer_barrier);
            } break;
        }
    });

    vkCmdPipelineBarrier(cmds,
                         static_cast<vk::PipelineStageFlags>(stages_before_barrier),
                         static_cast<vk::PipelineStageFlags>(stages_after_barrier),
                         0,
                         0,
                         nullptr,
                         static_cast<uint32_t>(buffer_barriers.size()),
                         buffer_barriers.data(),
                         static_cast<uint32_t>(image_barriers.size()),
                         image_barriers.data());
}
// Records a GPU-side copy of `num_bytes` from `source_buffer` to
// `destination_buffer` at the given byte offsets.
void VulkanRenderCommandList::copy_buffer(RhiBuffer* destination_buffer,
                                          const mem::Bytes destination_offset,
                                          RhiBuffer* source_buffer,
                                          const mem::Bytes source_offset,
                                          const mem::Bytes num_bytes) {
    ZoneScoped;
    // Renamed from `copy` so the address-of expression can't be re-mangled
    // into the HTML entity "&copy;" (the original line ended in a literal
    // '©' — mojibake for `&copy` — which cannot compile).
    vk::BufferCopy copy_region;
    copy_region.srcOffset = source_offset.b_count();
    copy_region.dstOffset = destination_offset.b_count();
    copy_region.size = num_bytes.b_count();

    auto* vk_destination_buffer = static_cast<VulkanBuffer*>(destination_buffer);
    auto* vk_source_buffer = static_cast<VulkanBuffer*>(source_buffer);

    // TODO: fix the crash on this line
    vkCmdCopyBuffer(cmds, vk_source_buffer->buffer, vk_destination_buffer->buffer, 1, &copy_region);
}
// Records the given (secondary) command lists into this command buffer.
void VulkanRenderCommandList::execute_command_lists(const std::vector<RhiRenderCommandList*>& lists) {
    ZoneScoped;
    std::vector<vk::CommandBuffer> buffers{&allocator};
    buffers.reserve(lists.size());
    lists.each_fwd([&](RhiRenderCommandList* list) {
        auto* vk_list = dynamic_cast<VulkanRenderCommandList*>(list);
        buffers.push_back(vk_list->cmds);
    });

    vkCmdExecuteCommands(cmds, static_cast<uint32_t>(buffers.size()), buffers.data());
}

// Selects the active camera by pushing its index into the first
// push-constant word of the standard pipeline layout. The index is cached
// in `camera_index` because vkCmdPushConstants reads from a pointer.
void VulkanRenderCommandList::set_camera(const Camera& camera) {
    ZoneScoped;
    camera_index = camera.index;
    vkCmdPushConstants(cmds, device.standard_pipeline_layout, VK_SHADER_STAGE_ALL, 0, sizeof(uint32_t), &camera_index);
}
// Starts the given renderpass on the given framebuffer, covering the full
// framebuffer area, and remembers it as the current pass for set_pipeline.
void VulkanRenderCommandList::begin_renderpass(RhiRenderpass* renderpass, RhiFramebuffer* framebuffer) {
    ZoneScoped;
    auto* vk_renderpass = static_cast<VulkanRenderpass*>(renderpass);
    auto* vk_framebuffer = static_cast<VulkanFramebuffer*>(framebuffer);

    current_render_pass = vk_renderpass;

    // One default-constructed (zeroed) clear value per attachment.
    std::vector<vk::ClearValue> clear_values{&allocator, vk_framebuffer->num_attachments};

    vk::RenderPassBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    begin_info.renderPass = vk_renderpass->pass;
    begin_info.framebuffer = vk_framebuffer->framebuffer;
    begin_info.renderArea = {{0, 0}, {static_cast<uint32_t>(framebuffer->size.x), static_cast<uint32_t>(framebuffer->size.y)}};
    begin_info.clearValueCount = vk_framebuffer->num_attachments;
    begin_info.pClearValues = clear_values.data();

    vkCmdBeginRenderPass(cmds, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
}

void VulkanRenderCommandList::end_renderpass() {
    ZoneScoped;
    vkCmdEndRenderPass(cmds);

    current_render_pass = nullptr;
}

// Pushes the material index into the second push-constant word, right after
// the camera index written by set_camera.
void VulkanRenderCommandList::set_material_index(uint32_t index) {
    vkCmdPushConstants(cmds, device.standard_pipeline_layout, VK_SHADER_STAGE_ALL, sizeof(uint32_t), sizeof(uint32_t), &index);
}
// Binds `state`, compiling and caching a Vulkan pipeline for the current
// renderpass on first use. Logs an error and records nothing when called
// outside a renderpass or when compilation fails.
// NOTE(review): pointer-returning `find`/`insert` on cached_pipelines is not
// the std::unordered_map API — presumably a project container; confirm.
void VulkanRenderCommandList::set_pipeline(const RhiPipeline& state) {
    ZoneScoped;
    const auto& vk_pipeline = static_cast<const VulkanPipeline&>(state);

    if(current_render_pass != nullptr) {
        auto* pipeline = current_render_pass->cached_pipelines.find(vk_pipeline.state.name);
        if(pipeline == nullptr) {
            // Cache miss: compile the pipeline against the current renderpass.
            const auto pipeline_result = device.compile_pipeline_state(vk_pipeline, *current_render_pass, allocator);
            if(pipeline_result) {
                pipeline = current_render_pass->cached_pipelines.insert(vk_pipeline.state.name, *pipeline_result);
            } else {
                logger->error("Could not compile pipeline %s", vk_pipeline.state.name);
                return;
            }
        }

        if(pipeline != nullptr) {
            vkCmdBindPipeline(cmds, VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<vk::Pipeline>(*pipeline));
        }
    } else {
        logger->error("Cannot use a pipeline state when not in a renderpass");
    }
}
// Binds each provided descriptor set to the slot matching its position in
// `descriptor_sets`, always using the device's standard pipeline layout.
// NOTE(review): `vk_interface` is computed but never used — the
// `pipeline_interface` argument is effectively ignored here; confirm.
void VulkanRenderCommandList::bind_descriptor_sets(const std::vector<RhiDescriptorSet*>& descriptor_sets,
                                                   const RhiPipelineInterface* pipeline_interface) {
    ZoneScoped;
    const auto* vk_interface = static_cast<const VulkanPipelineInterface*>(pipeline_interface);

    for(uint32_t i = 0; i < descriptor_sets.size(); i++) {
        const auto* vk_set = static_cast<const VulkanDescriptorSet*>(descriptor_sets[i]);
        vkCmdBindDescriptorSets(cmds,
                                VK_PIPELINE_BIND_POINT_GRAPHICS,
                                device.standard_pipeline_layout,
                                i,
                                1,
                                &vk_set->descriptor_set,
                                0,
                                nullptr);
    }
}

// Binds `buffers` as vertex buffers starting at binding 0.
// NOTE(review): each buffer's byte offset is set to its own index
// (`offsets.push_back(i)` → offsets 0,1,2,...) — this looks unintended;
// a constant 0 per buffer is the usual pattern. Confirm before relying
// on this path.
void VulkanRenderCommandList::bind_vertex_buffers(const std::vector<RhiBuffer*>& buffers) {
    ZoneScoped;
    std::vector<vk::Buffer> vk_buffers{&allocator};
    vk_buffers.reserve(buffers.size());
    std::vector<vk::DeviceSize> offsets{&allocator};
    offsets.reserve(buffers.size());
    for(uint32_t i = 0; i < buffers.size(); i++) {
        offsets.push_back(i);
        const auto* vk_buffer = static_cast<const VulkanBuffer*>(buffers[i]);
        vk_buffers.push_back(vk_buffer->buffer);
    }

    vkCmdBindVertexBuffers(cmds, 0, static_cast<uint32_t>(vk_buffers.size()), vk_buffers.data(), offsets.data());
}
// Binds `buffer` as the index buffer from byte offset 0, with the index
// width translated from Nova's IndexType enum.
void VulkanRenderCommandList::bind_index_buffer(const RhiBuffer* buffer, const IndexType index_type) {
    ZoneScoped;
    const auto* vk_buffer = static_cast<const VulkanBuffer*>(buffer);
    vkCmdBindIndexBuffer(cmds, vk_buffer->buffer, 0, to_vk_index_type(index_type));
}

// `offset` is the first index within the bound index buffer; vertex offset
// and first instance are always zero.
void VulkanRenderCommandList::draw_indexed_mesh(const uint32_t num_indices, const uint32_t offset, const uint32_t num_instances) {
    ZoneScoped;
    vkCmdDrawIndexed(cmds, num_indices, num_instances, offset, 0, 0);
}

// Sets scissor rectangle 0 to the given region (origin + extent in pixels).
void VulkanRenderCommandList::set_scissor_rect(const uint32_t x, const uint32_t y, const uint32_t width, const uint32_t height) {
    ZoneScoped;
    vk::Rect2D scissor_rect = {{static_cast<int32_t>(x), static_cast<int32_t>(y)}, {width, height}};
    vkCmdSetScissor(cmds, 0, 1, &scissor_rect);
}
// Copies `data` into the staging buffer's mapped memory, then records a
// buffer→image copy covering the full width×height region. The staging
// buffer must be host-visible and already mapped (allocation_info.pMappedData),
// and the image must be in TRANSFER_DST_OPTIMAL layout when this executes.
void VulkanRenderCommandList::upload_data_to_image(RhiImage* image,
                                                   const size_t width,
                                                   const size_t height,
                                                   const size_t bytes_per_pixel,
                                                   RhiBuffer* staging_buffer,
                                                   const void* data) {
    ZoneScoped;
    auto* vk_image = static_cast<VulkanImage*>(image);
    auto* vk_buffer = static_cast<VulkanBuffer*>(staging_buffer);

    memcpy(vk_buffer->allocation_info.pMappedData, data, width * height * bytes_per_pixel);

    vk::BufferImageCopy image_copy{};
    if(!vk_image->is_depth_tex) {
        image_copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    } else {
        // NOTE(review): this only logs and then records the copy anyway with
        // aspectMask left zero — an early return here looks intended; confirm.
        logger->error("Can not upload data to depth images");
    }
    image_copy.imageSubresource.layerCount = 1;
    image_copy.imageExtent = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1};

    vkCmdCopyBufferToImage(cmds, vk_buffer->buffer, vk_image->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);
}

// Returns the standard descriptor sets this list allocated back to the
// device's pool and forgets them.
void VulkanRenderCommandList::cleanup_resources() {
    ZoneScoped;
    device.return_standard_descriptor_sets(descriptor_sets);
    descriptor_sets.clear();
}
} // namespace nova::renderer::rhi
| 20,003
|
C++
|
.cpp
| 332
| 43.090361
| 135
| 0.57935
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,188
|
vulkan_render_device.cpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_render_device.cpp
|
// This define MUST be before including vulkan_render_device.hpp
#define VMA_IMPLEMENTATION
#include "vulkan_render_device.hpp"
#include <csignal>
#include <cstring>
#include <sstream>
#include <Tracy.hpp>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <spdlog/spdlog.h>
#include "nova_renderer/camera.hpp"
#include "nova_renderer/constants.hpp"
#include "nova_renderer/frame_context.hpp"
#include "nova_renderer/renderables.hpp"
#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "nova_renderer/window.hpp"
#include "../../renderer/pipeline_reflection.hpp"
#include "vk_structs.hpp"
#include "vulkan_command_list.hpp"
#include "vulkan_resource_binder.hpp"
#include "vulkan_utils.hpp"
// TODO: Move window creation out of the RHI
#ifdef NOVA_LINUX
#define NOVA_VK_XLIB
#include "../../util/linux_utils.hpp"
#elif defined(NOVA_WINDOWS)
#include "nova_renderer/util/windows.hpp"
#endif
using namespace nova::mem;
#if defined(NOVA_WINDOWS)
#define BREAK_ON_DEVICE_LOST(result) \
if((result) == VK_ERROR_DEVICE_LOST) { \
DebugBreak(); \
}
#elif defined(NOVA_LINUX)
#define BREAK_ON_DEVICE_LOST(result) \
if((result) == VK_ERROR_DEVICE_LOST) { \
raise(SIGINT); \
}
#endif
namespace nova::renderer::rhi {
static auto logger = spdlog::stdout_color_mt("VulkanRenderDevice");
void FencedTask::operator()() const { work_to_perform(); }
    // Brings up the whole Vulkan backend for `window`.
    //
    // The call order below matters: each step depends on handles created by the
    // previous one (instance -> surface -> device/queues -> VMA -> swapchain ->
    // command pools -> standard pipeline layout). Do not reorder.
    VulkanRenderDevice::VulkanRenderDevice(NovaSettingsAccessManager& settings, NovaWindow& window) : RenderDevice{settings, window} {
        ZoneScoped;
        create_instance();
        if(settings.settings.debug.enabled) {
            enable_debug_output();
        }
        create_surface();
        create_device_and_queues();
        save_device_info();
        initialize_vma();
        if(settings.settings.debug.enabled) {
            // Late init, can only be used when the device has already been created
            vkSetDebugUtilsObjectNameEXT = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(
                vkGetDeviceProcAddr(device, "vkSetDebugUtilsObjectNameEXT"));
        }
        create_swapchain();
        create_per_thread_command_pools();
        create_standard_pipeline_layout();
    }
    // Intentionally a no-op in the Vulkan backend: render passes are created one at a
    // time in create_renderpass, so nothing needs to be preallocated here
    void VulkanRenderDevice::set_num_renderpasses(uint32_t /* num_renderpasses */) {
        // Pretty sure Vulkan doesn't need to do anything here
    }
ntl::Result<RhiRenderpass*> VulkanRenderDevice::create_renderpass(const renderpack::RenderPassCreateInfo& data,
const glm::uvec2& framebuffer_size) {
ZoneScoped;
auto* vk_swapchain = static_cast<VulkanSwapchain*>(swapchain);
vk::Extent2D swapchain_extent = {swapchain_size.x, swapchain_size.y};
auto renderpass = VulkanRenderpass{};
vk::SubpassDescription subpass_description = {};
subpass_description.flags = 0;
subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass_description.inputAttachmentCount = 0;
subpass_description.pInputAttachments = nullptr;
subpass_description.preserveAttachmentCount = 0;
subpass_description.pPreserveAttachments = nullptr;
subpass_description.pResolveAttachments = nullptr;
subpass_description.pDepthStencilAttachment = nullptr;
vk::SubpassDependency image_available_dependency = {};
image_available_dependency.dependencyFlags = 0;
image_available_dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
image_available_dependency.dstSubpass = 0;
image_available_dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
image_available_dependency.srcAccessMask = 0;
image_available_dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
image_available_dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
vk::RenderPassCreateInfo render_pass_create_info = {};
render_pass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
render_pass_create_info.pNext = nullptr;
render_pass_create_info.flags = 0;
render_pass_create_info.subpassCount = 1;
render_pass_create_info.pSubpasses = &subpass_description;
render_pass_create_info.dependencyCount = 1;
render_pass_create_info.pDependencies = &image_available_dependency;
std::vector<vk::AttachmentReference> attachment_references{};
std::vector<vk::AttachmentDescription> attachments{};
std::vector<vk::ImageView> framebuffer_attachments{};
uint32_t framebuffer_width = framebuffer_size.x;
uint32_t framebuffer_height = framebuffer_size.y;
bool writes_to_backbuffer = false;
// Collect framebuffer size information from color output attachments
for(const renderpack::TextureAttachmentInfo& attachment : data.texture_outputs) {
if(attachment.name == BACKBUFFER_NAME) {
// Handle backbuffer
// Backbuffer framebuffers are handled by themselves in their own special snowflake way so we just need to skip
// everything
writes_to_backbuffer = true;
vk::AttachmentDescription desc;
desc.flags = 0;
desc.format = vk_swapchain->get_swapchain_format();
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments.push_back(desc);
vk::AttachmentReference ref;
ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
ref.attachment = static_cast<uint32_t>(attachments.size()) - 1;
attachment_references.push_back(ref);
framebuffer_width = swapchain_extent.width;
framebuffer_height = swapchain_extent.height;
} else {
vk::AttachmentDescription desc;
desc.flags = 0;
desc.format = to_vk_format(attachment.pixel_format);
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.loadOp = attachment.clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD;
desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments.push_back(desc);
vk::AttachmentReference ref;
ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
ref.attachment = static_cast<uint32_t>(attachments.size()) - 1;
attachment_references.push_back(ref);
}
}
vk::AttachmentReference depth_reference = {};
// Collect framebuffer size information from the depth attachment
if(data.depth_texture) {
vk::AttachmentDescription desc = {};
desc.flags = 0;
desc.format = to_vk_format(data.depth_texture->pixel_format);
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.loadOp = data.depth_texture->clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD;
desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachments.push_back(desc);
depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depth_reference.attachment = static_cast<uint32_t>(attachments.size()) - 1;
subpass_description.pDepthStencilAttachment = &depth_reference;
}
if(framebuffer_width == 0) {
return ntl::Result<RhiRenderpass*>(MAKE_ERROR(
"Framebuffer width for pass {:s} is 0. This is illegal! Make sure that there is at least one attachment for this render pass, and ensure that all attachments used by this pass have a non-zero width",
data.name.data()));
}
if(framebuffer_height == 0) {
return ntl::Result<RhiRenderpass*>(MAKE_ERROR(
"Framebuffer height for pass {:s} is 0. This is illegal! Make sure that there is at least one attachment for this render pass, and ensure that all attachments used by this pass have a non-zero height",
data.name.data()));
}
if(framebuffer_attachments.size() > gpu.props.limits.maxColorAttachments) {
return ntl::Result<RhiRenderpass*>(MAKE_ERROR(
"Framebuffer for pass {:s} has {:d} color attachments, but your GPU only supports {:d}. Please reduce the number of attachments that this pass uses, possibly by changing some of your input attachments to bound textures",
data.name.data(),
data.texture_outputs.size(),
gpu.props.limits.maxColorAttachments));
}
subpass_description.colorAttachmentCount = static_cast<uint32_t>(attachment_references.size());
subpass_description.pColorAttachments = attachment_references.data();
render_pass_create_info.attachmentCount = static_cast<uint32_t>(attachments.size());
render_pass_create_info.pAttachments = attachments.data();
NOVA_CHECK_RESULT(vkCreateRenderPass(device, &render_pass_create_info, nullptr, &renderpass.pass));
if(writes_to_backbuffer) {
if(data.texture_outputs.size() > 1) {
logger->error(
"Pass %s writes to the backbuffer, and other textures. Passes that write to the backbuffer are not allowed to write to any other textures",
data.name);
}
}
renderpass.render_area = {.offset = {0, 0}, .extent = {framebuffer_width, framebuffer_height}};
if(settings.settings.debug.enabled) {
vk::DebugUtilsObjectNameInfoEXT object_name = {};
object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
object_name.objectType = VK_OBJECT_TYPE_RENDER_PASS;
object_name.objectHandle = reinterpret_cast<uint64_t>(renderpass.pass);
object_name.pObjectName = data.name.data();
NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &object_name));
}
return ntl::Result(static_cast<RhiRenderpass*>(renderpass));
}
RhiFramebuffer* VulkanRenderDevice::create_framebuffer(const RhiRenderpass* renderpass,
const std::vector<RhiImage*>& color_attachments,
const std::optional<RhiImage*> depth_attachment,
const glm::uvec2& framebuffer_size,
rx::memory::allocator& allocator) {
const auto* vk_renderpass = static_cast<const VulkanRenderpass*>(renderpass);
std::vector<vk::ImageView> attachment_views(&allocator);
attachment_views.reserve(color_attachments.size() + 1);
color_attachments.each_fwd([&](const RhiImage* attachment) {
const auto* vk_image = static_cast<const VulkanImage*>(attachment);
attachment_views.push_back(vk_image->image_view);
});
// Depth attachment is ALWAYS the last attachment
if(depth_attachment) {
const auto* vk_depth_image = static_cast<const VulkanImage*>(*depth_attachment);
attachment_views.push_back(vk_depth_image->image_view);
}
vk::FramebufferCreateInfo framebuffer_create_info = {};
framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebuffer_create_info.renderPass = vk_renderpass->pass;
framebuffer_create_info.attachmentCount = static_cast<uint32_t>(attachment_views.size());
framebuffer_create_info.pAttachments = attachment_views.data();
framebuffer_create_info.width = framebuffer_size.x;
framebuffer_create_info.height = framebuffer_size.y;
framebuffer_create_info.layers = 1;
auto* framebuffer = allocator.create<VulkanFramebuffer>();
framebuffer->size = framebuffer_size;
framebuffer->num_attachments = static_cast<uint32_t>(attachment_views.size());
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(allocator);
NOVA_CHECK_RESULT(vkCreateFramebuffer(device, &framebuffer_create_info, &vk_alloc, &framebuffer->framebuffer));
return framebuffer;
}
    // Creates a pipeline that renders to the swapchain surface. Surface pipelines all
    // share Nova's standard pipeline layout instead of a reflected bespoke one.
    //
    // NOTE(review): std::make_unique forwards `&allocator` to VulkanPipeline's
    // constructor, and the layout vectors are initialized with `{&allocator, ...}` —
    // both look like leftovers from the rx-container migration; confirm these
    // constructors/containers still accept an allocator.
    std::unique_ptr<RhiPipeline> VulkanRenderDevice::create_surface_pipeline(const RhiGraphicsPipelineState& pipeline_state,
                                                                             rx::memory::allocator& allocator) {
        // For the Vulkan backend, creating a surface pipeline means creating a pipeline that uses the standard pipeline layout
        auto pipeline = std::make_unique<VulkanPipeline>(&allocator);
        pipeline->name = pipeline_state.name;
        pipeline->layout.layout = standard_pipeline_layout;
        pipeline->layout.descriptor_set_layouts = {&allocator, std::array{standard_set_layout}};
        pipeline->layout.variable_descriptor_set_counts = {&allocator, std::array{MAX_NUM_TEXTURES}};
        pipeline->layout.bindings = standard_layout_bindings;
        pipeline->state = pipeline_state;
        return pipeline;
    }
    // Creates a pipeline whose layout is derived from the pipeline state itself (via
    // create_pipeline_layout) rather than the shared standard layout.
    //
    // NOTE(review): std::make_unique forwards `&allocator` to VulkanPipeline's
    // constructor — looks like a leftover from the rx-container migration; confirm
    // the constructor still takes an allocator.
    std::unique_ptr<RhiPipeline> VulkanRenderDevice::create_global_pipeline(const RhiGraphicsPipelineState& pipeline_state,
                                                                            rx::memory::allocator& allocator) {
        // For the Vulkan backend, creating a global pipeline means creating a pipeline with a bespoke pipeline layout
        const auto& layout = create_pipeline_layout(pipeline_state);
        auto pipeline = std::make_unique<VulkanPipeline>(&allocator);
        pipeline->name = pipeline_state.name;
        pipeline->layout = layout;
        pipeline->state = pipeline_state;
        return pipeline;
    }
    // Builds a resource binder holding freshly-allocated descriptor sets that match
    // `pipeline`'s layout, so callers can bind resources before drawing with it.
    //
    // NOTE(review): the first make_unique argument forwards `&allocator` to
    // VulkanResourceBinder's constructor while `allocator` is also passed as the last
    // argument — confirm this double-pass is still the intended signature after the
    // rx-container migration.
    std::unique_ptr<RhiResourceBinder> VulkanRenderDevice::create_resource_binder_for_pipeline(const RhiPipeline& pipeline,
                                                                                              rx::memory::allocator& allocator) {
        const auto& vk_pipeline = static_cast<const VulkanPipeline&>(pipeline);
        // One descriptor set per set layout, honoring each layout's variable-count binding
        auto descriptors = create_descriptors(vk_pipeline.layout.descriptor_set_layouts,
                                              vk_pipeline.layout.variable_descriptor_set_counts,
                                              allocator);
        return std::make_unique<VulkanResourceBinder>(&allocator,
                                                      *this,
                                                      vk_pipeline.layout.bindings,
                                                      descriptors,
                                                      vk_pipeline.layout.layout,
                                                      allocator);
    }
std::optional<vk::DescriptorPool> VulkanRenderDevice::create_descriptor_pool(
const std::unordered_map<DescriptorType, uint32_t>& descriptor_capacity, rx::memory::allocator& allocator) {
ZoneScoped;
std::vector<vk::DescriptorPoolSize> pool_sizes{&internal_allocator};
uint32_t max_sets = 0;
descriptor_capacity.each_pair([&](const DescriptorType& type, const uint32_t count) {
pool_sizes.emplace_back(vk::DescriptorPoolSize{to_vk_descriptor_type(type), count});
max_sets += count;
});
const auto pool_create_info = vk::DescriptorPoolCreateInfo()
.setFlags(vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind)
.setMaxSets(max_sets)
.setPoolSizeCount(static_cast<uint32_t>(pool_sizes.size()))
.setPPoolSizes(pool_sizes.data());
vk::DescriptorPool pool;
const auto& vk_alloc = wrap_allocator(allocator);
device.createDescriptorPool(&pool_create_info, &vk_alloc, &pool);
return pool;
}
vk::DescriptorSet VulkanRenderDevice::get_next_standard_descriptor_set() {
if(standard_descriptor_sets.is_empty()) {
const auto variable_set_counts = std::array{MAX_NUM_TEXTURES};
const auto count_allocate_info = vk::DescriptorSetVariableDescriptorCountAllocateInfo()
.setPDescriptorCounts(variable_set_counts.data())
.setDescriptorSetCount(static_cast<uint32_t>(variable_set_counts.size()));
const auto allocate_info = vk::DescriptorSetAllocateInfo()
.setDescriptorPool(standard_descriptor_set_pool)
.setDescriptorSetCount(1)
.setPSetLayouts(&standard_set_layout)
.setPNext(&count_allocate_info);
vk::DescriptorSet set;
device.allocateDescriptorSets(&allocate_info, &set);
if(settings->debug.enabled) {
vk::DebugUtilsObjectNameInfoEXT object_name = {};
object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
object_name.objectType = VK_OBJECT_TYPE_DESCRIPTOR_SET;
object_name.objectHandle = reinterpret_cast<uint64_t>(static_cast<vk::DescriptorSet>(set));
object_name.pObjectName = "Standard descriptor set";
NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &object_name));
}
return set;
} else {
const auto set = standard_descriptor_sets.last();
standard_descriptor_sets.pop_back();
return set;
}
}
void VulkanRenderDevice::return_standard_descriptor_sets(const std::vector<vk::DescriptorSet>& sets) {
standard_descriptor_sets += sets;
}
std::vector<vk::DescriptorSet> VulkanRenderDevice::create_descriptors(
const std::vector<vk::DescriptorSetLayout>& descriptor_set_layouts,
const std::vector<uint32_t>& variable_descriptor_max_counts,
rx::memory::allocator& allocator) const {
RX_ASSERT(descriptor_set_layouts.size() == variable_descriptor_max_counts.size(),
"Descriptor set layous and varaible descriptor counts must be the same size");
const auto variable_descriptor_counts = vk::DescriptorSetVariableDescriptorCountAllocateInfo()
.setDescriptorSetCount(static_cast<uint32_t>(variable_descriptor_max_counts.size()))
.setPDescriptorCounts(variable_descriptor_max_counts.data());
const auto allocate_info = vk::DescriptorSetAllocateInfo()
.setDescriptorSetCount(static_cast<uint32_t>(descriptor_set_layouts.size()))
.setPSetLayouts(descriptor_set_layouts.data())
.setDescriptorPool(standard_descriptor_set_pool)
.setPNext(&variable_descriptor_counts);
std::vector<vk::DescriptorSet> sets{&allocator, descriptor_set_layouts.size()};
device.allocateDescriptorSets(&allocate_info, sets.data());
return sets;
}
vk::Fence VulkanRenderDevice::get_next_submission_fence() {
if(submission_fences.is_empty()) {
const auto fence_create_info = vk::FenceCreateInfo();
vk::Fence fence;
device.createFence(&fence_create_info, &vk_internal_allocator, &fence);
return fence;
} else {
const auto fence = submission_fences.last();
submission_fences.pop_back();
return fence;
}
}
ntl::Result<vk::Pipeline> VulkanRenderDevice::compile_pipeline_state(const VulkanPipeline& pipeline_state,
const VulkanRenderpass& renderpass,
rx::memory::allocator& allocator) {
ZoneScoped;
const auto& state = pipeline_state.state;
logger->debug("Creating a vk::Pipeline for pipeline %s", state.name);
std::vector<vk::PipelineShaderStageCreateInfo> shader_stages{&internal_allocator};
std::unordered_map<vk::ShaderStageFlags, vk::ShaderModule> shader_modules{&internal_allocator};
logger->debug("Compiling vertex module");
const auto vertex_module = create_shader_module(state.vertex_shader.source);
if(vertex_module) {
shader_modules.insert(VK_SHADER_STAGE_VERTEX_BIT, *vertex_module);
} else {
return ntl::Result<vk::Pipeline>{ntl::NovaError("Could not create vertex module")};
}
if(state.geometry_shader) {
logger->debug("Compiling geometry module");
const auto geometry_module = create_shader_module(state.geometry_shader->source);
if(geometry_module) {
shader_modules.insert(VK_SHADER_STAGE_GEOMETRY_BIT, *geometry_module);
} else {
return ntl::Result<vk::Pipeline>{ntl::NovaError("Could not geometry module")};
}
}
if(state.pixel_shader) {
logger->debug("Compiling fragment module");
const auto fragment_module = create_shader_module(state.pixel_shader->source);
if(fragment_module) {
shader_modules.insert(VK_SHADER_STAGE_FRAGMENT_BIT, *fragment_module);
} else {
return ntl::Result<vk::Pipeline>{ntl::NovaError("Could not pixel module")};
}
} // namespace nova::renderer::rhi
shader_modules.each_pair([&](const vk::ShaderStageFlags stage, const vk::ShaderModule shader_module) {
vk::PipelineShaderStageCreateInfo shader_stage_create_info;
shader_stage_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stage_create_info.pNext = nullptr;
shader_stage_create_info.flags = 0;
shader_stage_create_info.stage = static_cast<vk::ShaderStageFlagBits>(stage);
shader_stage_create_info.module = shader_module;
shader_stage_create_info.pName = "main";
shader_stage_create_info.pSpecializationInfo = nullptr;
shader_stages.push_back(shader_stage_create_info);
});
const auto& [vertex_attribute_descriptions, vertex_binding_descriptions] = get_input_assembler_setup(state.vertex_fields);
vk::PipelineVertexInputStateCreateInfo vertex_input_state_create_info;
vertex_input_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state_create_info.pNext = nullptr;
vertex_input_state_create_info.flags = 0;
vertex_input_state_create_info.vertexBindingDescriptionCount = static_cast<uint32_t>(vertex_binding_descriptions.size());
vertex_input_state_create_info.pVertexBindingDescriptions = vertex_binding_descriptions.data();
vertex_input_state_create_info.vertexAttributeDescriptionCount = static_cast<uint32_t>(vertex_attribute_descriptions.size());
vertex_input_state_create_info.pVertexAttributeDescriptions = vertex_attribute_descriptions.data();
vk::PipelineInputAssemblyStateCreateInfo input_assembly_create_info;
input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_create_info.pNext = nullptr;
input_assembly_create_info.flags = 0;
input_assembly_create_info.primitiveRestartEnable = VK_FALSE;
switch(state.topology) {
case PrimitiveTopology::TriangleList:
input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
break;
case PrimitiveTopology::LineList:
input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
break;
case PrimitiveTopology::PointList:
input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
break;
}
vk::Viewport viewport;
viewport.x = 0;
viewport.y = 0;
viewport.width = static_cast<float>(renderpass.render_area.extent.width);
viewport.height = static_cast<float>(renderpass.render_area.extent.height);
viewport.minDepth = 0.0F;
viewport.maxDepth = 1.0F;
vk::Rect2D scissor;
scissor.offset = {0, 0};
scissor.extent = {static_cast<uint32_t>(state.viewport_size.x), static_cast<uint32_t>(state.viewport_size.y)};
vk::PipelineViewportStateCreateInfo viewport_state_create_info;
viewport_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state_create_info.pNext = nullptr;
viewport_state_create_info.flags = 0;
viewport_state_create_info.viewportCount = 1;
viewport_state_create_info.pViewports = &viewport;
viewport_state_create_info.scissorCount = 1;
viewport_state_create_info.pScissors = &scissor;
vk::PipelineRasterizationStateCreateInfo rasterizer_create_info;
rasterizer_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer_create_info.pNext = nullptr;
rasterizer_create_info.flags = 0;
rasterizer_create_info.depthClampEnable = VK_FALSE;
rasterizer_create_info.rasterizerDiscardEnable = VK_FALSE;
rasterizer_create_info.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer_create_info.lineWidth = 1.0F;
rasterizer_create_info.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer_create_info.frontFace = VK_FRONT_FACE_CLOCKWISE;
rasterizer_create_info.depthClampEnable = VK_FALSE;
rasterizer_create_info.depthBiasConstantFactor = state.rasterizer_state.depth_bias;
rasterizer_create_info.depthBiasSlopeFactor = state.rasterizer_state.slope_scaled_depth_bias;
rasterizer_create_info.depthBiasClamp = state.rasterizer_state.maximum_depth_bias;
if(rasterizer_create_info.depthBiasConstantFactor != 0 || rasterizer_create_info.depthBiasSlopeFactor != 0) {
rasterizer_create_info.depthBiasEnable = VK_TRUE;
} else {
rasterizer_create_info.depthBiasEnable = VK_FALSE;
}
vk::PipelineMultisampleStateCreateInfo multisample_create_info;
multisample_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_create_info.pNext = nullptr;
multisample_create_info.flags = 0;
multisample_create_info.sampleShadingEnable = VK_FALSE;
multisample_create_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_create_info.minSampleShading = 1.0F;
multisample_create_info.pSampleMask = nullptr;
multisample_create_info.alphaToCoverageEnable = VK_FALSE;
multisample_create_info.alphaToOneEnable = VK_FALSE;
vk::PipelineDepthStencilStateCreateInfo depth_stencil_create_info = {};
depth_stencil_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
if(state.depth_state) {
const auto& depth_state = *state.depth_state;
depth_stencil_create_info.depthTestEnable = VK_TRUE;
depth_stencil_create_info.depthWriteEnable = static_cast<vk::Bool32>(depth_state.enable_depth_write);
depth_stencil_create_info.depthCompareOp = to_compare_op(depth_state.compare_op);
if(depth_state.bounds_test_state) {
depth_stencil_create_info.depthBoundsTestEnable = VK_TRUE;
if(depth_state.bounds_test_state->mode == DepthBoundsTestMode::Static) {
depth_stencil_create_info.minDepthBounds = depth_state.bounds_test_state->static_state.min_bound;
depth_stencil_create_info.maxDepthBounds = depth_state.bounds_test_state->static_state.max_bound;
}
}
}
if(state.stencil_state) {
const auto stencil_state = *state.stencil_state;
depth_stencil_create_info.stencilTestEnable = VK_TRUE;
depth_stencil_create_info.front.failOp = to_stencil_op(stencil_state.front_face_op.fail_op);
depth_stencil_create_info.front.passOp = to_stencil_op(stencil_state.front_face_op.pass_op);
depth_stencil_create_info.front.depthFailOp = to_stencil_op(stencil_state.front_face_op.depth_fail_op);
depth_stencil_create_info.front.compareOp = to_compare_op(stencil_state.front_face_op.compare_op);
depth_stencil_create_info.front.compareMask = stencil_state.front_face_op.compare_mask;
depth_stencil_create_info.front.writeMask = stencil_state.front_face_op.write_mask;
depth_stencil_create_info.back.failOp = to_stencil_op(stencil_state.back_face_op.fail_op);
depth_stencil_create_info.back.passOp = to_stencil_op(stencil_state.back_face_op.pass_op);
depth_stencil_create_info.back.depthFailOp = to_stencil_op(stencil_state.back_face_op.depth_fail_op);
depth_stencil_create_info.back.compareOp = to_compare_op(stencil_state.back_face_op.compare_op);
depth_stencil_create_info.back.compareMask = stencil_state.back_face_op.compare_mask;
depth_stencil_create_info.back.writeMask = stencil_state.back_face_op.write_mask;
}
vk::PipelineColorBlendStateCreateInfo color_blend_create_info{};
color_blend_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
color_blend_create_info.pNext = nullptr;
color_blend_create_info.flags = 0;
color_blend_create_info.logicOpEnable = VK_FALSE;
color_blend_create_info.logicOp = VK_LOGIC_OP_COPY;
std::vector<vk::PipelineColorBlendAttachmentState> attachment_states{&allocator};
if(state.blend_state) {
const auto& blend_state = *state.blend_state;
attachment_states.reserve(blend_state.render_target_states.size());
blend_state.render_target_states.each_fwd([&](const RenderTargetBlendState& render_target_blend) {
vk::PipelineColorBlendAttachmentState color_blend_attachment;
color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT;
color_blend_attachment.blendEnable = render_target_blend.enable ? VK_TRUE : VK_FALSE;
color_blend_attachment.srcColorBlendFactor = to_blend_factor(render_target_blend.src_color_factor);
color_blend_attachment.dstColorBlendFactor = to_blend_factor(render_target_blend.dst_color_factor);
color_blend_attachment.colorBlendOp = to_blend_op(render_target_blend.color_op);
color_blend_attachment.srcAlphaBlendFactor = to_blend_factor(render_target_blend.src_alpha_factor);
color_blend_attachment.dstAlphaBlendFactor = to_blend_factor(render_target_blend.dst_alpha_factor);
color_blend_attachment.alphaBlendOp = to_blend_op(render_target_blend.alpha_op);
attachment_states.emplace_back(color_blend_attachment);
});
color_blend_create_info.attachmentCount = static_cast<uint32_t>(attachment_states.size());
color_blend_create_info.pAttachments = attachment_states.data();
color_blend_create_info.blendConstants[0] = blend_state.blend_constants.r;
color_blend_create_info.blendConstants[1] = blend_state.blend_constants.g;
color_blend_create_info.blendConstants[2] = blend_state.blend_constants.b;
color_blend_create_info.blendConstants[3] = blend_state.blend_constants.a;
} else {
attachment_states.reserve(state.color_attachments.size());
state.color_attachments.each_fwd([&](const renderpack::TextureAttachmentInfo& /* attachment_info */) {
vk::PipelineColorBlendAttachmentState color_blend_attachment{};
color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT;
color_blend_attachment.blendEnable = VK_FALSE;
attachment_states.emplace_back(color_blend_attachment);
});
color_blend_create_info.attachmentCount = static_cast<uint32_t>(attachment_states.size());
color_blend_create_info.pAttachments = attachment_states.data();
}
std::vector<vk::DynamicState> dynamic_states;
if(state.enable_scissor_test) {
dynamic_states.emplace_back(VK_DYNAMIC_STATE_SCISSOR);
}
vk::PipelineDynamicStateCreateInfo dynamic_state_create_info = {};
dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state_create_info.dynamicStateCount = static_cast<uint32_t>(dynamic_states.size());
dynamic_state_create_info.pDynamicStates = dynamic_states.data();
vk::GraphicsPipelineCreateInfo pipeline_create_info = {};
pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipeline_create_info.pNext = nullptr;
pipeline_create_info.flags = 0;
pipeline_create_info.stageCount = static_cast<uint32_t>(shader_stages.size());
pipeline_create_info.pStages = shader_stages.data();
pipeline_create_info.pVertexInputState = &vertex_input_state_create_info;
pipeline_create_info.pInputAssemblyState = &input_assembly_create_info;
pipeline_create_info.pViewportState = &viewport_state_create_info;
pipeline_create_info.pRasterizationState = &rasterizer_create_info;
pipeline_create_info.pMultisampleState = &multisample_create_info;
pipeline_create_info.pDepthStencilState = &depth_stencil_create_info;
pipeline_create_info.pColorBlendState = &color_blend_create_info;
pipeline_create_info.pDynamicState = &dynamic_state_create_info;
pipeline_create_info.layout = pipeline_state.layout.layout;
pipeline_create_info.renderPass = renderpass.pass;
pipeline_create_info.subpass = 0;
pipeline_create_info.basePipelineIndex = -1;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(allocator);
vk::Pipeline pipeline;
const auto result = vkCreateGraphicsPipelines(device,
nullptr,
1,
&pipeline_create_info,
&vk_alloc,
reinterpret_cast<vk::Pipeline*>(&pipeline));
if(result != VK_SUCCESS) {
return ntl::Result<vk::Pipeline>{MAKE_ERROR("Could not compile pipeline %s", state.name)};
}
if(settings.settings.debug.enabled) {
vk::DebugUtilsObjectNameInfoEXT object_name = {};
object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
object_name.objectType = VK_OBJECT_TYPE_PIPELINE;
object_name.objectHandle = reinterpret_cast<uint64_t>(static_cast<vk::Pipeline>(pipeline));
object_name.pObjectName = state.name.data();
NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &object_name));
}
return ntl::Result{pipeline};
}
RhiBuffer* VulkanRenderDevice::create_buffer(const RhiBufferCreateInfo& info, rx::memory::allocator& allocator) {
ZoneScoped;
auto* buffer = allocator.create<VulkanBuffer>();
vk::BufferCreateInfo vk_create_info = {};
vk_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
vk_create_info.size = info.size.b_count();
vk_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VmaAllocationCreateInfo vma_alloc{};
switch(info.buffer_usage) {
case BufferUsage::UniformBuffer: {
if(info.size < gpu.props.limits.maxUniformBufferRange) {
vk_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
} else {
vk_create_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
}
vma_alloc.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
vma_alloc.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
} break;
case BufferUsage::IndexBuffer: {
vk_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
vma_alloc.usage = VMA_MEMORY_USAGE_GPU_ONLY;
} break;
case BufferUsage::VertexBuffer: {
vk_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
vma_alloc.usage = VMA_MEMORY_USAGE_GPU_ONLY;
} break;
case BufferUsage::StagingBuffer: {
vk_create_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
vma_alloc.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
vma_alloc.usage = VMA_MEMORY_USAGE_CPU_ONLY;
} break;
}
const auto result = vmaCreateBuffer(vma,
&vk_create_info,
&vma_alloc,
&buffer->buffer,
&buffer->allocation,
&buffer->allocation_info);
if(result == VK_SUCCESS) {
buffer->size = info.size;
if(settings->debug.enabled) {
vk::DebugUtilsObjectNameInfoEXT object_name = {};
object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
object_name.objectType = VK_OBJECT_TYPE_BUFFER;
object_name.objectHandle = reinterpret_cast<uint64_t>(buffer->buffer);
object_name.pObjectName = info.name.data();
NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &object_name));
}
return buffer;
} else {
logger->error("Could not create buffer %s: %s", info.name, to_string(result));
return nullptr;
}
}
void VulkanRenderDevice::write_data_to_buffer(const void* data, const Bytes num_bytes, const RhiBuffer* buffer) {
ZoneScoped;
const auto* vulkan_buffer = static_cast<const VulkanBuffer*>(buffer);
memcpy(vulkan_buffer->allocation_info.pMappedData, data, num_bytes.b_count());
}
// Creates a Vulkan sampler matching the requested filtering, addressing, and
// mipmapping configuration.
//
// \param create_info Filter modes, wrap modes, LOD range, and anisotropy settings
// \param allocator Allocator that owns the returned VulkanSampler wrapper
// \return The new sampler wrapper; its lifetime is managed by `allocator`
RhiSampler* VulkanRenderDevice::create_sampler(const RhiSamplerCreateInfo& create_info, rx::memory::allocator& allocator) {
    ZoneScoped;

    auto* new_sampler = allocator.create<VulkanSampler>();

    vk::SamplerCreateInfo sampler_create_info = {};
    sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;

    // Min/mag filtering
    sampler_create_info.minFilter = to_vk_filter(create_info.min_filter);
    sampler_create_info.magFilter = to_vk_filter(create_info.mag_filter);

    // One wrap mode per texture axis
    sampler_create_info.addressModeU = to_vk_address_mode(create_info.x_wrap_mode);
    sampler_create_info.addressModeV = to_vk_address_mode(create_info.y_wrap_mode);
    sampler_create_info.addressModeW = to_vk_address_mode(create_info.z_wrap_mode);

    // Mip selection and anisotropic filtering
    sampler_create_info.mipLodBias = create_info.mip_bias;
    sampler_create_info.anisotropyEnable = create_info.enable_anisotropy ? VK_TRUE : VK_FALSE;
    sampler_create_info.maxAnisotropy = create_info.max_anisotropy;
    sampler_create_info.minLod = create_info.min_lod;
    sampler_create_info.maxLod = create_info.max_lod;

    const vk::AllocationCallbacks& alloc_callbacks = wrap_allocator(allocator);
    vkCreateSampler(device, &sampler_create_info, &alloc_callbacks, &new_sampler->sampler);

    return new_sampler;
}
// Creates a GPU-local 2D image — plus its default image view — from a renderpack
// texture description. Sampled images get TRANSFER_DST usage; everything else is
// treated as a render target and gets a color or depth-stencil attachment usage
// plus a dedicated VMA allocation.
//
// Returns nullptr (after logging) if vmaCreateImage fails.
RhiImage* VulkanRenderDevice::create_image(const renderpack::TextureCreateInfo& info, rx::memory::allocator& allocator) {
ZoneScoped;
auto* image = allocator.create<VulkanImage>();
image->is_dynamic = true;
image->type = ResourceType::Image;
const vk::Format format = to_vk_format(info.format.pixel_format);
// In Nova, images all have a dedicated allocation
// This may or may not change depending on performance data, but given Nova's atlas-centric design I don't think it'll change much
// Resolve relative sizes (e.g. fractions of the swapchain) to absolute pixels
const auto image_pixel_size = info.format.get_size_in_pixels(swapchain_size);
VmaAllocationCreateInfo vma_info = {};
vma_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
vk::ImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = format;
image_create_info.extent.width = image_pixel_size.x;
image_create_info.extent.height = image_pixel_size.y;
image_create_info.extent.depth = 1;
// Single mip, single layer — Nova does not generate mip chains here
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
// Only these two depth formats are produced by to_vk_format, so this check is
// how the rest of the function distinguishes depth from color images
if(format == VK_FORMAT_D24_UNORM_S8_UINT || format == VK_FORMAT_D32_SFLOAT) {
image->is_depth_tex = true;
}
if(info.usage == renderpack::ImageUsage::SampledImage) {
image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
} else {
// If the image isn't a sampled image, it's a render target
// Render targets get dedicated allocations
if(format == VK_FORMAT_D24_UNORM_S8_UINT || format == VK_FORMAT_D32_SFLOAT) {
image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
} else {
image_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
vma_info.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
}
// Images are only ever used on the graphics queue, so exclusive ownership is fine
image_create_info.queueFamilyIndexCount = 1;
image_create_info.pQueueFamilyIndices = &graphics_family_index;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
const auto result = vmaCreateImage(vma, &image_create_info, &vma_info, &image->image, &image->allocation, nullptr);
if(result == VK_SUCCESS) {
// Attach a debug name so the image shows up readably in validation messages and captures
if(settings->debug.enabled) {
vk::DebugUtilsObjectNameInfoEXT object_name = {};
object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
object_name.objectType = VK_OBJECT_TYPE_IMAGE;
object_name.objectHandle = reinterpret_cast<uint64_t>(image->image);
object_name.pObjectName = info.name.data();
NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &object_name));
}
// Default view covering the whole image; depth images view the depth aspect only
vk::ImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image->image;
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = image_create_info.format;
if(format == VK_FORMAT_D24_UNORM_S8_UINT || format == VK_FORMAT_D32_SFLOAT) {
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
} else {
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
}
image_view_create_info.subresourceRange.baseArrayLayer = 0;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(allocator);
vkCreateImageView(device, &image_view_create_info, &vk_alloc, &image->image_view);
return image;
} else {
// NOTE(review): `image` is not deallocated on this failure path — the wrapper
// object leaks from `allocator`. Confirm whether the allocator is arena-style.
logger->error("Could not create image %s: %s", info.name, to_string(result));
return nullptr;
}
}
// Creates a single binary semaphore, initially unsignaled.
RhiSemaphore* VulkanRenderDevice::create_semaphore(rx::memory::allocator& allocator) {
    ZoneScoped;

    auto* new_semaphore = allocator.create<VulkanSemaphore>();

    vk::SemaphoreCreateInfo semaphore_create = {};
    semaphore_create.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

    const vk::AllocationCallbacks& alloc_callbacks = wrap_allocator(allocator);
    vkCreateSemaphore(device, &semaphore_create, &alloc_callbacks, &new_semaphore->semaphore);

    return new_semaphore;
}
std::vector<RhiSemaphore*> VulkanRenderDevice::create_semaphores(const uint32_t num_semaphores, rx::memory::allocator& allocator) {
ZoneScoped;
auto semaphores = std::vector<RhiSemaphore*>{&allocator};
semaphores.reserve(num_semaphores);
for(uint32_t i = 0; i < num_semaphores; i++) {
semaphores.emplace_back(create_semaphore(allocator));
}
return semaphores;
}
// Creates a single fence, optionally starting in the signaled state.
//
// \param signaled If true, the fence is created already signaled
// \param allocator Allocator that owns the returned VulkanFence wrapper
RhiFence* VulkanRenderDevice::create_fence(const bool signaled, rx::memory::allocator& allocator) {
    ZoneScoped;

    auto* new_fence = allocator.create<VulkanFence>();

    vk::FenceCreateInfo fence_create = {};
    fence_create.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    if(signaled) {
        fence_create.flags = VK_FENCE_CREATE_SIGNALED_BIT;
    }

    const vk::AllocationCallbacks& alloc_callbacks = wrap_allocator(allocator);
    vkCreateFence(device, &fence_create, &alloc_callbacks, &new_fence->fence);

    return new_fence;
}
// Creates `num_fences` fences, all sharing the same create info.
//
// \param num_fences How many fences to create
// \param signaled If true, every fence starts in the signaled state
// \param allocator Allocator that owns each returned VulkanFence wrapper
std::vector<RhiFence*> VulkanRenderDevice::create_fences(const uint32_t num_fences,
                                                         const bool signaled,
                                                         rx::memory::allocator& allocator) {
    ZoneScoped;

    // Fix: `std::vector<RhiFence*>{&allocator}` was a leftover from the rx::vector API —
    // std::vector cannot be constructed from an allocator pointer. Default-construct
    // and reserve instead.
    std::vector<RhiFence*> fences;
    fences.reserve(num_fences);

    vk::FenceCreateInfo fence_create_info = {};
    fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    if(signaled) {
        fence_create_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
    }

    for(uint32_t i = 0; i < num_fences; i++) {
        auto* fence = allocator.create<VulkanFence>();
        const vk::AllocationCallbacks& vk_alloc = wrap_allocator(allocator);
        vkCreateFence(device, &fence_create_info, &vk_alloc, &fence->fence);

        fences.push_back(fence);
    }

    return fences;
}
// Blocks until every fence in `fences` is signaled (no timeout).
//
// In debug builds a failed wait is logged; VK_ERROR_DEVICE_LOST triggers a breakpoint.
void VulkanRenderDevice::wait_for_fences(const std::vector<RhiFence*> fences) {
    ZoneScoped;

    // Fix: the previous code used rx::vector idioms (`{&internal_allocator}` construction
    // and `each_fwd`) that do not exist on std::vector. Use standard construction and a
    // range-for instead.
    std::vector<vk::Fence> vk_fences;
    vk_fences.reserve(fences.size());
    for(const RhiFence* fence : fences) {
        const auto* vk_fence = static_cast<const VulkanFence*>(fence);
        vk_fences.push_back(vk_fence->fence);
    }

    const auto result = vkWaitForFences(device,
                                        static_cast<uint32_t>(vk_fences.size()),
                                        vk_fences.data(),
                                        VK_TRUE,
                                        std::numeric_limits<uint64_t>::max());
    if(settings->debug.enabled) {
        if(result != VK_SUCCESS) {
            logger->error("Could not wait for fences. %s (error code %x)", to_string(result), result);
            BREAK_ON_DEVICE_LOST(result);
        }
    }
}
// Resets every fence in `fences` back to the unsignaled state.
void VulkanRenderDevice::reset_fences(const std::vector<RhiFence*>& fences) {
    ZoneScoped;

    // Fix: the previous code used rx::vector idioms (`{&internal_allocator}` construction
    // and `each_fwd`) that do not exist on std::vector.
    std::vector<vk::Fence> vk_fences;
    vk_fences.reserve(fences.size());
    for(const RhiFence* fence : fences) {
        const auto* vk_fence = static_cast<const VulkanFence*>(fence);
        vk_fences.push_back(vk_fence->fence);
    }

    // Use vk_fences.size() so the count always matches the array we actually pass
    vkResetFences(device, static_cast<uint32_t>(vk_fences.size()), vk_fences.data());
}
// Destroys the Vulkan render pass and frees its wrapper from `allocator`.
void VulkanRenderDevice::destroy_renderpass(RhiRenderpass* pass, rx::memory::allocator& allocator) {
    ZoneScoped;

    auto* vulkan_pass = static_cast<VulkanRenderpass*>(pass);
    vkDestroyRenderPass(device, vulkan_pass->pass, nullptr);
    allocator.deallocate(reinterpret_cast<uint8_t*>(pass));
}
// Destroys the Vulkan framebuffer and frees its wrapper from `allocator`.
void VulkanRenderDevice::destroy_framebuffer(RhiFramebuffer* framebuffer, rx::memory::allocator& allocator) {
    ZoneScoped;

    const auto* vulkan_framebuffer = static_cast<const VulkanFramebuffer*>(framebuffer);
    vkDestroyFramebuffer(device, vulkan_framebuffer->framebuffer, nullptr);
    allocator.deallocate(reinterpret_cast<uint8_t*>(framebuffer));
}
// Destroys an image created by create_image: its default view, the VkImage plus its
// VMA allocation, and finally the wrapper object itself.
void VulkanRenderDevice::destroy_texture(RhiImage* resource, rx::memory::allocator& allocator) {
    ZoneScoped;

    auto* vk_image = static_cast<VulkanImage*>(resource);

    // Fix: the image view created in create_image was never destroyed, leaking a
    // VkImageView per texture. Guarded so images without a view are still safe.
    if(vk_image->image_view) {
        vkDestroyImageView(device, vk_image->image_view, nullptr);
    }

    vmaDestroyImage(vma, vk_image->image, vk_image->allocation);
    allocator.deallocate(reinterpret_cast<uint8_t*>(resource));
}
// Destroys every semaphore in the list and frees each wrapper from `allocator`.
// The caller's vector still holds the (now dangling) pointers afterwards.
void VulkanRenderDevice::destroy_semaphores(std::vector<RhiSemaphore*>& semaphores, rx::memory::allocator& allocator) {
    ZoneScoped;

    // Fix: `each_fwd` is rx::vector API and does not exist on std::vector; use a range-for
    for(RhiSemaphore* semaphore : semaphores) {
        auto* vk_semaphore = static_cast<VulkanSemaphore*>(semaphore);
        vkDestroySemaphore(device, vk_semaphore->semaphore, nullptr);
        allocator.deallocate(reinterpret_cast<uint8_t*>(semaphore));
    }
}
// Destroys every fence in the list and frees each wrapper from `allocator`.
void VulkanRenderDevice::destroy_fences(const std::vector<RhiFence*>& fences, rx::memory::allocator& allocator) {
    ZoneScoped;

    // Fix: `each_fwd` is rx::vector API and does not exist on std::vector; use a range-for
    for(RhiFence* fence : fences) {
        auto* vk_fence = static_cast<VulkanFence*>(fence);
        const vk::AllocationCallbacks& vk_alloc = wrap_allocator(allocator);
        vkDestroyFence(device, vk_fence->fence, &vk_alloc);

        allocator.deallocate(reinterpret_cast<uint8_t*>(fence));
    }
}
// Allocates one command buffer from the calling thread's per-queue-family command
// pool and wraps it in a VulkanRenderCommandList.
//
// \param thread_idx Index of the calling thread; selects which pool map to use.
//                   Must be a valid index into command_pools_by_thread_idx
// \param needed_queue_type Queue the resulting list will be submitted to
// \param level Primary or secondary command buffer
// \param allocator Allocator that owns the returned command-list wrapper
RhiRenderCommandList* VulkanRenderDevice::create_command_list(const uint32_t thread_idx,
const QueueType needed_queue_type,
const RhiRenderCommandList::Level level,
rx::memory::allocator& allocator) {
ZoneScoped;
const uint32_t queue_family_index = get_queue_family_index(needed_queue_type);
// NOTE(review): the dereference assumes `find` returns a pointer/handle to the pool
// (rx::map style) and that a pool exists for this family — no miss handling. Confirm
// the map type; with std::map semantics this deref would be wrong.
const vk::CommandPool pool = *command_pools_by_thread_idx[thread_idx].find(queue_family_index);
vk::CommandBufferAllocateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
create_info.commandPool = pool;
create_info.level = to_vk_command_buffer_level(level);
create_info.commandBufferCount = 1;
vk::CommandBuffer new_buffer;
vkAllocateCommandBuffers(device, &create_info, &new_buffer);
auto* list = allocator.create<VulkanRenderCommandList>(new_buffer, *this, allocator);
return list;
}
// Ends the command list and submits it to the requested queue, wiring up the given
// wait/signal semaphores. A fenced cleanup task is registered so the list's resources
// are released once the GPU has finished with them (checked in end_frame).
//
// \param cmds The command list to submit; it is ended here
// \param queue Which hardware queue to submit to (unknown values fall back to graphics)
// \param fence_to_signal Optional fence to signal on completion; if null, an internal
//                        submission fence is used so cleanup can still be tracked
void VulkanRenderDevice::submit_command_list(RhiRenderCommandList* cmds,
                                             const QueueType queue,
                                             RhiFence* fence_to_signal,
                                             const std::vector<RhiSemaphore*>& wait_semaphores,
                                             const std::vector<RhiSemaphore*>& signal_semaphores) {
    ZoneScoped;
    auto* vk_list = static_cast<VulkanRenderCommandList*>(cmds);
    vkEndCommandBuffer(vk_list->cmds);

    vk::Queue queue_to_submit_to;

    switch(queue) {
        case QueueType::Graphics:
            queue_to_submit_to = graphics_queue;
            break;

        case QueueType::Transfer:
            queue_to_submit_to = copy_queue;
            break;

        case QueueType::AsyncCompute:
            queue_to_submit_to = compute_queue;
            break;

        default:
            queue_to_submit_to = graphics_queue;
    }

    // Fix: `each_fwd` is rx::vector API and does not exist on std::vector; use range-fors
    std::vector<vk::Semaphore> vk_wait_semaphores;
    vk_wait_semaphores.reserve(wait_semaphores.size());
    for(const RhiSemaphore* semaphore : wait_semaphores) {
        const auto* vk_semaphore = static_cast<const VulkanSemaphore*>(semaphore);
        vk_wait_semaphores.push_back(vk_semaphore->semaphore);
    }

    std::vector<vk::Semaphore> vk_signal_semaphores;
    vk_signal_semaphores.reserve(signal_semaphores.size());
    for(const RhiSemaphore* semaphore : signal_semaphores) {
        const auto* vk_semaphore = static_cast<const VulkanSemaphore*>(semaphore);
        vk_signal_semaphores.push_back(vk_semaphore->semaphore);
    }

    vk::SubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.waitSemaphoreCount = static_cast<uint32_t>(vk_wait_semaphores.size());
    submit_info.pWaitSemaphores = vk_wait_semaphores.data();
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &vk_list->cmds;
    submit_info.signalSemaphoreCount = static_cast<uint32_t>(vk_signal_semaphores.size());
    submit_info.pSignalSemaphores = vk_signal_semaphores.data();

    // If the caller didn't give us a fence, grab a recycled internal one so that the
    // cleanup task below can still detect completion
    const auto vk_signal_fence = [&]() -> vk::Fence {
        if(fence_to_signal) {
            return static_cast<const VulkanFence*>(fence_to_signal)->fence;
        } else {
            return get_next_submission_fence();
        }
    }();

    const auto result = vkQueueSubmit(queue_to_submit_to, 1, &submit_info, vk_signal_fence);

    // Fix: the task lambda previously captured everything by reference ([&]), but it runs
    // later (in end_frame), long after the locals `vk_list` and `vk_signal_fence` have
    // gone out of scope — a dangling-reference bug. Capture the values it needs by copy.
    fenced_tasks.emplace_back(vk_signal_fence, [this, vk_list, vk_signal_fence] {
        vk_list->cleanup_resources();
        submission_fences.emplace_back(vk_signal_fence);
    });

    if(settings->debug.enabled) {
        if(result != VK_SUCCESS) {
            logger->error("Could not submit command list: %s", to_string(result));
            BREAK_ON_DEVICE_LOST(result);
        }
    }
}
// End-of-frame housekeeping: runs every fenced task whose fence has signaled (e.g.
// command-list cleanup registered in submit_command_list) and re-queues the rest.
void VulkanRenderDevice::end_frame(FrameContext& /* ctx */) {
ZoneScoped; // Intentionally copying the vector
auto cur_tasks = fenced_tasks;
// Clear out the list of tasks. We've copied the tasks to the new vector so it's fine, and we'll add back in the tasks that aren't
// ready to run
fenced_tasks.clear();
// NOTE(review): `each_fwd` implies fenced_tasks is an rx::vector — confirm, since the
// std::vector members elsewhere in this file no longer have that API
cur_tasks.each_fwd([&](const FencedTask& task) {
// Fence signaled -> the GPU is done with whatever this task was waiting on
if(device.getFenceStatus(task.fence) == vk::Result::eSuccess) {
task();
} else {
// Not ready yet — try again next frame
fenced_tasks.push_back(task);
}
});
}
// Maps an abstract queue type to the Vulkan queue family index chosen at device
// creation. Asserts (debug) on an unknown queue type.
uint32_t VulkanRenderDevice::get_queue_family_index(const QueueType type) const {
    if(type == QueueType::Graphics) {
        return graphics_family_index;
    }
    if(type == QueueType::Transfer) {
        return transfer_family_index;
    }
    if(type == QueueType::AsyncCompute) {
        return compute_family_index;
    }

    RX_ASSERT(false, "Unknown queue type %u", static_cast<uint32_t>(type));
    return 9999; // I have to return _something_ or Visual Studio gets mad
}
// Builds a pipeline layout for the given graphics pipeline state: collects all
// descriptor bindings via reflection, creates the descriptor set layouts, the
// VkPipelineLayout (with the standard push-constant block), and computes the
// variable descriptor count needed per set for unbounded bindings.
VulkanPipelineLayoutInfo VulkanRenderDevice::create_pipeline_layout(const RhiGraphicsPipelineState& state) {
    const auto bindings = get_all_descriptors(state);
    const auto ds_layouts = create_descriptor_set_layouts(bindings, *this, internal_allocator);

    const auto pipeline_layout_create = vk::PipelineLayoutCreateInfo()
                                            .setSetLayoutCount(static_cast<uint32_t>(ds_layouts.size()))
                                            .setPSetLayouts(ds_layouts.data())
                                            .setPushConstantRangeCount(static_cast<uint32_t>(standard_push_constants.size()))
                                            .setPPushConstantRanges(standard_push_constants.data()); // TODO: Get this from reflection

    vk::PipelineLayout layout;
    device.createPipelineLayout(&pipeline_layout_create, &vk_internal_allocator, &layout);

    // Fix: `std::vector<uint32_t>{&internal_allocator, ds_layouts.size()}` was rx::vector
    // syntax — on std::vector it would be an ill-formed initializer list. Construct one
    // zero-initialized counter per set so the max-reduction below starts from 0.
    std::vector<uint32_t> variable_descriptor_counts(ds_layouts.size(), 0);
    bindings.each_value([&](const RhiResourceBindingDescription& binding_desc) {
        // Track the largest unbounded-array size requested for each set
        if(binding_desc.is_unbounded && binding_desc.count > variable_descriptor_counts[binding_desc.set]) {
            variable_descriptor_counts[binding_desc.set] = binding_desc.count;
        }
    });

    return {bindings, ds_layouts, layout, variable_descriptor_counts};
}
// Creates the platform window surface (Xlib on Linux, Win32 on Windows).
void VulkanRenderDevice::create_surface() {
    ZoneScoped;
#ifdef NOVA_LINUX
    // Fix: this declaration previously sat on the `#ifdef NOVA_LINUX` line itself, where
    // the preprocessor discards it (extra tokens after #ifdef) — so the Linux path never
    // declared x_surface_create_info. It must be on its own line.
    vk::XlibSurfaceCreateInfoKHR x_surface_create_info;
    x_surface_create_info.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
    x_surface_create_info.pNext = nullptr;
    x_surface_create_info.flags = 0;

    x_surface_create_info.dpy = window.get_display();
    x_surface_create_info.window = window.get_window_handle();

    const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
    NOVA_CHECK_RESULT(vkCreateXlibSurfaceKHR(instance, &x_surface_create_info, &vk_alloc, &surface));

#elif defined(NOVA_WINDOWS)
    vk::Win32SurfaceCreateInfoKHR win32_surface_create = {};
    win32_surface_create.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
    win32_surface_create.hwnd = window.get_window_handle();

    const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
    NOVA_CHECK_RESULT(vkCreateWin32SurfaceKHR(instance, &win32_surface_create, &vk_alloc, &surface));

#else
#error Unsuported window system
#endif
}
void VulkanRenderDevice::create_instance() {
ZoneScoped;
const auto& version = settings.settings.vulkan.application_version;
vk::ApplicationInfo application_info;
application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
application_info.pNext = nullptr;
application_info.pApplicationName = settings.settings.vulkan.application_name;
application_info.applicationVersion = VK_MAKE_VERSION(version.major, version.minor, version.patch);
application_info.pEngineName = "Nova Renderer 0.9";
application_info.apiVersion = VK_API_VERSION_1_2;
vk::InstanceCreateInfo create_info;
create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
create_info.pNext = nullptr;
create_info.flags = 0;
create_info.pApplicationInfo = &application_info;
if(settings.settings.debug.enabled && settings.settings.debug.enable_validation_layers) {
enabled_layer_names.push_back("VK_LAYER_LUNARG_standard_validation");
}
create_info.enabledLayerCount = static_cast<uint32_t>(enabled_layer_names.size());
create_info.ppEnabledLayerNames = enabled_layer_names.data();
std::vector<const char*> enabled_extension_names{&internal_allocator};
enabled_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
enabled_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
#ifdef NOVA_LINUX
enabled_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
#elif defined(NOVA_WINDOWS)
enabled_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#else
#error Unsupported Operating system
#endif
std::vector<vk::ValidationFeatureEnableEXT> enabled_validation_features;
if(settings.settings.debug.enabled) {
enabled_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
enabled_extension_names.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
enabled_validation_features.push_back(VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT);
if(settings.settings.debug.enable_gpu_based_validation) {
enabled_validation_features.push_back(VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT);
enabled_validation_features.push_back(VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT);
}
}
create_info.enabledExtensionCount = static_cast<uint32_t>(enabled_extension_names.size());
create_info.ppEnabledExtensionNames = enabled_extension_names.data();
vk::ValidationFeaturesEXT validation_features = {};
validation_features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
validation_features.enabledValidationFeatureCount = static_cast<uint32_t>(enabled_validation_features.size());
validation_features.pEnabledValidationFeatures = enabled_validation_features.data();
create_info.pNext = &validation_features;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
{
ZoneScoped;
NOVA_CHECK_RESULT(vkCreateInstance(&create_info, &vk_alloc, &instance));
}
}
// Loads the debug-utils entry points and installs a debug messenger that forwards
// all severities (verbose through error) of all message types to debug_report_callback.
void VulkanRenderDevice::enable_debug_output() {
ZoneScoped;
vkCreateDebugUtilsMessengerEXT = reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(
vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"));
// NOTE(review): this loads vkDestroyDebugReportCallbackEXT (the old debug-report
// extension) while the create side uses the debug-utils messenger. The matching
// destroy function is vkDestroyDebugUtilsMessengerEXT — confirm how `debug_callback`
// is torn down, as destroying a messenger with the report function would be invalid.
vkDestroyDebugReportCallbackEXT = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
vkGetInstanceProcAddr(instance, "vkDestroyDebugReportCallbackEXT"));
vk::DebugUtilsMessengerCreateInfoEXT debug_create_info = {};
debug_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
debug_create_info.pNext = nullptr;
// Forward every severity and every message category
debug_create_info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
debug_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
debug_create_info.pfnUserCallback = reinterpret_cast<PFN_vkDebugUtilsMessengerCallbackEXT>(&debug_report_callback);
// The callback receives this device as its user-data pointer
debug_create_info.pUserData = this;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
NOVA_CHECK_RESULT(vkCreateDebugUtilsMessengerEXT(instance, &debug_create_info, &vk_alloc, &debug_callback));
}
void VulkanRenderDevice::save_device_info() {
ZoneScoped;
switch(gpu.props.vendorID) {
case AMD_PCI_VENDOR_ID:
info.architecture = DeviceArchitecture::amd;
break;
case INTEL_PCI_VENDOR_ID:
info.architecture = DeviceArchitecture::intel;
break;
case NVIDIA_PCI_VENDOR_ID:
info.architecture = DeviceArchitecture::nvidia;
break;
default:
info.architecture = DeviceArchitecture::unknown;
}
vk_info.max_uniform_buffer_size = gpu.props.limits.maxUniformBufferRange;
info.max_texture_size = gpu.props.limits.maxImageDimension2D;
// TODO: Something smarter when Intel releases discreet GPUS
// TODO: Handle integrated AMD GPUs
info.is_uma = info.architecture == DeviceArchitecture::intel;
uint32_t extension_count;
vkEnumerateDeviceExtensionProperties(gpu.phys_device, nullptr, &extension_count, nullptr);
std::vector<vk::ExtensionProperties> available_extensions{&internal_allocator, extension_count};
vkEnumerateDeviceExtensionProperties(gpu.phys_device, nullptr, &extension_count, available_extensions.data());
const auto extension_name_matcher = [](const char* ext_name) {
return [=](const vk::ExtensionProperties& ext_props) -> bool { return strcmp(ext_name, ext_props.extensionName) == 0; };
};
// TODO: Update as more GPUs support hardware raytracing
info.supports_raytracing = available_extensions.find_if(extension_name_matcher(VK_NV_RAY_TRACING_EXTENSION_NAME)) !=
std::vector<vk::ExtensionProperties>::k_npos;
// TODO: Update as more GPUs support mesh shaders
info.supports_mesh_shaders = available_extensions.find_if(extension_name_matcher(VK_NV_MESH_SHADER_EXTENSION_NAME));
}
void VulkanRenderDevice::initialize_vma() {
ZoneScoped;
vk::AllocationCallbacks callbacks = vk_internal_allocator;
VmaAllocatorCreateInfo create_info{};
create_info.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
create_info.physicalDevice = gpu.phys_device;
create_info.device = device;
create_info.pAllocationCallbacks = &callbacks;
create_info.instance = instance;
const auto result = vmaCreateAllocator(&create_info, &vma);
if(result != VK_SUCCESS) {
logger->error("Could not initialize VMA: %s", to_string(result));
}
}
// Selects a physical device, picks queue families (graphics+present, compute,
// transfer), and creates the logical device with the features Nova needs.
// Populates gpu.*, device, and the graphics/compute/copy queue handles.
void VulkanRenderDevice::create_device_and_queues() {
ZoneScoped;
// NOTE(review): `{&internal_allocator}` / `{&internal_allocator, device_count}` below
// are rx::vector constructions left over from the std::vector migration — confirm these
// compile; std::vector has no allocator-pointer constructor
std::vector<char*> device_extensions{&internal_allocator};
device_extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
device_extensions.push_back(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME);
uint32_t device_count;
NOVA_CHECK_RESULT(vkEnumeratePhysicalDevices(instance, &device_count, nullptr));
auto physical_devices = std::vector<vk::PhysicalDevice>{&internal_allocator, device_count};
NOVA_CHECK_RESULT(vkEnumeratePhysicalDevices(instance, &device_count, physical_devices.data()));
// 0xFFFFFFFF = "not found yet" sentinel for all three families
uint32_t graphics_family_idx = 0xFFFFFFFF;
uint32_t compute_family_idx = 0xFFFFFFFF;
uint32_t copy_family_idx = 0xFFFFFFFF;
{
ZoneScoped;
// GPU selection: first device that isn't a skipped integrated Intel part, supports
// the required extensions, and has a graphics+present queue family wins
for(uint32_t device_idx = 0; device_idx < device_count; device_idx++) {
graphics_family_idx = 0xFFFFFFFF;
vk::PhysicalDevice current_device = physical_devices[device_idx];
vkGetPhysicalDeviceProperties(current_device, &gpu.props);
const bool is_intel_gpu = gpu.props.vendorID == INTEL_PCI_VENDOR_ID;
const bool more_gpus_available = device_count - 1 > device_idx;
if(is_intel_gpu && more_gpus_available) {
// Intel GPU _probably_ isn't as powerful as a discreet GPU, and if there's more than one GPU then the other one(s) are
// _probably_ discreet GPUs, so let's not use the Intel GPU and instead use the discreet GPU
// TODO: Make a local device for the integrated GPU when we figure out multi-GPU
// TODO: Rework this code when Intel releases discreet GPUs
continue;
}
const auto supports_extensions = does_device_support_extensions(current_device, device_extensions);
if(!supports_extensions) {
continue;
}
uint32_t queue_family_count;
vkGetPhysicalDeviceQueueFamilyProperties(current_device, &queue_family_count, nullptr);
gpu.queue_family_props.resize(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(current_device, &queue_family_count, gpu.queue_family_props.data());
// Take the first family matching each capability; graphics must also present
for(uint32_t queue_idx = 0; queue_idx < queue_family_count; queue_idx++) {
const vk::QueueFamilyProperties current_properties = gpu.queue_family_props[queue_idx];
if(current_properties.queueCount < 1) {
continue;
}
vk::Bool32 supports_present = VK_FALSE;
NOVA_CHECK_RESULT(vkGetPhysicalDeviceSurfaceSupportKHR(current_device, queue_idx, surface, &supports_present));
const vk::QueueFlags supports_graphics = current_properties.queueFlags & VK_QUEUE_GRAPHICS_BIT;
if((supports_graphics != 0U) && supports_present == VK_TRUE && graphics_family_idx == 0xFFFFFFFF) {
graphics_family_idx = queue_idx;
}
const vk::QueueFlags supports_compute = current_properties.queueFlags & VK_QUEUE_COMPUTE_BIT;
if((supports_compute != 0U) && compute_family_idx == 0xFFFFFFFF) {
compute_family_idx = queue_idx;
}
const vk::QueueFlags supports_copy = current_properties.queueFlags & VK_QUEUE_TRANSFER_BIT;
if((supports_copy != 0U) && copy_family_idx == 0xFFFFFFFF) {
copy_family_idx = queue_idx;
}
}
if(graphics_family_idx != 0xFFFFFFFF) {
logger->info("Selected GPU %s", gpu.props.deviceName);
gpu.phys_device = current_device;
break;
}
}
}
if(gpu.phys_device == nullptr) {
logger->error("Failed to find good GPU");
// TODO: Message the user that GPU selection failed
return;
}
PROFILE_VOID_EXPR(vkGetPhysicalDeviceFeatures(gpu.phys_device, &gpu.supported_features),
VulkanRenderDevice,
vkGetPhysicalDeviceFeatures);
PROFILE_VOID_EXPR(vkGetPhysicalDeviceMemoryProperties(gpu.phys_device, &gpu.memory_properties),
VulkanRenderDevice,
vkGetPhysicalDeviceMemoryProperties);
// Only one graphics queue is created; compute/copy queues are fetched from whatever
// family matched above (see vkGetDeviceQueue calls at the end)
const float priority = 1.0;
vk::DeviceQueueCreateInfo graphics_queue_create_info{};
graphics_queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
graphics_queue_create_info.pNext = nullptr;
graphics_queue_create_info.flags = 0;
graphics_queue_create_info.queueCount = 1;
graphics_queue_create_info.queueFamilyIndex = graphics_family_idx;
graphics_queue_create_info.pQueuePriorities = &priority;
std::vector<vk::DeviceQueueCreateInfo> queue_create_infos{&internal_allocator};
queue_create_infos.push_back(graphics_queue_create_info);
vk::PhysicalDeviceFeatures physical_device_features{};
physical_device_features.geometryShader = VK_TRUE;
physical_device_features.tessellationShader = VK_TRUE;
physical_device_features.samplerAnisotropy = VK_TRUE;
physical_device_features.shaderSampledImageArrayDynamicIndexing = VK_TRUE;
// GPU-assisted validation instruments shaders with stores/atomics, so those features
// must be enabled when it's on
if(settings->debug.enable_gpu_based_validation) {
physical_device_features.fragmentStoresAndAtomics = VK_TRUE;
physical_device_features.vertexPipelineStoresAndAtomics = VK_TRUE;
}
vk::DeviceCreateInfo device_create_info{};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = nullptr;
device_create_info.flags = 0;
device_create_info.queueCreateInfoCount = static_cast<uint32_t>(queue_create_infos.size());
device_create_info.pQueueCreateInfos = queue_create_infos.data();
device_create_info.pEnabledFeatures = &physical_device_features;
device_create_info.enabledExtensionCount = static_cast<uint32_t>(device_extensions.size());
device_create_info.ppEnabledExtensionNames = device_extensions.data();
device_create_info.enabledLayerCount = static_cast<uint32_t>(enabled_layer_names.size());
if(!enabled_layer_names.is_empty()) {
device_create_info.ppEnabledLayerNames = enabled_layer_names.data();
}
// Set up descriptor indexing
// Currently Nova only cares about indexing for texture descriptors
// NOTE(review): this pNext assignment is overwritten a few lines below by
// dev_12_features, so descriptor_indexing_features is never actually chained in.
// The Vulkan 1.2 feature struct covers the same flags, but confirm the overwrite
// is intentional and delete this dead block if so.
vk::PhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features = {};
descriptor_indexing_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES;
descriptor_indexing_features.shaderSampledImageArrayNonUniformIndexing = VK_TRUE;
descriptor_indexing_features.runtimeDescriptorArray = true;
descriptor_indexing_features.descriptorBindingVariableDescriptorCount = VK_TRUE;
descriptor_indexing_features.descriptorBindingPartiallyBound = VK_TRUE;
descriptor_indexing_features.descriptorBindingSampledImageUpdateAfterBind = VK_TRUE;
device_create_info.pNext = &descriptor_indexing_features;
const auto dev_12_features = vk::PhysicalDeviceVulkan12Features()
.setDescriptorIndexing(true)
.setShaderSampledImageArrayNonUniformIndexing(true)
.setRuntimeDescriptorArray(true)
.setDescriptorBindingVariableDescriptorCount(true)
.setDescriptorBindingPartiallyBound(true)
.setDescriptorBindingSampledImageUpdateAfterBind(true);
device_create_info.pNext = &dev_12_features;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
vk::Device vk_device;
const auto res = PROFILE_RET_EXPR(vkCreateDevice(gpu.phys_device, &device_create_info, &vk_alloc, &vk_device),
VulkanRenderEngine,
vkCreateDevice);
// NOTE(review): device-creation failure is silently ignored here (empty if body)
if(res != VK_SUCCESS) {
// logger();
}
device = vk_device;
graphics_family_index = graphics_family_idx;
vkGetDeviceQueue(device, graphics_family_idx, 0, &graphics_queue);
compute_family_index = compute_family_idx;
vkGetDeviceQueue(device, compute_family_idx, 0, &compute_queue);
transfer_family_index = copy_family_idx;
vkGetDeviceQueue(device, copy_family_idx, 0, &copy_queue);
}
// Returns true when `device` exposes every extension in `required_device_extensions`.
// Any missing extensions are logged as a warning before returning false.
bool VulkanRenderDevice::does_device_support_extensions(vk::PhysicalDevice device, const std::vector<char*>& required_device_extensions) {
    uint32_t extension_count;
    vkEnumerateDeviceExtensionProperties(device, nullptr, &extension_count, nullptr);
    std::vector<vk::ExtensionProperties> available(extension_count);
    vkEnumerateDeviceExtensionProperties(device, nullptr, &extension_count, available.data());

    // Start with everything required, then cross off what the device advertises
    rx::set<std::string> required{&internal_allocator};
    // Fix: `each_fwd` is rx::vector API and does not exist on the std::vector parameter
    // or local; use range-fors instead
    for(const char* extension : required_device_extensions) {
        required.insert(extension);
    }
    for(const vk::ExtensionProperties& extension : available) {
        required.erase(static_cast<const char*>(extension.extensionName));
    }

    if(!required.is_empty()) {
        std::stringstream ss;
        required.each([&](const std::string& extension) { ss << extension.data() << ", "; });

        logger->warning("Device does not support these required extensions: %s", ss.str().c_str());
    }

    // Whatever is left in `required` is unsupported
    const auto device_supports_required_extensions = required.is_empty();
    return device_supports_required_extensions;
}
void VulkanRenderDevice::create_swapchain() {
ZoneScoped; // Check what formats our rendering supports, and create a swapchain with one of those formats
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu.phys_device, surface, &gpu.surface_capabilities);
uint32_t num_surface_formats;
vkGetPhysicalDeviceSurfaceFormatsKHR(gpu.phys_device, surface, &num_surface_formats, nullptr);
gpu.surface_formats.resize(num_surface_formats);
vkGetPhysicalDeviceSurfaceFormatsKHR(gpu.phys_device, surface, &num_surface_formats, gpu.surface_formats.data());
uint32_t num_surface_present_modes;
vkGetPhysicalDeviceSurfacePresentModesKHR(gpu.phys_device, surface, &num_surface_present_modes, nullptr);
std::vector<vk::PresentModeKHR> present_modes{&internal_allocator, num_surface_present_modes};
vkGetPhysicalDeviceSurfacePresentModesKHR(gpu.phys_device, surface, &num_surface_present_modes, present_modes.data());
swapchain = internal_allocator.create<VulkanSwapchain>(settings->max_in_flight_frames,
this,
window.get_framebuffer_size(),
present_modes);
swapchain_size = window.get_framebuffer_size();
}
// Creates one set of command pools (one pool per queue family) for each worker thread.
void VulkanRenderDevice::create_per_thread_command_pools() {
    ZoneScoped;
    const uint32_t num_threads = 1; // TODO: Make this real
    command_pools_by_thread_idx.reserve(num_threads);

    for(uint32_t thread_idx = 0; thread_idx < num_threads; thread_idx++) {
        command_pools_by_thread_idx.push_back(make_new_command_pools());
    }
}
void VulkanRenderDevice::create_standard_pipeline_layout() {
    // Builds the pipeline layout shared by all standard pipelines: one descriptor
    // set (camera buffer, material buffer, three samplers, and a variable-count
    // texture array) plus an eight-byte push constant block, then a matching
    // descriptor pool. Debug names are attached when debugging is enabled.

    // Push constants: camera index and material index, visible to every stage
    standard_push_constants = std::array{
        // Camera and Material index
        vk::PushConstantRange().setStageFlags(vk::ShaderStageFlagBits::eAll).setOffset(0).setSize(sizeof(uint32_t) * 2)};

    // Per-binding flags: only the last binding (the texture array) is
    // variable-count, partially bound, and updatable after the set is bound
    const auto flags_per_binding = std::array{vk::DescriptorBindingFlags{},
                                              vk::DescriptorBindingFlags{},
                                              vk::DescriptorBindingFlags{},
                                              vk::DescriptorBindingFlags{},
                                              vk::DescriptorBindingFlags{},
                                              vk::DescriptorBindingFlagBits::eUpdateAfterBind |
                                                  vk::DescriptorBindingFlagBits::eVariableDescriptorCount |
                                                  vk::DescriptorBindingFlagBits::ePartiallyBound};
    const auto set_flags = vk::DescriptorSetLayoutBindingFlagsCreateInfo()
                               .setBindingCount(static_cast<uint32_t>(flags_per_binding.size()))
                               .setPBindingFlags(flags_per_binding.data());

    // Use a uniform buffer when the data fits in maxUniformBufferRange,
    // otherwise fall back to a storage buffer
    const auto camera_buffer_descriptor_type = (MAX_NUM_CAMERAS * sizeof(CameraUboData)) < gpu.props.limits.maxUniformBufferRange ?
                                                   vk::DescriptorType::eUniformBuffer :
                                                   vk::DescriptorType::eStorageBuffer;
    const auto material_buffer_descriptor_type = MATERIAL_BUFFER_SIZE.b_count() < gpu.props.limits.maxUniformBufferRange ?
                                                     vk::DescriptorType::eUniformBuffer :
                                                     vk::DescriptorType::eStorageBuffer;

    // Bindings of the standard descriptor set. Binding 5 is the variable-length,
    // partially-bound array of textures.
    // NOTE(review): initializing a std::vector directly from a std::array does not
    // compile with standard containers - this looks like a leftover from the
    // rx -> std container migration. Verify.
    const std::vector<vk::DescriptorSetLayoutBinding> bindings = std::array{// Camera data buffer
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(0)
                                                                                .setDescriptorType(camera_buffer_descriptor_type)
                                                                                .setDescriptorCount(1)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll),
                                                                            // Material data buffer
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(1)
                                                                                .setDescriptorType(material_buffer_descriptor_type)
                                                                                .setDescriptorCount(1)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll),
                                                                            // Point sampler
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(2)
                                                                                .setDescriptorType(vk::DescriptorType::eSampler)
                                                                                .setDescriptorCount(1)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll),
                                                                            // Bilinear sampler
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(3)
                                                                                .setDescriptorType(vk::DescriptorType::eSampler)
                                                                                .setDescriptorCount(1)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll),
                                                                            // Trilinear sampler
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(4)
                                                                                .setDescriptorType(vk::DescriptorType::eSampler)
                                                                                .setDescriptorCount(1)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll),
                                                                            // Textures array
                                                                            vk::DescriptorSetLayoutBinding()
                                                                                .setBinding(5)
                                                                                .setDescriptorType(vk::DescriptorType::eSampledImage)
                                                                                .setDescriptorCount(MAX_NUM_TEXTURES)
                                                                                .setStageFlags(vk::ShaderStageFlagBits::eAll)};

    // The set layout must allow update-after-bind because binding 5 uses it
    const auto dsl_layout_create = vk::DescriptorSetLayoutCreateInfo()
                                       .setFlags(vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool)
                                       .setBindingCount(static_cast<uint32_t>(bindings.size()))
                                       .setPBindings(bindings.data())
                                       .setPNext(&set_flags);

    device.createDescriptorSetLayout(&dsl_layout_create, &vk_internal_allocator, &standard_set_layout);

    const auto pipeline_layout_create = vk::PipelineLayoutCreateInfo()
                                            .setSetLayoutCount(1)
                                            .setPSetLayouts(&standard_set_layout)
                                            .setPushConstantRangeCount(static_cast<uint32_t>(standard_push_constants.size()))
                                            .setPPushConstantRanges(standard_push_constants.data());

    device.createPipelineLayout(&pipeline_layout_create, &vk_internal_allocator, &standard_pipeline_layout);

    // Pool sized generously for all standard descriptor sets
    const auto& pool = create_descriptor_pool(std::array{std::pair{DescriptorType::StorageBuffer, 5_u32 * 1024},
                                                         std::pair{DescriptorType::UniformBuffer, 5_u32 * 1024},
                                                         std::pair{DescriptorType::Texture, MAX_NUM_TEXTURES * 1024},
                                                         std::pair{DescriptorType::Sampler, 3_u32 * 1024}},
                                              internal_allocator);
    standard_descriptor_set_pool = *pool;

    // Name the objects so they're identifiable in validation messages and captures.
    // NOTE(review): assigning C VK_STRUCTURE_TYPE_* / VK_OBJECT_TYPE_* constants to
    // vk:: struct members mixes the C and C++ APIs - another migration leftover.
    if(settings->debug.enabled) {
        vk::DebugUtilsObjectNameInfoEXT pipeline_layout_name = {};
        pipeline_layout_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
        pipeline_layout_name.objectType = VK_OBJECT_TYPE_PIPELINE_LAYOUT;
        pipeline_layout_name.objectHandle = reinterpret_cast<uint64_t>(static_cast<vk::PipelineLayout>(standard_pipeline_layout));
        pipeline_layout_name.pObjectName = "Standard Pipeline Layout";
        NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &pipeline_layout_name));

        vk::DebugUtilsObjectNameInfoEXT descriptor_pool_name = {};
        descriptor_pool_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
        descriptor_pool_name.objectType = VK_OBJECT_TYPE_DESCRIPTOR_POOL;
        descriptor_pool_name.objectHandle = reinterpret_cast<uint64_t>(static_cast<vk::DescriptorPool>(standard_descriptor_set_pool));
        descriptor_pool_name.pObjectName = "Standard Descriptor Set Pool";
        NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &descriptor_pool_name));

        vk::DebugUtilsObjectNameInfoEXT descriptor_set_layout_name = {};
        descriptor_set_layout_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
        descriptor_set_layout_name.objectType = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT;
        descriptor_set_layout_name.objectHandle = reinterpret_cast<uint64_t>(static_cast<vk::DescriptorSetLayout>(standard_set_layout));
        descriptor_set_layout_name.pObjectName = "Standard descriptor set layout";
        NOVA_CHECK_RESULT(vkSetDebugUtilsObjectNameEXT(device, &descriptor_set_layout_name));
    }
}
std::unordered_map<uint32_t, vk::CommandPool> VulkanRenderDevice::make_new_command_pools() const {
ZoneScoped;
std::vector<uint32_t> queue_indices{&internal_allocator};
queue_indices.push_back(graphics_family_index);
queue_indices.push_back(transfer_family_index);
queue_indices.push_back(compute_family_index);
std::unordered_map<uint32_t, vk::CommandPool> pools_by_queue{&internal_allocator};
queue_indices.each_fwd([&](const uint32_t queue_index) {
vk::CommandPoolCreateInfo command_pool_create_info;
command_pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
command_pool_create_info.pNext = nullptr;
command_pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
command_pool_create_info.queueFamilyIndex = queue_index;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
vk::CommandPool command_pool;
NOVA_CHECK_RESULT(vkCreateCommandPool(device, &command_pool_create_info, &vk_alloc, &command_pool));
pools_by_queue.insert(queue_index, command_pool);
});
return pools_by_queue;
}
uint32_t VulkanRenderDevice::find_memory_type_with_flags(const uint32_t search_flags, const MemorySearchMode search_mode) const {
for(uint32_t i = 0; i < gpu.memory_properties.memoryTypeCount; i++) {
const vk::MemoryType& memory_type = gpu.memory_properties.memoryTypes[i];
switch(search_mode) {
case MemorySearchMode::Exact:
if(memory_type.propertyFlags == search_flags) {
return i;
}
break;
case MemorySearchMode::Fuzzy:
if((memory_type.propertyFlags & search_flags) != 0) {
return i;
}
break;
}
}
return VK_MAX_MEMORY_TYPES;
}
vk::ImageView VulkanRenderDevice::image_view_for_image(const RhiImage* image) {
    // TODO: This method is terrible. We shouldn't tie image views to images, we should let everything that wants
    // to use the image create its own image view
    const auto* vulkan_image = static_cast<const VulkanImage*>(image);

    return vulkan_image->image_view;
}
vk::CommandBufferLevel VulkanRenderDevice::to_vk_command_buffer_level(const RhiRenderCommandList::Level level) {
    // Translate a Nova command list level into the Vulkan command buffer level.
    // The return type is the vulkan.hpp enum class, so use its enumerators -
    // the C VK_COMMAND_BUFFER_LEVEL_* constants do not convert implicitly.
    switch(level) {
        case RhiRenderCommandList::Level::Primary:
            return vk::CommandBufferLevel::ePrimary;

        case RhiRenderCommandList::Level::Secondary:
            return vk::CommandBufferLevel::eSecondary;
    }

    // Unreachable for valid enum values; default to a primary command buffer
    return vk::CommandBufferLevel::ePrimary;
}
VulkanInputAssemblerLayout VulkanRenderDevice::get_input_assembler_setup(const std::vector<RhiVertexField>& vertex_fields) {
    // Builds the vertex input state for an interleaved vertex layout: every
    // attribute is sourced from one buffer at binding 0, at consecutive byte
    // offsets, with the full vertex size as the stride.
    std::vector<vk::VertexInputAttributeDescription> attributes;
    std::vector<vk::VertexInputBindingDescription> bindings;
    attributes.reserve(vertex_fields.size());

    // Total size of one interleaved vertex - used as the binding's stride
    uint32_t vertex_size = 0;
    for(const RhiVertexField& field : vertex_fields) {
        vertex_size += get_byte_size(field.format);
    }

    // Single interleaved binding. Previously one binding was declared per field
    // while every attribute still referenced binding 0, leaving bindings 1..N-1
    // declared but never referenced by any attribute.
    bindings.emplace_back(vk::VertexInputBindingDescription{0, vertex_size, vk::VertexInputRate::eVertex});

    uint32_t cur_location = 0;
    uint32_t byte_offset = 0;
    for(const RhiVertexField& field : vertex_fields) {
        const auto attr_format = to_vk_vertex_format(field.format);
        attributes.emplace_back(vk::VertexInputAttributeDescription{cur_location, 0, attr_format, byte_offset});

        cur_location++;
        byte_offset += get_byte_size(field.format);
    }

    return {attributes, bindings};
}
std::optional<vk::ShaderModule> VulkanRenderDevice::create_shader_module(const std::vector<uint32_t>& spirv) const {
ZoneScoped;
vk::ShaderModuleCreateInfo shader_module_create_info = {};
shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
shader_module_create_info.pCode = spirv.data();
shader_module_create_info.codeSize = spirv.size() * 4;
vk::ShaderModule module;
const vk::AllocationCallbacks& vk_alloc = wrap_allocator(internal_allocator);
const auto result = vkCreateShaderModule(device, &shader_module_create_info, &vk_alloc, &module);
if(result == VK_SUCCESS) {
return std::optional<vk::ShaderModule>(module);
} else {
logger->error("Could not create shader module: %s", to_string(result));
return rx::nullopt;
}
}
// Vulkan debug-utils messenger callback. Formats the message type, the queues,
// command buffers, and objects named in the callback data, and the validation
// message itself into one line, then routes it to the logger by severity.
// Always returns VK_FALSE so the triggering Vulkan call is not aborted.
// NOTE(review): std::string::format is not standard C++ - this looks like a
// leftover from an rx::string -> std::string rename. Verify it compiles.
VKAPI_ATTR vk::Bool32 VKAPI_CALL VulkanRenderDevice::debug_report_callback(const vk::DebugUtilsMessageSeverityFlagBitsEXT message_severity,
                                                                           const vk::DebugUtilsMessageTypeFlagsEXT message_types,
                                                                           const vk::DebugUtilsMessengerCallbackDataEXT* callback_data,
                                                                           void* render_device) {
    std::string type = "General";
    if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) != 0U) {
        type = "Validation";

    } else if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) != 0U) {
        type = "Performance";
    }

    // Comma-separated list of the queues involved, if any
    std::string queue_list;
    if(callback_data->queueLabelCount != 0) {
        queue_list.append(" Queues: ");
        for(uint32_t i = 0; i < callback_data->queueLabelCount; i++) {
            queue_list.append(callback_data->pQueueLabels[i].pLabelName);
            if(i != callback_data->queueLabelCount - 1) {
                queue_list.append(", ");
            }
        }
    }

    // Comma-separated list of the command buffers involved, if any
    std::string command_buffer_list;
    if(callback_data->cmdBufLabelCount != 0) {
        command_buffer_list.append("Command Buffers: ");
        for(uint32_t i = 0; i < callback_data->cmdBufLabelCount; i++) {
            command_buffer_list.append(callback_data->pCmdBufLabels[i].pLabelName);
            if(i != callback_data->cmdBufLabelCount - 1) {
                command_buffer_list.append(", ");
            }
        }
    }

    // Comma-separated list of the objects involved: type, debug name (if any),
    // and raw handle
    std::string object_list;
    if(callback_data->objectCount != 0) {
        object_list.append("Objects: ");
        for(uint32_t i = 0; i < callback_data->objectCount; i++) {
            object_list.append(to_string(callback_data->pObjects[i].objectType));
            if(callback_data->pObjects[i].pObjectName != nullptr) {
                object_list.append(std::string::format(" \"%s\"", callback_data->pObjects[i].pObjectName));
            }
            object_list.append(std::string::format(" (%x)", callback_data->pObjects[i].objectHandle));
            if(i != callback_data->objectCount - 1) {
                object_list.append(", ");
            }
        }
    }

    std::string vk_message;
    if(callback_data->pMessage != nullptr) {
        vk_message.append(callback_data->pMessage);
    }

    const std::string msg = std::string::format("[%s] %s %s %s %s", type, queue_list, command_buffer_list, object_list, vk_message);

    if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) != 0) {
        logger->error("%s", msg);
#ifdef NOVA_LINUX
        nova_backtrace();
#endif

        // Optionally halt the process so the error can be inspected in a debugger
        auto* vk_render_device = reinterpret_cast<VulkanRenderDevice*>(render_device);
        if(vk_render_device->settings->debug.break_on_validation_errors) {
            rx::abort("Validation error");
        }

    } else if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) != 0) {
        // Warnings may hint at unexpected / non-spec API usage
        logger->warning("%s", msg);

    } else if(((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) != 0) &&
              ((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) == 0U)) { // No validation info!
        // Informal messages that may become handy during debugging
        logger->info("%s", msg);

    } else if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) != 0) {
        // Diagnostic info from the Vulkan loader and layers
        // Usually not helpful in terms of API usage, but may help to debug layer and loader problems
        logger->debug("%s", msg);

    } else if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) == 0U) { // No validation info!
        // Catch-all to be super sure
        logger->info("%s", msg);
    }

    return VK_FALSE;
}
} // namespace nova::renderer::rhi
| 98,111
|
C++
|
.cpp
| 1,550
| 47.854839
| 236
| 0.612729
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,189
|
vulkan_utils.cpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_utils.cpp
|
#include "vulkan_utils.hpp"
#include <rx/core/algorithm/max.h>
#include <rx/core/log.h>
#include "nova_renderer/renderables.hpp"
#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "nova_renderer/rhi/render_device.hpp"
#include "vulkan_render_device.hpp"
namespace nova::renderer::rhi {
RX_LOG("VulkanUtil", logger);
// Maps a Nova ResourceState to the Vulkan image layout used while a resource
// is in that state. Unknown states are logged and fall back to GENERAL.
vk::ImageLayout to_vk_image_layout(const ResourceState layout) {
    switch(layout) {
        case ResourceState::Common:
            return VK_IMAGE_LAYOUT_GENERAL;

        case ResourceState::CopySource:
            return VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;

        case ResourceState::CopyDestination:
            return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

        case ResourceState::ShaderRead:
            return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        case ResourceState::ShaderWrite:
            return VK_IMAGE_LAYOUT_GENERAL; // TODO: Reevaluate this because it can't be optimal

        case ResourceState::RenderTarget:
            return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

        case ResourceState::DepthWrite:
            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

        case ResourceState::DepthRead:
            return VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL;

        case ResourceState::PresentSource:
            return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

        case ResourceState::Undefined:
            return VK_IMAGE_LAYOUT_UNDEFINED;

        default:
            logger->error("%u is not a valid image state", static_cast<uint32_t>(layout));
            return VK_IMAGE_LAYOUT_GENERAL;
    }
}
// Maps a single Nova ResourceAccess value to the corresponding Vulkan access
// flag. Unrecognized values yield an empty flag set.
vk::AccessFlags to_vk_access_flags(const ResourceAccess access) {
    switch(access) {
        case ResourceAccess::IndirectCommandRead:
            return VK_ACCESS_INDIRECT_COMMAND_READ_BIT;

        case ResourceAccess::IndexRead:
            return VK_ACCESS_INDEX_READ_BIT;

        case ResourceAccess::VertexAttributeRead:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;

        case ResourceAccess::UniformRead:
            return VK_ACCESS_UNIFORM_READ_BIT;

        case ResourceAccess::InputAttachmentRead:
            return VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;

        case ResourceAccess::ShaderRead:
            return VK_ACCESS_SHADER_READ_BIT;

        case ResourceAccess::ShaderWrite:
            return VK_ACCESS_SHADER_WRITE_BIT;

        case ResourceAccess::ColorAttachmentRead:
            return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;

        case ResourceAccess::ColorAttachmentWrite:
            return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

        case ResourceAccess::DepthStencilAttachmentRead:
            return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

        case ResourceAccess::DepthStencilAttachmentWrite:
            return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

        case ResourceAccess::CopyRead:
            return VK_ACCESS_TRANSFER_READ_BIT;

        case ResourceAccess::CopyWrite:
            return VK_ACCESS_TRANSFER_WRITE_BIT;

        case ResourceAccess::HostRead:
            return VK_ACCESS_HOST_READ_BIT;

        case ResourceAccess::HostWrite:
            return VK_ACCESS_HOST_WRITE_BIT;

        case ResourceAccess::MemoryRead:
            return VK_ACCESS_MEMORY_READ_BIT;

        case ResourceAccess::MemoryWrite:
            return VK_ACCESS_MEMORY_WRITE_BIT;

        case ResourceAccess::ShadingRateImageRead:
            return VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV;

        case ResourceAccess::AccelerationStructureRead:
            return VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV;

        case ResourceAccess::AccelerationStructureWrite:
            return VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV;

        case ResourceAccess::FragmentDensityMapRead:
            return VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT;
    }

    // Unreachable for valid enum values - return no access flags
    return {};
}
// Maps a renderpack primitive topology to Vulkan's; triangles are the default.
vk::PrimitiveTopology to_primitive_topology(const renderpack::RPPrimitiveTopology topology) {
    switch(topology) {
        case renderpack::RPPrimitiveTopology::Lines:
            return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;

        case renderpack::RPPrimitiveTopology::Triangles:
        default: // else the compiler complains
            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
    }
}
// Maps a Nova blend factor to Vulkan's; unknown values default to ZERO.
vk::BlendFactor to_blend_factor(const BlendFactor factor) {
    switch(factor) {
        case BlendFactor::DstAlpha:
            return VK_BLEND_FACTOR_DST_ALPHA;

        case BlendFactor::DstColor:
            return VK_BLEND_FACTOR_DST_COLOR;

        case BlendFactor::One:
            return VK_BLEND_FACTOR_ONE;

        case BlendFactor::OneMinusDstAlpha:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;

        case BlendFactor::OneMinusDstColor:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;

        case BlendFactor::OneMinusSrcAlpha:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;

        case BlendFactor::OneMinusSrcColor:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;

        case BlendFactor::SrcAlpha:
            return VK_BLEND_FACTOR_SRC_ALPHA;

        case BlendFactor::SrcColor:
            return VK_BLEND_FACTOR_SRC_COLOR;

        case BlendFactor::Zero:
            return VK_BLEND_FACTOR_ZERO;

        default:
            return VK_BLEND_FACTOR_ZERO;
    }
}
// Maps a Nova blend operation to Vulkan's; unknown values default to ADD.
vk::BlendOp to_blend_op(const BlendOp blend_op) {
    switch(blend_op) {
        case BlendOp::Add:
            return VK_BLEND_OP_ADD;

        case BlendOp::Subtract:
            return VK_BLEND_OP_SUBTRACT;

        case BlendOp::ReverseSubtract:
            return VK_BLEND_OP_REVERSE_SUBTRACT;

        case BlendOp::Min:
            return VK_BLEND_OP_MIN;

        case BlendOp::Max:
            return VK_BLEND_OP_MAX;

        default:
            return VK_BLEND_OP_ADD;
    }
}
// Maps a Nova depth/stencil compare op to Vulkan's; unknown values default to NEVER.
vk::CompareOp to_compare_op(const CompareOp compare_op) {
    switch(compare_op) {
        case CompareOp::Never:
            return VK_COMPARE_OP_NEVER;

        case CompareOp::Less:
            return VK_COMPARE_OP_LESS;

        case CompareOp::LessEqual:
            return VK_COMPARE_OP_LESS_OR_EQUAL;

        case CompareOp::Greater:
            return VK_COMPARE_OP_GREATER;

        case CompareOp::GreaterEqual:
            return VK_COMPARE_OP_GREATER_OR_EQUAL;

        case CompareOp::Equal:
            return VK_COMPARE_OP_EQUAL;

        case CompareOp::NotEqual:
            return VK_COMPARE_OP_NOT_EQUAL;

        case CompareOp::Always:
            return VK_COMPARE_OP_ALWAYS;

        default:
            return VK_COMPARE_OP_NEVER;
    }
}
// Maps a Nova stencil operation to Vulkan's; unknown values default to KEEP.
vk::StencilOp to_stencil_op(const StencilOp stencil_op) {
    switch(stencil_op) {
        case StencilOp::Keep:
            return VK_STENCIL_OP_KEEP;

        case StencilOp::Zero:
            return VK_STENCIL_OP_ZERO;

        case StencilOp::Replace:
            return VK_STENCIL_OP_REPLACE;

        case StencilOp::Increment:
            return VK_STENCIL_OP_INCREMENT_AND_CLAMP;

        case StencilOp::IncrementAndWrap:
            return VK_STENCIL_OP_INCREMENT_AND_WRAP;

        case StencilOp::Decrement:
            return VK_STENCIL_OP_DECREMENT_AND_CLAMP;

        case StencilOp::DecrementAndWrap:
            return VK_STENCIL_OP_DECREMENT_AND_WRAP;

        case StencilOp::Invert:
            return VK_STENCIL_OP_INVERT;

        default:
            return VK_STENCIL_OP_KEEP;
    }
}
// Maps a Nova pixel format to the Vulkan image format. Unknown formats are
// logged and fall back to RGBA8 UNORM.
vk::Format to_vk_format(const PixelFormat format) {
    switch(format) {
        case PixelFormat::Rgba8:
            return VK_FORMAT_R8G8B8A8_UNORM;

        case PixelFormat::Rgba16F:
            return VK_FORMAT_R16G16B16A16_SFLOAT;

        case PixelFormat::Rgba32F:
            return VK_FORMAT_R32G32B32A32_SFLOAT;

        case PixelFormat::Depth32:
            return VK_FORMAT_D32_SFLOAT;

        case PixelFormat::Depth24Stencil8:
            return VK_FORMAT_D24_UNORM_S8_UINT;

        default:
            logger->error("Unknown pixel format, returning RGBA8");
            return VK_FORMAT_R8G8B8A8_UNORM;
    }
}
// Maps a Nova texture filter to a Vulkan sampler filter.
// NOTE(review): Trilinear maps to VK_FILTER_CUBIC_IMG, which is cubic
// filtering, not trilinear (trilinear is LINEAR min/mag + LINEAR mipmap mode
// on the sampler). Confirm whether this is intentional.
vk::Filter to_vk_filter(const TextureFilter filter) {
    switch(filter) {
        case TextureFilter::Point:
            return VK_FILTER_NEAREST;

        case TextureFilter::Bilinear:
            return VK_FILTER_LINEAR;

        case TextureFilter::Trilinear:
            return VK_FILTER_CUBIC_IMG;

        default:
            return VK_FILTER_NEAREST;
    }
}
// Maps a Nova texture-coordinate wrap mode to a Vulkan sampler address mode;
// unknown values default to clamp-to-edge.
vk::SamplerAddressMode to_vk_address_mode(const TextureCoordWrapMode wrap_mode) {
    switch(wrap_mode) {
        case TextureCoordWrapMode::Repeat:
            return VK_SAMPLER_ADDRESS_MODE_REPEAT;

        case TextureCoordWrapMode::MirroredRepeat:
            return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;

        case TextureCoordWrapMode::ClampToEdge:
            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;

        case TextureCoordWrapMode::ClampToBorder:
            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;

        case TextureCoordWrapMode::MirrorClampToEdge:
            return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;

        default:
            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    }
}
// Maps a Nova descriptor type to Vulkan's; unknown values default to a
// uniform buffer.
vk::DescriptorType to_vk_descriptor_type(const DescriptorType type) {
    switch(type) {
        case DescriptorType::CombinedImageSampler:
            return vk::DescriptorType::eCombinedImageSampler;

        case DescriptorType::UniformBuffer:
            return vk::DescriptorType::eUniformBuffer;

        case DescriptorType::StorageBuffer:
            return vk::DescriptorType::eStorageBuffer;

        case DescriptorType::Texture:
            return vk::DescriptorType::eSampledImage;

        case DescriptorType::Sampler:
            return vk::DescriptorType::eSampler;

        default:
            return vk::DescriptorType::eUniformBuffer;
    }
}
// Translates a Nova shader-stage bitmask into the equivalent Vulkan stage
// flags, testing each stage bit individually. Nova's "Pixel" stage is Vulkan's
// fragment stage; the ray-tracing and mesh stages use the NV extensions.
vk::ShaderStageFlags to_vk_shader_stage_flags(const ShaderStage flags) {
    vk::ShaderStageFlags vk_flags{};

    if(flags & ShaderStage::Vertex) {
        vk_flags |= vk::ShaderStageFlagBits::eVertex;
    }
    if(flags & ShaderStage::TessellationControl) {
        vk_flags |= vk::ShaderStageFlagBits::eTessellationControl;
    }
    if(flags & ShaderStage::TessellationEvaluation) {
        vk_flags |= vk::ShaderStageFlagBits::eTessellationEvaluation;
    }
    if(flags & ShaderStage::Geometry) {
        vk_flags |= vk::ShaderStageFlagBits::eGeometry;
    }
    if(flags & ShaderStage::Pixel) {
        vk_flags |= vk::ShaderStageFlagBits::eFragment;
    }
    if(flags & ShaderStage::Compute) {
        vk_flags |= vk::ShaderStageFlagBits::eCompute;
    }
    if(flags & ShaderStage::Raygen) {
        vk_flags |= vk::ShaderStageFlagBits::eRaygenNV;
    }
    if(flags & ShaderStage::AnyHit) {
        vk_flags |= vk::ShaderStageFlagBits::eAnyHitNV;
    }
    if(flags & ShaderStage::ClosestHit) {
        vk_flags |= vk::ShaderStageFlagBits::eClosestHitNV;
    }
    if(flags & ShaderStage::Miss) {
        vk_flags |= vk::ShaderStageFlagBits::eMissNV;
    }
    if(flags & ShaderStage::Intersection) {
        vk_flags |= vk::ShaderStageFlagBits::eIntersectionNV;
    }
    if(flags & ShaderStage::Task) {
        vk_flags |= vk::ShaderStageFlagBits::eTaskNV;
    }
    if(flags & ShaderStage::Mesh) {
        vk_flags |= vk::ShaderStageFlagBits::eMeshNV;
    }

    return vk_flags;
}
// Returns the spec name of a VkResult value, for logging.
// NOTE(review): the parameter is the vk:: enum class but the case labels are
// the C VK_* constants - likely a migration leftover; verify it compiles.
std::string to_string(vk::Result result) {
    switch(result) {
        case VK_SUCCESS:
            return "VK_SUCCESS";

        case VK_NOT_READY:
            return "VK_NOT_READY";

        case VK_TIMEOUT:
            return "VK_TIMEOUT";

        case VK_EVENT_SET:
            return "VK_EVENT_SET";

        case VK_EVENT_RESET:
            return "VK_EVENT_RESET";

        case VK_INCOMPLETE:
            return "VK_INCOMPLETE";

        case VK_ERROR_OUT_OF_HOST_MEMORY:
            return "VK_ERROR_OUT_OF_HOST_MEMORY";

        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
            return "VK_ERROR_OUT_OF_DEVICE_MEMORY";

        case VK_ERROR_INITIALIZATION_FAILED:
            return "VK_ERROR_INITIALIZATION_FAILED";

        case VK_ERROR_DEVICE_LOST:
            return "VK_ERROR_DEVICE_LOST";

        case VK_ERROR_MEMORY_MAP_FAILED:
            return "VK_ERROR_MEMORY_MAP_FAILED";

        case VK_ERROR_LAYER_NOT_PRESENT:
            return "VK_ERROR_LAYER_NOT_PRESENT";

        case VK_ERROR_EXTENSION_NOT_PRESENT:
            return "VK_ERROR_EXTENSION_NOT_PRESENT";

        case VK_ERROR_FEATURE_NOT_PRESENT:
            return "VK_ERROR_FEATURE_NOT_PRESENT";

        case VK_ERROR_INCOMPATIBLE_DRIVER:
            return "VK_ERROR_INCOMPATIBLE_DRIVER";

        case VK_ERROR_TOO_MANY_OBJECTS:
            return "VK_ERROR_TOO_MANY_OBJECTS";

        case VK_ERROR_FORMAT_NOT_SUPPORTED:
            return "VK_ERROR_FORMAT_NOT_SUPPORTED";

        case VK_ERROR_FRAGMENTED_POOL:
            return "VK_ERROR_FRAGMENTED_POOL";

        case VK_ERROR_OUT_OF_POOL_MEMORY:
            return "VK_ERROR_OUT_OF_POOL_MEMORY";

        case VK_ERROR_INVALID_EXTERNAL_HANDLE:
            return "VK_ERROR_INVALID_EXTERNAL_HANDLE";

        case VK_ERROR_SURFACE_LOST_KHR:
            return "VK_ERROR_SURFACE_LOST_KHR";

        case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
            return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";

        case VK_SUBOPTIMAL_KHR:
            return "VK_SUBOPTIMAL_KHR";

        case VK_ERROR_OUT_OF_DATE_KHR:
            return "VK_ERROR_OUT_OF_DATE_KHR";

        case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
            return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";

        case VK_ERROR_VALIDATION_FAILED_EXT:
            return "VK_ERROR_VALIDATION_FAILED_EXT";

        case VK_ERROR_INVALID_SHADER_NV:
            return "VK_ERROR_INVALID_SHADER_NV";

        case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT:
            return "VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT";

        case VK_ERROR_FRAGMENTATION_EXT:
            return "VK_ERROR_FRAGMENTATION_EXT";

        case VK_ERROR_NOT_PERMITTED_EXT:
            return "VK_ERROR_NOT_PERMITTED_EXT";

        case VK_ERROR_INVALID_DEVICE_ADDRESS_EXT:
            return "VK_ERROR_INVALID_DEVICE_ADDRESS_EXT";

        case VK_RESULT_RANGE_SIZE:
            return "VK_RESULT_RANGE_SIZE";

        default:
            return "Unknown result";
    }
}
// Returns a human-readable name for a Vulkan object type, used when formatting
// debug-messenger output.
// NOTE(review): as with to_string(vk::Result), the case labels are the C
// VK_OBJECT_TYPE_* constants against a vk:: enum-class parameter - verify.
std::string to_string(vk::ObjectType obj_type) {
    switch(obj_type) {
        case VK_OBJECT_TYPE_UNKNOWN:
            return "Unknown";

        case VK_OBJECT_TYPE_INSTANCE:
            return "Instance";

        case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
            return "Physical Device";

        case VK_OBJECT_TYPE_DEVICE:
            return "Device";

        case VK_OBJECT_TYPE_QUEUE:
            return "Queue";

        case VK_OBJECT_TYPE_SEMAPHORE:
            return "Semaphore";

        case VK_OBJECT_TYPE_COMMAND_BUFFER:
            return "Command Buffer";

        case VK_OBJECT_TYPE_FENCE:
            return "Fence";

        case VK_OBJECT_TYPE_DEVICE_MEMORY:
            return "Device Memory";

        case VK_OBJECT_TYPE_BUFFER:
            return "Buffer";

        case VK_OBJECT_TYPE_IMAGE:
            return "Image ";

        case VK_OBJECT_TYPE_EVENT:
            return "Event";

        case VK_OBJECT_TYPE_QUERY_POOL:
            return "Query Pool";

        case VK_OBJECT_TYPE_BUFFER_VIEW:
            return "Buffer View";

        case VK_OBJECT_TYPE_IMAGE_VIEW:
            return "Image View";

        case VK_OBJECT_TYPE_SHADER_MODULE:
            return "Shader Module";

        case VK_OBJECT_TYPE_PIPELINE_CACHE:
            return "Pipeline Cache";

        case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
            return "Pipeline Layout";

        case VK_OBJECT_TYPE_RENDER_PASS:
            return "Render Pass";

        case VK_OBJECT_TYPE_PIPELINE:
            return "Pipeline";

        case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
            return "Descriptor Set Layout";

        case VK_OBJECT_TYPE_SAMPLER:
            return "Sampler";

        case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
            return "Descriptor Pool";

        case VK_OBJECT_TYPE_DESCRIPTOR_SET:
            return "Descriptor Set";

        case VK_OBJECT_TYPE_FRAMEBUFFER:
            return "Framebuffer";

        case VK_OBJECT_TYPE_COMMAND_POOL:
            return "Command Pool";

        case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
            return "YCBCR Conversion";

        case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
            return "Descriptor Update Template";

        case VK_OBJECT_TYPE_SURFACE_KHR:
            return "Surface";

        case VK_OBJECT_TYPE_SWAPCHAIN_KHR:
            return "Swapchain";

        case VK_OBJECT_TYPE_DISPLAY_KHR:
            return "Display KHR";

        case VK_OBJECT_TYPE_DISPLAY_MODE_KHR:
            return "Display Mode KHR";

        case VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT:
            return "Debug Report Callback EXT";

        case VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT:
            return "Debug Utils Messenger EXT";

        case VK_OBJECT_TYPE_VALIDATION_CACHE_EXT:
            return "Validation Cache EXT";

        case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV:
            return "Acceleration Structure NV";

        default:
            return "Unknown";
    }
}
// Maps a Nova vertex field format to the Vulkan vertex attribute format;
// unknown values default to a three-component float.
vk::Format to_vk_vertex_format(const VertexFieldFormat field) {
    switch(field) {
        case VertexFieldFormat::Uint:
            return VK_FORMAT_R32_UINT;

        case VertexFieldFormat::Float2:
            return VK_FORMAT_R32G32_SFLOAT;

        case VertexFieldFormat::Float3:
            return VK_FORMAT_R32G32B32_SFLOAT;

        case VertexFieldFormat::Float4:
            // Was VK_FORMAT_R32G32_SFLOAT (two components) - a copy-paste bug
            // that truncated four-component attributes
            return VK_FORMAT_R32G32B32A32_SFLOAT;

        default:
            return VK_FORMAT_R32G32B32_SFLOAT;
    }
}
// Builds one VkDescriptorSetLayout per descriptor set referenced by a
// pipeline's reflected bindings. Handles variable-count ("unbounded")
// bindings by attaching per-binding flags via pNext.
// NOTE(review): each_value/each_fwd are rx container idioms used here on std::
// containers - likely mid-migration; verify these calls compile.
std::vector<vk::DescriptorSetLayout> create_descriptor_set_layouts(
    const std::unordered_map<std::string, RhiResourceBindingDescription>& all_bindings,
    VulkanRenderDevice& render_device,
    rx::memory::allocator& allocator) {

    // Number of sets = highest set index referenced + 1, clamped to what the
    // GPU supports
    const auto max_sets = render_device.gpu.props.limits.maxBoundDescriptorSets;

    uint32_t num_sets = 0;
    all_bindings.each_value([&](const RhiResourceBindingDescription& desc) {
        if(desc.set >= max_sets) {
            logger->error("Descriptor set %u is out of range - your GPU only supports %u sets!", desc.set, max_sets);
        } else {
            num_sets = rx::algorithm::max(num_sets, desc.set + 1);
        }
    });

    // Per-set count of descriptors in that set's variable-size array (0 if none)
    std::vector<uint32_t> variable_descriptor_counts{&allocator};
    variable_descriptor_counts.resize(num_sets, 0);

    // Some precalculations so we know how much room we actually need
    std::vector<uint32_t> num_bindings_per_set{&allocator};
    num_bindings_per_set.resize(num_sets);
    all_bindings.each_value([&](const RhiResourceBindingDescription& desc) {
        num_bindings_per_set[desc.set] = rx::algorithm::max(num_bindings_per_set[desc.set], desc.binding + 1);
    });

    // Collect the layout bindings and their binding flags, grouped by set index
    std::vector<std::vector<vk::DescriptorSetLayoutBinding>> bindings_by_set{&allocator, num_sets};
    std::vector<std::vector<vk::DescriptorBindingFlags>> binding_flags_by_set{&allocator, num_sets};
    all_bindings.each_value([&](const RhiResourceBindingDescription& binding) {
        if(binding.set >= bindings_by_set.size()) {
            logger->error("You've skipped one or more descriptor sets! Don't do that, Nova can't handle it");
            return true;
        }

        const auto descriptor_binding = vk::DescriptorSetLayoutBinding()
                                            .setBinding(binding.binding)
                                            .setDescriptorType(to_vk_descriptor_type(binding.type))
                                            .setDescriptorCount(binding.count)
                                            .setStageFlags(to_vk_shader_stage_flags(binding.stages));

        logger->debug("Descriptor %u.%u is type %s", binding.set, binding.binding, descriptor_type_to_string(binding.type));

        if(binding.is_unbounded) {
            binding_flags_by_set[binding.set].push_back(vk::DescriptorBindingFlagBits::eVariableDescriptorCount |
                                                        vk::DescriptorBindingFlagBits::ePartiallyBound);

            // Record the maximum number of descriptors in the variable size array in this set
            variable_descriptor_counts[binding.set] = binding.count;

            logger->debug("Descriptor %u.%u is unbounded", binding.set, binding.binding);

        } else {
            binding_flags_by_set[binding.set].push_back({});
        }

        bindings_by_set[binding.set].push_back(descriptor_binding);

        return true;
    });

    // Build one create info per set. flag_infos is reserved up front so the
    // pNext pointers taken into it below stay valid (no reallocation).
    std::vector<vk::DescriptorSetLayoutCreateInfo> dsl_create_infos{&allocator};
    dsl_create_infos.reserve(bindings_by_set.size());

    std::vector<vk::DescriptorSetLayoutBindingFlagsCreateInfo> flag_infos{&allocator};
    flag_infos.reserve(bindings_by_set.size());

    // We may make bindings_by_set much larger than it needs to be is there's multiple descriptor bindings per set. Thus, only iterate
    // through the sets we actually care about
    bindings_by_set.each_fwd([&](const std::vector<vk::DescriptorSetLayoutBinding>& bindings) {
        vk::DescriptorSetLayoutCreateInfo create_info = {};
        create_info.bindingCount = static_cast<uint32_t>(bindings.size());
        create_info.pBindings = bindings.data();

        // Index by dsl_create_infos.size(): the flags for the set currently
        // being processed
        const auto& flags = binding_flags_by_set[dsl_create_infos.size()];
        vk::DescriptorSetLayoutBindingFlagsCreateInfo binding_flags = {};
        binding_flags.bindingCount = static_cast<uint32_t>(flags.size());
        binding_flags.pBindingFlags = flags.data();
        flag_infos.emplace_back(binding_flags);

        create_info.pNext = &flag_infos[flag_infos.size() - 1];

        dsl_create_infos.push_back(create_info);
    });

    // Finally create the layouts themselves
    std::vector<vk::DescriptorSetLayout> ds_layouts{&allocator};
    ds_layouts.resize(dsl_create_infos.size());
    auto vk_allocator = wrap_allocator(allocator);
    for(size_t i = 0; i < dsl_create_infos.size(); i++) {
        render_device.device.createDescriptorSetLayout(&dsl_create_infos[i], &vk_allocator, &ds_layouts[i]);
    }

    return ds_layouts;
}
bool operator&(const ShaderStage& lhs, const ShaderStage& rhs) {
    // True when the two stage masks share at least one set bit
    const auto lhs_bits = static_cast<uint32_t>(lhs);
    const auto rhs_bits = static_cast<uint32_t>(rhs);
    return (lhs_bits & rhs_bits) != 0;
}
} // namespace nova::renderer::rhi
| 24,291
|
C++
|
.cpp
| 518
| 33.330116
| 138
| 0.590513
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,190
|
vulkan_swapchain.cpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_swapchain.cpp
|
#include "vulkan_swapchain.hpp"
#include <rx/core/log.h>
#include "Tracy.hpp"
#include "vulkan_render_device.hpp"
#include "vulkan_utils.hpp"
#ifdef ERROR
#undef ERROR
#endif
namespace nova::renderer::rhi {
RX_LOG("VulkanSwapchain", logger);
// Creates the Vulkan swapchain and, for each swapchain image, the per-frame
// resources (framebuffer etc.) via create_resources_for_frame. A throwaway
// render pass that targets the swapchain format is used to create the
// framebuffers and destroyed immediately afterwards.
// NOTE(review): is_empty() is an rx container idiom used on a std::vector
// here (std uses empty()) - likely mid-migration; verify.
VulkanSwapchain::VulkanSwapchain(const uint32_t num_swapchain_images,
                                 VulkanRenderDevice* render_device,
                                 const glm::uvec2 window_dimensions,
                                 const std::vector<vk::PresentModeKHR>& present_modes)
    : Swapchain(num_swapchain_images, window_dimensions), render_device(render_device), num_swapchain_images(num_swapchain_images) {
    ZoneScoped;
    create_swapchain(num_swapchain_images, present_modes, window_dimensions);

    std::vector<vk::Image> vk_images = get_swapchain_images();
    if(vk_images.is_empty()) {
        logger->error("The swapchain returned zero images");
    }

    // Track the layout of each swapchain image; they start in present-src
    swapchain_image_layouts.resize(num_swapchain_images);
    swapchain_image_layouts.each_fwd(
        [&](vk::ImageLayout& swapchain_image_layout) { swapchain_image_layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; });

    // Create a dummy renderpass that writes to a single color attachment - the swapchain
    const vk::RenderPass renderpass = create_dummy_renderpass();

    const glm::uvec2 swapchain_size = {swapchain_extent.width, swapchain_extent.height};
    for(uint32_t i = 0; i < num_swapchain_images; i++) {
        create_resources_for_frame(vk_images[i], renderpass, swapchain_size);
    }

    // The render pass was only needed to create the framebuffers
    vkDestroyRenderPass(render_device->device, renderpass, nullptr);

    // move the swapchain images into the correct layout cause I guess they aren't for some reason?
    transition_swapchain_images_into_color_attachment_layout(vk_images);
}
uint8_t VulkanSwapchain::acquire_next_swapchain_image(rx::memory::allocator& allocator) {
    ZoneScoped;

    // Acquires the next swapchain image, blocking on a fence until the image is
    // actually available. Returns the image index (0 on an out-of-date swapchain,
    // which is not yet handled properly).
    auto* fence = render_device->create_fence(false, allocator);
    auto* vk_fence = static_cast<VulkanFence*>(fence);

    uint32_t acquired_image_idx;
    const auto acquire_result = vkAcquireNextImageKHR(render_device->device,
                                                      swapchain,
                                                      std::numeric_limits<uint64_t>::max(),
                                                      VK_NULL_HANDLE,
                                                      vk_fence->fence,
                                                      &acquired_image_idx);
    if(acquire_result == VK_ERROR_OUT_OF_DATE_KHR || acquire_result == VK_SUBOPTIMAL_KHR) {
        // TODO: Recreate the swapchain and all screen-relative textures
        logger->error("Swapchain out of date! One day you'll write the code to recreate it");

        // Don't leak the fence on this early-out path (it was previously never
        // destroyed here)
        std::vector<RhiFence*> fences;
        fences.push_back(vk_fence);
        render_device->destroy_fences(fences, allocator);

        return 0;
    }
    if(acquire_result != VK_SUCCESS) {
        logger->error("%s:%u=>%s", __FILE__, __LINE__, to_string(acquire_result));
    }

    // Block until we have the swapchain image in order to mimic D3D12. TODO: Reevaluate this decision
    std::vector<RhiFence*> fences;
    fences.push_back(vk_fence);
    render_device->wait_for_fences(fences);

    render_device->destroy_fences(fences, allocator);

    return static_cast<uint8_t>(acquired_image_idx);
}
void VulkanSwapchain::present(const uint32_t image_idx) {
ZoneScoped; vk::Result swapchain_result = {};
vk::PresentInfoKHR present_info = {};
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present_info.waitSemaphoreCount = 0;
present_info.pWaitSemaphores = nullptr;
present_info.swapchainCount = 1;
present_info.pSwapchains = &swapchain;
present_info.pImageIndices = &image_idx;
present_info.pResults = &swapchain_result;
const auto result = vkQueuePresentKHR(render_device->graphics_queue, &present_info);
if(result != VK_SUCCESS) {
logger->error("Could not present swapchain images: vkQueuePresentKHR failed: %s", to_string(result));
}
if(swapchain_result != VK_SUCCESS) {
logger->error("Could not present swapchain image %u: Presenting failed: %s", image_idx, to_string(result));
}
}
void VulkanSwapchain::transition_swapchain_images_into_color_attachment_layout(const std::vector<vk::Image>& images) const {
ZoneScoped; std::vector<vk::ImageMemoryBarrier> barriers;
barriers.reserve(images.size());
images.each_fwd([&](const vk::Image& image) {
vk::ImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = render_device->graphics_family_index;
barrier.dstQueueFamilyIndex = render_device->graphics_family_index;
barrier.image = image;
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; // Each swapchain image **will** be rendered to before it is
// presented
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barriers.push_back(barrier);
});
vk::CommandPool command_pool;
vk::CommandPoolCreateInfo command_pool_create_info = {};
command_pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
command_pool_create_info.queueFamilyIndex = render_device->graphics_family_index;
vkCreateCommandPool(render_device->device, &command_pool_create_info, nullptr, &command_pool);
vk::CommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
vk::CommandBuffer cmds;
vkAllocateCommandBuffers(render_device->device, &alloc_info, &cmds);
vk::CommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(cmds, &begin_info);
vkCmdPipelineBarrier(cmds,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0,
0,
nullptr,
0,
nullptr,
static_cast<uint32_t>(barriers.size()),
barriers.data());
vkEndCommandBuffer(cmds);
vk::Fence transition_done_fence;
vk::FenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(render_device->device, &fence_create_info, nullptr, &transition_done_fence);
vk::SubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmds;
vkQueueSubmit(render_device->graphics_queue, 1, &submit_info, transition_done_fence);
vkWaitForFences(render_device->device, 1, &transition_done_fence, VK_TRUE, std::numeric_limits<uint64_t>::max());
vkFreeCommandBuffers(render_device->device, command_pool, 1, &cmds);
}
    // Destroys the per-image resources created in create_resources_for_frame.
    // NOTE(review): the member containers are iterated with each_fwd (an rx::vector API) - confirm their
    // declared types. Also the per-frame framebuffers pushed into `framebuffers` are not destroyed here -
    // confirm where they are cleaned up.
    void VulkanSwapchain::deinit() {
        swapchain_images.each_fwd([&](const RhiImage* i) {
            const VulkanImage* vk_image = static_cast<const VulkanImage*>(i);
            vkDestroyImage(render_device->device, vk_image->image, nullptr);
            delete i;
        });
        swapchain_images.clear();
        swapchain_image_views.each_fwd([&](const vk::ImageView& iv) { vkDestroyImageView(render_device->device, iv, nullptr); });
        swapchain_image_views.clear();
        fences.each_fwd([&](const RhiFence* f) {
            const VulkanFence* vk_fence = static_cast<const VulkanFence*>(f);
            vkDestroyFence(render_device->device, vk_fence->fence, nullptr);
            delete f;
        });
        fences.clear();
    }
    // Number of images in the swapchain, as requested at construction
    uint32_t VulkanSwapchain::get_num_images() const { return num_swapchain_images; }
    // Tracked layout for the image at frame_idx (initialized to PRESENT_SRC in the constructor)
    vk::ImageLayout VulkanSwapchain::get_layout(const uint32_t frame_idx) { return swapchain_image_layouts[frame_idx]; }
    // Pixel dimensions of the swapchain images, chosen in create_swapchain
    vk::Extent2D VulkanSwapchain::get_swapchain_extent() const { return swapchain_extent; }
    // Pixel format of the swapchain images, chosen in create_swapchain
    vk::Format VulkanSwapchain::get_swapchain_format() const { return swapchain_format; }
vk::SurfaceFormatKHR VulkanSwapchain::choose_surface_format(const std::vector<vk::SurfaceFormatKHR>& formats) {
vk::SurfaceFormatKHR result;
if(formats.size() == 1 && formats[0].format == VK_FORMAT_UNDEFINED) {
result.format = VK_FORMAT_B8G8R8A8_UNORM;
result.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return result;
}
// We want 32 bit rgba and srgb nonlinear... I think? Will have to read up on it more and figure out what's up
if(const auto idx = formats.find_if([&](vk::SurfaceFormatKHR& fmt) {
return fmt.format == VK_FORMAT_B8G8R8A8_UNORM && fmt.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
}) != std::vector<vk::SurfaceFormatKHR>::k_npos) {
return formats[idx];
}
// We can't have what we want, so I guess we'll just use what we got
return formats[0];
}
vk::PresentModeKHR VulkanSwapchain::choose_present_mode(const std::vector<vk::PresentModeKHR>& modes) {
const vk::PresentModeKHR desired_mode = VK_PRESENT_MODE_MAILBOX_KHR;
// Mailbox mode is best mode (also not sure why)
if(modes.find(desired_mode) != std::vector<vk::PresentModeKHR>::k_npos) {
return desired_mode;
}
// FIFO, like FIFA, is forever
return VK_PRESENT_MODE_FIFO_KHR;
}
vk::Extent2D VulkanSwapchain::choose_surface_extent(const vk::SurfaceCapabilitiesKHR& caps, const glm::ivec2& window_dimensions) {
vk::Extent2D extent;
if(caps.currentExtent.width == 0xFFFFFFFF) {
extent.width = static_cast<uint32_t>(window_dimensions.x);
extent.height = static_cast<uint32_t>(window_dimensions.y);
} else {
extent = caps.currentExtent;
}
return extent;
}
void VulkanSwapchain::create_swapchain(const uint32_t requested_num_swapchain_images,
const std::vector<vk::PresentModeKHR>& present_modes,
const glm::uvec2& window_dimensions) {
ZoneScoped;
const auto surface_format = choose_surface_format(render_device->gpu.surface_formats);
const auto present_mode = choose_present_mode(present_modes);
const auto extent = choose_surface_extent(render_device->gpu.surface_capabilities, window_dimensions);
vk::SwapchainCreateInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
info.surface = render_device->surface;
info.minImageCount = requested_num_swapchain_images;
info.imageFormat = surface_format.format;
info.imageColorSpace = surface_format.colorSpace;
info.imageExtent = extent;
info.imageArrayLayers = 1;
info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
info.queueFamilyIndexCount = 1;
info.pQueueFamilyIndices = &render_device->graphics_family_index;
info.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
info.presentMode = present_mode;
info.clipped = VK_TRUE;
auto res = vkCreateSwapchainKHR(render_device->device, &info, nullptr, &swapchain);
logger->error("%u", res);
swapchain_format = surface_format.format;
this->present_mode = present_mode;
swapchain_extent = extent;
}
    // Creates the image view, framebuffer, and signaled fence for one swapchain image, appending each to
    // the corresponding member container. The VulkanImage/VulkanFramebuffer/VulkanFence allocations are
    // raw news; deinit() deletes the images and fences.
    void VulkanSwapchain::create_resources_for_frame(const vk::Image image, const vk::RenderPass renderpass, const glm::uvec2& swapchain_size) {
        ZoneScoped; auto* vk_image = new VulkanImage;
        vk_image->type = ResourceType::Image;
        vk_image->is_dynamic = true;
        vk_image->image = image;
        vk::ImageViewCreateInfo image_view_create_info = {};
        image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        image_view_create_info.image = image;
        image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
        image_view_create_info.format = swapchain_format;
        image_view_create_info.components.r = VK_COMPONENT_SWIZZLE_R;
        image_view_create_info.components.g = VK_COMPONENT_SWIZZLE_G;
        image_view_create_info.components.b = VK_COMPONENT_SWIZZLE_B;
        image_view_create_info.components.a = VK_COMPONENT_SWIZZLE_A;
        image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        image_view_create_info.subresourceRange.baseMipLevel = 0;
        image_view_create_info.subresourceRange.levelCount = 1;
        image_view_create_info.subresourceRange.baseArrayLayer = 0;
        image_view_create_info.subresourceRange.layerCount = 1;
        vkCreateImageView(render_device->device, &image_view_create_info, nullptr, &vk_image->image_view);
        swapchain_image_views.push_back(vk_image->image_view);
        swapchain_images.push_back(vk_image);
        vk::FramebufferCreateInfo framebuffer_create_info = {};
        framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        framebuffer_create_info.attachmentCount = 1;
        framebuffer_create_info.pAttachments = &vk_image->image_view;
        framebuffer_create_info.renderPass = renderpass;
        // NOTE(review): these use the swapchain_extent member rather than the swapchain_size parameter
        // (the constructor passes the same values, but confirm before relying on it)
        framebuffer_create_info.width = swapchain_extent.width;
        framebuffer_create_info.height = swapchain_extent.height;
        framebuffer_create_info.layers = 1;
        auto* vk_framebuffer = new VulkanFramebuffer;
        vk_framebuffer->size = swapchain_size;
        vk_framebuffer->num_attachments = 1;
        vkCreateFramebuffer(render_device->device, &framebuffer_create_info, nullptr, &vk_framebuffer->framebuffer);
        framebuffers.push_back(vk_framebuffer);
        // The fence starts signaled so the first frame doesn't block waiting on it
        vk::FenceCreateInfo fence_create_info = {};
        fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        fence_create_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
        vk::Fence fence;
        vkCreateFence(render_device->device, &fence_create_info, nullptr, &fence);
        fences.push_back(new VulkanFence{{}, fence});
    }
    // Queries the driver for the actual swapchain images (the driver may create more than requested -
    // num_swapchain_images is updated with the real count) and gives them debug names when debugging is on.
    std::vector<vk::Image> VulkanSwapchain::get_swapchain_images() {
        ZoneScoped; std::vector<vk::Image> vk_images;
        vkGetSwapchainImagesKHR(render_device->device, swapchain, &num_swapchain_images, nullptr);
        vk_images.resize(num_swapchain_images);
        vkGetSwapchainImagesKHR(render_device->device, swapchain, &num_swapchain_images, vk_images.data());
        if(render_device->settings.settings.debug.enabled) {
            for(uint32_t i = 0; i < vk_images.size(); i++) {
                const auto image_name = "Swapchain image " + std::to_string(i);
                vk::DebugUtilsObjectNameInfoEXT object_name = {};
                object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
                object_name.objectType = VK_OBJECT_TYPE_IMAGE;
                // NOTE(review): reinterpret_cast of the image handle to uint64_t assumes a pointer-sized
                // dispatchable/non-dispatchable handle - confirm on 32-bit targets
                object_name.objectHandle = reinterpret_cast<uint64_t>(vk_images[i]);
                object_name.pObjectName = image_name.c_str();
                NOVA_CHECK_RESULT(render_device->vkSetDebugUtilsObjectNameEXT(render_device->device, &object_name));
            }
        }
        return vk_images;
    }
vk::RenderPass VulkanSwapchain::create_dummy_renderpass() const {
vk::AttachmentDescription color_attachment = {};
color_attachment.format = swapchain_format;
color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
color_attachment.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
color_attachment.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
vk::AttachmentReference color_ref = {};
color_ref.attachment = 0;
color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
vk::SubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = static_cast<uint32_t>(1);
subpass.pColorAttachments = &color_ref;
vk::RenderPassCreateInfo render_pass_create_info = {};
render_pass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
render_pass_create_info.attachmentCount = 1;
render_pass_create_info.pAttachments = &color_attachment;
render_pass_create_info.subpassCount = 1;
render_pass_create_info.pSubpasses = &subpass;
render_pass_create_info.dependencyCount = 0;
vk::RenderPass renderpass;
vkCreateRenderPass(render_device->device, &render_pass_create_info, nullptr, &renderpass);
return renderpass;
}
} // namespace nova::renderer::rhi
| 18,049
|
C++
|
.cpp
| 308
| 47.094156
| 144
| 0.648708
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,191
|
window.cpp
|
NovaMods_nova-renderer/src/windowing/window.cpp
|
#include "nova_renderer/window.hpp"
#include <GLFW/glfw3.h>
#if NOVA_WINDOWS
#define GLFW_EXPOSE_NATIVE_WIN32
#elif NOVA_LINUX
typedef int Bool; // Because X11 is stupid
#define GLFW_EXPOSE_NATIVE_X11
#endif
// We have to include this here so it exists before we #undef Bool, but ReSharper doesn't know the horrors of X11
// ReSharper disable once CppUnusedIncludeDirective
#include <GLFW/glfw3native.h>
#include "nova_renderer/nova_renderer.hpp"
#include "nova_renderer/util/platform.hpp"
RX_LOG("Window", logger);
void glfw_error_callback(const int error, const char* desc) { logger->error("GLFW error(%u)%s", error, desc); }
namespace nova::renderer {
void NovaWindow::glfw_key_callback(GLFWwindow* window, const int key, int /* scancode */, const int action, int /* mods */) {
void* user_data = glfwGetWindowUserPointer(window);
auto* my_window = static_cast<NovaWindow*>(user_data);
const bool is_control_down = glfwGetKey(window, GLFW_KEY_LEFT_CONTROL) == GLFW_PRESS ||
glfwGetKey(window, GLFW_KEY_RIGHT_CONTROL) == GLFW_PRESS;
const bool is_shift_down = glfwGetKey(window, GLFW_KEY_LEFT_SHIFT) == GLFW_PRESS ||
glfwGetKey(window, GLFW_KEY_RIGHT_SHIFT) == GLFW_PRESS;
my_window->broadcast_key_event(key, action == GLFW_PRESS, is_control_down, is_shift_down);
}
void NovaWindow::glfw_mouse_callback(GLFWwindow* window, const double x_position, const double y_position) {
void* user_data = glfwGetWindowUserPointer(window);
auto* my_window = static_cast<NovaWindow*>(user_data);
my_window->broadcast_mouse_position(x_position, y_position);
}
void NovaWindow::glfw_mouse_button_callback(GLFWwindow* window, const int button, const int action, int /* mods */) {
void* user_data = glfwGetWindowUserPointer(window);
auto* my_window = static_cast<NovaWindow*>(user_data);
my_window->broadcast_mouse_button(button, action == GLFW_PRESS);
}
    // Initializes GLFW, creates the window without a client API context, and installs the static input
    // callbacks. This instance is stored as the window's user pointer so the callbacks can find it.
    NovaWindow::NovaWindow(const NovaSettings& options) {
        if(!glfwInit()) {
            logger->error("Failed to init GLFW");
            // NOTE(review): construction continues with a null `window` after this failure, leaving a
            // half-initialized object - consider propagating the error
            return;
        }
        glfwSetErrorCallback(glfw_error_callback);
        // No GL context - rendering is done through an explicit API against the native window handle
        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
        window = glfwCreateWindow(static_cast<int>(options.window.width),
                                  static_cast<int>(options.window.height),
                                  options.window.title,
                                  nullptr,
                                  nullptr);
        if(!window) {
            logger->error("Failed to create window");
            // glfwTerminate is not called here; the destructor performs it
            return;
        }
        glfwSetWindowUserPointer(window, this);
        glfwSetKeyCallback(window, &NovaWindow::glfw_key_callback);
        glfwSetMouseButtonCallback(window, &NovaWindow::glfw_mouse_button_callback);
        glfwSetCursorPosCallback(window, &NovaWindow::glfw_mouse_callback);
    }
    // Destroys the GLFW window and shuts down GLFW.
    // NOTE(review): if construction failed, `window` may be null here - confirm glfwDestroyWindow
    // tolerates that
    NovaWindow::~NovaWindow() {
        glfwDestroyWindow(window);
        glfwTerminate();
    }
    // Registers a callback invoked on key events with (key, is_press, is_control_down, is_shift_down)
    void NovaWindow::register_key_callback(std::function<void(uint32_t, bool, bool, bool)> key_callback) {
        key_callbacks.emplace_back(std::move(key_callback));
    }
    // Registers a callback invoked on cursor movement with (x_position, y_position)
    void NovaWindow::register_mouse_callback(std::function<void(double, double)> mouse_callback) {
        mouse_callbacks.emplace_back(std::move(mouse_callback));
    }
    // Registers a callback invoked on mouse button events with (button, is_pressed)
    void NovaWindow::register_mouse_button_callback(std::function<void(uint32_t, bool)> mouse_callback) {
        mouse_button_callbacks.emplace_back(std::move(mouse_callback));
    }
    // Fans a key event out to every registered key callback
    void NovaWindow::broadcast_key_event(const int key, const bool is_press, const bool is_control_down, const bool is_shift_down) {
        key_callbacks.each_fwd([&](const std::function<void(uint32_t, bool, bool, bool)>& callback) {
            callback(key, is_press, is_control_down, is_shift_down);
        });
    }
    // Fans a cursor position update out to every registered mouse callback
    void NovaWindow::broadcast_mouse_position(const double x_position, const double y_position) {
        mouse_callbacks.each_fwd([&](const std::function<void(double, double)>& callback) { callback(x_position, y_position); });
    }
    // Fans a mouse button event out to every registered mouse button callback
    void NovaWindow::broadcast_mouse_button(const int button, const bool is_pressed) {
        mouse_button_callbacks.each_fwd([&](const std::function<void(uint32_t, bool)>& callback) { callback(button, is_pressed); });
    }
    // Pumps the GLFW event queue, which fires the input callbacks registered in the constructor.
    // This _can_ be static, but I don't want it to be
    // ReSharper disable once CppMemberFunctionMayBeStatic
    void NovaWindow::poll_input() const { glfwPollEvents(); }
    // True once the user has requested the window close
    bool NovaWindow::should_close() const { return glfwWindowShouldClose(window); }
glm::uvec2 NovaWindow::get_framebuffer_size() const {
int width;
int height;
glfwGetFramebufferSize(window, &width, &height);
return {width, height};
}
glm::uvec2 NovaWindow::get_window_size() const {
int width;
int height;
glfwGetWindowSize(window, &width, &height);
return {width, height};
}
glm::vec2 NovaWindow::get_framebuffer_to_window_ratio() const {
const auto window_size = get_window_size();
const auto framebuffer_size = get_framebuffer_size();
return {static_cast<float>(framebuffer_size.x) / static_cast<float>(window_size.x),
static_cast<float>(framebuffer_size.y) / static_cast<float>(window_size.y)};
}
#if NOVA_WINDOWS
    // Native Win32 handle for the GLFW window
    HWND NovaWindow::get_window_handle() const { return glfwGetWin32Window(window); }
#elif NOVA_LINUX
    // Native X11 window and display for the GLFW window
    Window NovaWindow::get_window_handle() const { return glfwGetX11Window(window); }
    Display* NovaWindow::get_display() const { return glfwGetX11Display(); }
#endif
} // namespace nova::renderer
| 5,742
|
C++
|
.cpp
| 109
| 44.385321
| 132
| 0.668989
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,192
|
NovaSettings.cpp
|
NovaMods_nova-renderer/src/settings/NovaSettings.cpp
|
/*!
* \author David
* \date 23-Jun-16.
*/
#include <nova_renderer/nova_settings.hpp>
#include <nova_renderer/util/utils.hpp>
namespace nova::renderer {
void nova_settings::register_change_listener(IconfigListener* new_listener) { config_change_listeners.push_back(new_listener); }
void nova_settings::update_config_changed() {
for(IconfigListener* l : config_change_listeners) {
l->on_config_change(*this);
}
}
void nova_settings::update_config_loaded() {
for(IconfigListener* l : config_change_listeners) {
l->on_config_loaded(*this);
}
}
} // namespace nova::renderer
| 653
|
C++
|
.cpp
| 19
| 29
| 132
| 0.666667
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,193
|
nova_settings.cpp
|
NovaMods_nova-renderer/src/settings/nova_settings.cpp
|
#include "nova_renderer/nova_settings.hpp"
namespace nova::renderer {
    // Takes ownership of the settings struct by move
    NovaSettingsAccessManager::NovaSettingsAccessManager(NovaSettings settings) : settings(std::move(settings)) {}
    // Adds a listener to be notified of config changes/loads. Stored as a raw pointer - presumably the
    // caller keeps the listener alive; confirm lifetime expectations
    void NovaSettingsAccessManager::register_change_listener(ConfigListener* new_listener) {
        config_change_listeners.push_back(new_listener);
    }
    // Notifies every registered listener that a config value changed
    void NovaSettingsAccessManager::update_config_changed() {
        config_change_listeners.each_fwd([&](ConfigListener* l) { l->on_config_change(*this); });
    }
    // Notifies every registered listener that a config has been loaded
    void NovaSettingsAccessManager::update_config_loaded() {
        config_change_listeners.each_fwd([&](ConfigListener* l) { l->on_config_loaded(*this); });
    }
    // Read-only access to the wrapped settings
    const NovaSettings* NovaSettingsAccessManager::operator->() const { return &settings; }
} // namespace nova::renderer
| 799
|
C++
|
.cpp
| 14
| 51.714286
| 114
| 0.734615
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,194
|
vulkan_render_backend.cpp
|
NovaMods_nova-renderer/src/render/backend/vulkan_render_backend.cpp
|
#include "vulkan_render_backend.hpp"
#include <stdexcept>
#include <Tracy.hpp>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <spdlog/spdlog.h>
#include "VkBootstrap.h"
namespace nova::renderer {
    static auto logger = spdlog::stdout_color_mt("VulkanBackend");
#pragma region Options
    // Compile-time switches for Vulkan validation behavior
    // Requests the validation layers plus best-practices and synchronization validation
    constexpr bool ENABLE_DEBUG_LAYER = true;
    // Additionally enables VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT; disabled by default
    constexpr bool ENABLE_GPU_BASED_VALIDATION = false;
    // Calls DebugBreak() from the debug callback when a validation error is reported
    constexpr bool BREAK_ON_VALIDATION_ERRORS = true;
#pragma endregion
    // Brings up the Vulkan instance, surface, physical device, and logical device via vk-bootstrap,
    // then creates the per-thread command pools and standard pipeline layout.
    // Throws std::runtime_error if any vk-bootstrap step fails.
    VulkanBackend::VulkanBackend(HWND window_handle) {
        ZoneScoped;
        auto builder = vkb::InstanceBuilder()
                           .set_app_name("Minecraft")
                           .set_engine_name("Nova")
                           .set_app_version(1, 16, 0)
                           .set_engine_version(0, 10, 0)
                           .require_api_version(1, 2, 0)
                           .enable_extension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        if(ENABLE_DEBUG_LAYER) {
            builder.request_validation_layers()
                .add_validation_feature_enable(VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT)
                .add_validation_feature_enable(VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT)
                .set_debug_callback(&VulkanBackend::debug_report_callback);
        }
        // NOTE(review): if ENABLE_DEBUG_LAYER is false this adds a validation feature without requesting
        // the validation layers - confirm that's intended
        if(ENABLE_GPU_BASED_VALIDATION) {
            builder.add_validation_feature_enable(VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT);
        }
        auto inst_ret = builder.build();
        if(!inst_ret) {
            const auto msg = fmt::format("Failed to create Vulkan instance. Error: {}", inst_ret.error().message());
            throw std::runtime_error(msg);
        }
        auto vkb_inst = inst_ret.value();
        instance = vkb_inst.instance;
        create_surface(window_handle);
        auto physical_features = vk::PhysicalDeviceFeatures{};
        physical_features.fullDrawIndexUint32 = true;
        physical_features.multiDrawIndirect = true;
#ifndef NDEBUG
        // Debug builds only
        physical_features.robustBufferAccess = true;
#endif
        // NOTE(review): physical_features is built but never passed to the selector or device builder -
        // confirm whether a set_required_features call is missing
        auto selector = vkb::PhysicalDeviceSelector{vkb_inst}
                            .set_surface(surface)
                            .set_minimum_version(1, 2)
                            .require_dedicated_transfer_queue();
        auto phys_ret = selector.select();
        if(!phys_ret) {
            const auto msg = fmt::format("Failed to select Vulkan Physical Device. Error: {}", phys_ret.error().message());
            throw std::runtime_error(msg);
        }
        vkb::DeviceBuilder device_builder{phys_ret.value()};
        // automatically propagate needed data from instance & physical device
        auto dev_ret = device_builder.build();
        if(!dev_ret) {
            const auto msg = fmt::format("Failed to create Vulkan device. Error: {}", dev_ret.error().message());
            throw std::runtime_error(msg);
        }
        vkb::Device vkb_device = dev_ret.value();
        // Get the VkDevice handle used in the rest of a vulkan application
        device = vkb_device.device;
        // Get the graphics queue with a helper function
        auto graphics_queue_ret = vkb_device.get_queue(vkb::QueueType::graphics);
        if(!graphics_queue_ret) {
            const auto msg = fmt::format("Failed to get graphics queue. Error: {}", graphics_queue_ret.error().message());
            throw std::runtime_error(msg);
        }
        // NOTE(review): this local is never stored to a member, so the queue handle is discarded here -
        // confirm where the graphics queue is supposed to live
        VkQueue graphics_queue = graphics_queue_ret.value();
        create_per_thread_command_pools();
        create_standard_pipeline_layout();
        logger->info("Initialized Vulkan backend");
    }
    // NOTE(review): empty destructor - the surface, device, and instance created in the constructor are
    // not destroyed here; confirm whether cleanup happens elsewhere
    VulkanBackend::~VulkanBackend() {}
void VulkanBackend::begin_frame() {
frame_idx++;
if(frame_idx == num_gpu_frames) {
frame_idx = 0;
}
}
    // Accessor for the raw Vulkan instance handle
    vk::Instance VulkanBackend::get_instance() const { return instance; }
#pragma region Init
void VulkanBackend::create_surface(HWND window_handle) {
ZoneScoped;
VkWin32SurfaceCreateInfoKHR win32_surface_create = {};
win32_surface_create.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
win32_surface_create.hwnd = window_handle;
vkCreateWin32SurfaceKHR(instance, &win32_surface_create, nullptr, &surface);
}
#pragma endregion
#pragma region Debug
VKAPI_ATTR VkBool32 VKAPI_CALL VulkanBackend::debug_report_callback(const VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
const VkDebugUtilsMessageTypeFlagsEXT message_types,
const VkDebugUtilsMessengerCallbackDataEXT* callback_data,
void* render_device) {
std::string type = "General";
if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) != 0U) {
type = "Validation";
} else if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) != 0U) {
type = "Performance";
}
std::string queue_list;
if(callback_data->queueLabelCount != 0) {
queue_list.append(" Queues: ");
for(uint32_t i = 0; i < callback_data->queueLabelCount; i++) {
queue_list.append(callback_data->pQueueLabels[i].pLabelName);
if(i != callback_data->queueLabelCount - 1) {
queue_list.append(", ");
}
}
}
std::string command_buffer_list;
if(callback_data->cmdBufLabelCount != 0) {
command_buffer_list.append("Command Buffers: ");
for(uint32_t i = 0; i < callback_data->cmdBufLabelCount; i++) {
command_buffer_list.append(callback_data->pCmdBufLabels[i].pLabelName);
if(i != callback_data->cmdBufLabelCount - 1) {
command_buffer_list.append(", ");
}
}
}
std::string object_list;
if(callback_data->objectCount != 0) {
object_list.append("Objects: ");
for(uint32_t i = 0; i < callback_data->objectCount; i++) {
object_list.append(vk::to_string(static_cast<vk::ObjectType>(callback_data->pObjects[i].objectType)));
if(callback_data->pObjects[i].pObjectName != nullptr) {
object_list.append(fmt::format(" \"{}\"", callback_data->pObjects[i].pObjectName));
}
object_list.append(fmt::format(" ({})", callback_data->pObjects[i].objectHandle));
if(i != callback_data->objectCount - 1) {
object_list.append(", ");
}
}
}
std::string vk_message;
if(callback_data->pMessage != nullptr) {
vk_message.append(callback_data->pMessage);
}
const auto msg = fmt::format("[{}] {} {} {} {}", type, queue_list, command_buffer_list, object_list, vk_message);
if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) != 0) {
logger->error("{}", msg);
#ifdef NOVA_LINUX
nova_backtrace();
#endif
if(BREAK_ON_VALIDATION_ERRORS) {
DebugBreak();
}
} else if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) != 0) {
// Warnings may hint at unexpected / non-spec API usage
logger->warn("{}", msg);
} else if(((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) != 0) &&
((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) == 0U)) { // No validation info!
// Informal messages that may become handy during debugging
logger->info("{}", msg);
} else if((message_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) != 0) {
// Diagnostic info from the Vulkan loader and layers
// Usually not helpful in terms of API usage, but may help to debug layer and loader problems
logger->debug("{}", msg);
} else if((message_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) == 0U) { // No validation info!
// Catch-all to be super sure
logger->info("{}", msg);
}
return VK_FALSE;
}
void VulkanBackend::enable_debug_output() {
// Uses the Vulkan C bindings cause idk I don't wanna figure this bs out
ZoneScoped;
vk_create_debug_utils_messenger_ext = reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(
vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"));
vk_destroy_debug_report_callback_ext = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
vkGetInstanceProcAddr(instance, "vkDestroyDebugReportCallbackEXT"));
VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {};
debug_create_info.pNext = nullptr;
debug_create_info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
debug_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
debug_create_info.pfnUserCallback = reinterpret_cast<PFN_vkDebugUtilsMessengerCallbackEXT>(&debug_report_callback);
debug_create_info.pUserData = this;
const auto result = vk_create_debug_utils_messenger_ext(instance, &debug_create_info, nullptr, &debug_callback);
if(result != VK_SUCCESS) {
logger->error("Could not register debug callback");
}
}
#pragma endregion
} // namespace nova::renderer
| 10,023
|
C++
|
.cpp
| 189
| 40.661376
| 140
| 0.60333
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,195
|
json_utils.cpp
|
NovaMods_nova-renderer/src/loading/json_utils.cpp
|
#include "json_utils.hpp"
namespace nova {
std::string to_string(const std::string &str) {
return str;
}
} // namespace nova
| 143
|
C++
|
.cpp
| 6
| 20
| 51
| 0.647059
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,196
|
renderpack_data_conversions.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/renderpack_data_conversions.cpp
|
#include "nova_renderer/renderpack_data_conversions.hpp"
#include "nova_renderer/rendergraph.hpp"
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "rx/core/log.h"
#include "spirv_glsl.hpp"
namespace nova::renderer::renderpack {
using namespace spirv_cross;
RX_LOG("RenderpackConvert", logger);
ShaderSource to_shader_source(const RenderpackShaderSource& rp_source) {
ShaderSource source{};
source.filename = rp_source.filename;
source.source = rp_source.source;
return source;
}
rhi::VertexFieldFormat to_rhi_vertex_format(const SPIRType& spirv_type) {
switch(spirv_type.basetype) {
case SPIRType::UInt:
return rhi::VertexFieldFormat::Uint;
case SPIRType::Float: {
switch(spirv_type.vecsize) {
case 2:
return rhi::VertexFieldFormat::Float2;
case 3:
return rhi::VertexFieldFormat::Float3;
case 4:
return rhi::VertexFieldFormat::Float4;
default:
logger->error("Nova does not support float fields with %u vector elements", spirv_type.vecsize);
return rhi::VertexFieldFormat::Invalid;
}
};
case SPIRType::Unknown:
[[fallthrough]];
case SPIRType::Void:
[[fallthrough]];
case SPIRType::Boolean:
[[fallthrough]];
case SPIRType::SByte:
[[fallthrough]];
case SPIRType::UByte:
[[fallthrough]];
case SPIRType::Short:
[[fallthrough]];
case SPIRType::UShort:
[[fallthrough]];
case SPIRType::Int:
[[fallthrough]];
case SPIRType::Int64:
[[fallthrough]];
case SPIRType::UInt64:
[[fallthrough]];
case SPIRType::AtomicCounter:
[[fallthrough]];
case SPIRType::Half:
[[fallthrough]];
case SPIRType::Double:
[[fallthrough]];
case SPIRType::Struct:
[[fallthrough]];
case SPIRType::Image:
[[fallthrough]];
case SPIRType::SampledImage:
[[fallthrough]];
case SPIRType::Sampler:
[[fallthrough]];
case SPIRType::AccelerationStructureNV:
[[fallthrough]];
case SPIRType::ControlPointArray:
[[fallthrough]];
case SPIRType::Char:
[[fallthrough]];
default:
logger->error("Nova does not support vertex fields of type %u", spirv_type.basetype);
}
return {};
}
std::vector<rhi::RhiVertexField> get_vertex_fields(const ShaderSource& vertex_shader) {
    // Reflects over the vertex shader's SPIR-V to discover its stage inputs, and
    // translates each one into an RHI vertex field description
    // TODO: Figure out if there's a better way to reflect on the shader
    const CompilerGLSL compiler{vertex_shader.source.data(), vertex_shader.source.size()};

    const auto& stage_inputs = compiler.get_shader_resources().stage_inputs;

    std::vector<rhi::RhiVertexField> fields;
    fields.reserve(stage_inputs.size());

    for(const auto& input : stage_inputs) {
        const auto& input_type = compiler.get_type(input.base_type_id);
        fields.emplace_back(input.name.c_str(), to_rhi_vertex_format(input_type));
    }

    return fields;
}
PrimitiveTopology to_primitive_topology(const RPPrimitiveTopology primitive_mode) {
    // Lines map to a line list; everything else - including the explicit Triangles
    // case and any unknown value - is treated as a triangle list
    if(primitive_mode == RPPrimitiveTopology::Lines) {
        return PrimitiveTopology::LineList;
    }
    return PrimitiveTopology::TriangleList;
}
CompareOp to_compare_op(const RPCompareOp compare_op) {
    // Translates a renderpack compare op into the RHI's equivalent.
    // Unrecognized values fall back to Greater, matching the old switch default
    if(compare_op == RPCompareOp::Never) {
        return CompareOp::Never;
    }
    if(compare_op == RPCompareOp::Less) {
        return CompareOp::Less;
    }
    if(compare_op == RPCompareOp::LessEqual) {
        return CompareOp::LessEqual;
    }
    if(compare_op == RPCompareOp::Greater) {
        return CompareOp::Greater;
    }
    if(compare_op == RPCompareOp::GreaterEqual) {
        return CompareOp::GreaterEqual;
    }
    if(compare_op == RPCompareOp::Equal) {
        return CompareOp::Equal;
    }
    if(compare_op == RPCompareOp::NotEqual) {
        return CompareOp::NotEqual;
    }
    if(compare_op == RPCompareOp::Always) {
        return CompareOp::Always;
    }
    return CompareOp::Greater;
}
StencilOp to_stencil_op(const RPStencilOp stencil_op) {
    // Translates a renderpack stencil op into the RHI's equivalent.
    // Unrecognized values fall back to Keep, matching the old switch default
    if(stencil_op == RPStencilOp::Keep) {
        return StencilOp::Keep;
    }
    if(stencil_op == RPStencilOp::Zero) {
        return StencilOp::Zero;
    }
    if(stencil_op == RPStencilOp::Replace) {
        return StencilOp::Replace;
    }
    if(stencil_op == RPStencilOp::Increment) {
        return StencilOp::Increment;
    }
    if(stencil_op == RPStencilOp::IncrementAndWrap) {
        return StencilOp::IncrementAndWrap;
    }
    if(stencil_op == RPStencilOp::Decrement) {
        return StencilOp::Decrement;
    }
    if(stencil_op == RPStencilOp::DecrementAndWrap) {
        return StencilOp::DecrementAndWrap;
    }
    if(stencil_op == RPStencilOp::Invert) {
        return StencilOp::Invert;
    }
    return StencilOp::Keep;
}
BlendFactor to_blend_factor(const RPBlendFactor blend_factor) {
    // Translates a renderpack blend factor into the RHI's equivalent.
    // Unrecognized values fall back to One, matching the old switch default
    if(blend_factor == RPBlendFactor::One) {
        return BlendFactor::One;
    }
    if(blend_factor == RPBlendFactor::Zero) {
        return BlendFactor::Zero;
    }
    if(blend_factor == RPBlendFactor::SrcColor) {
        return BlendFactor::SrcColor;
    }
    if(blend_factor == RPBlendFactor::DstColor) {
        return BlendFactor::DstColor;
    }
    if(blend_factor == RPBlendFactor::OneMinusSrcColor) {
        return BlendFactor::OneMinusSrcColor;
    }
    if(blend_factor == RPBlendFactor::OneMinusDstColor) {
        return BlendFactor::OneMinusDstColor;
    }
    if(blend_factor == RPBlendFactor::SrcAlpha) {
        return BlendFactor::SrcAlpha;
    }
    if(blend_factor == RPBlendFactor::DstAlpha) {
        return BlendFactor::DstAlpha;
    }
    if(blend_factor == RPBlendFactor::OneMinusSrcAlpha) {
        return BlendFactor::OneMinusSrcAlpha;
    }
    if(blend_factor == RPBlendFactor::OneMinusDstAlpha) {
        return BlendFactor::OneMinusDstAlpha;
    }
    return BlendFactor::One;
}
std::optional<RhiGraphicsPipelineState> to_pipeline_state_create_info(const PipelineData& data, const Rendergraph& rendergraph) {
    // Translates a renderpack pipeline description into an RHI graphics pipeline
    // state, pulling viewport and attachment information from the pipeline's pass.
    // Returns nullopt if the pass (or its metadata) can't be found in the rendergraph
    constexpr auto npos = std::vector<RasterizerState>::k_npos;

    RhiGraphicsPipelineState info{};
    info.name = data.name;

    // Shaders - vertex is mandatory, geometry and fragment are optional
    info.vertex_shader = to_shader_source(data.vertex_shader);
    if(data.geometry_shader) {
        info.geometry_shader = to_shader_source(*data.geometry_shader);
    }
    if(data.fragment_shader) {
        info.pixel_shader = to_shader_source(*data.fragment_shader);
    }

    info.vertex_fields = get_vertex_fields(info.vertex_shader);

    // Viewport and scissor test
    const auto* pass = rendergraph.get_renderpass(data.pass);
    if(pass == nullptr) {
        logger->error("Could not find render pass %s, which pipeline %s needs", data.pass, data.name);
        return rx::nullopt;
    }
    info.viewport_size = pass->framebuffer->size;
    info.enable_scissor_test = data.scissor_mode == ScissorTestMode::DynamicScissorRect;

    // Input assembly
    info.topology = to_primitive_topology(data.primitive_mode);

    // Rasterizer state
    if(data.states.find(RasterizerState::InvertCulling) != npos) {
        info.rasterizer_state.cull_mode = PrimitiveCullingMode::FrontFace;
    } else if(data.states.find(RasterizerState::DisableCulling) != npos) {
        info.rasterizer_state.cull_mode = PrimitiveCullingMode::None;
    }
    info.rasterizer_state.depth_bias = data.depth_bias;
    info.rasterizer_state.slope_scaled_depth_bias = data.slope_scaled_depth_bias;

    // Depth state
    if(data.states.find(RasterizerState::DisableDepthTest) != npos) {
        info.depth_state = rx::nullopt;
    } else {
        // The compare op applies whenever depth testing is enabled - the old code
        // only set it inside the DisableDepthWrite branch, leaving the common
        // write-enabled case at the default compare op
        info.depth_state->compare_op = to_compare_op(data.depth_func);
        if(data.states.find(RasterizerState::DisableDepthWrite) != npos) {
            info.depth_state->enable_depth_write = false;
        }
    }

    // Stencil state
    if(data.states.find(RasterizerState::EnableStencilTest) != npos) {
        info.stencil_state = StencilState{};
        if(data.front_face) {
            info.stencil_state->front_face_op.fail_op = to_stencil_op(data.front_face->fail_op);
            // Fixed copy-paste bug: pass_op was previously translated from fail_op
            info.stencil_state->front_face_op.pass_op = to_stencil_op(data.front_face->pass_op);
            info.stencil_state->front_face_op.depth_fail_op = to_stencil_op(data.front_face->depth_fail_op);
            info.stencil_state->front_face_op.compare_op = to_compare_op(data.front_face->compare_op);
            info.stencil_state->front_face_op.compare_mask = data.front_face->compare_mask;
            info.stencil_state->front_face_op.write_mask = data.front_face->write_mask;
            info.stencil_state->front_face_op.reference_value = data.stencil_ref;
        }
        if(data.back_face) {
            info.stencil_state->back_face_op.fail_op = to_stencil_op(data.back_face->fail_op);
            // Fixed copy-paste bug: pass_op was previously translated from fail_op
            info.stencil_state->back_face_op.pass_op = to_stencil_op(data.back_face->pass_op);
            info.stencil_state->back_face_op.depth_fail_op = to_stencil_op(data.back_face->depth_fail_op);
            info.stencil_state->back_face_op.compare_op = to_compare_op(data.back_face->compare_op);
            info.stencil_state->back_face_op.compare_mask = data.back_face->compare_mask;
            info.stencil_state->back_face_op.write_mask = data.back_face->write_mask;
            info.stencil_state->back_face_op.reference_value = data.stencil_ref;
        }
    }

    // Blend state - every render target of the pass gets the same blend settings
    if(data.states.find(RasterizerState::Blending) != npos) {
        info.blend_state = BlendState{};
        info.blend_state->render_target_states.resize(pass->framebuffer->num_attachments);
        info.blend_state->render_target_states.each_fwd([&](RenderTargetBlendState& target_blend) {
            target_blend.enable = true;
            target_blend.src_color_factor = to_blend_factor(data.source_color_blend_factor);
            target_blend.dst_color_factor = to_blend_factor(data.destination_color_blend_factor);
            target_blend.color_op = BlendOp::Add;
            target_blend.src_alpha_factor = to_blend_factor(data.source_alpha_blend_factor);
            target_blend.dst_alpha_factor = to_blend_factor(data.destination_alpha_blend_factor);
            target_blend.alpha_op = BlendOp::Add;
        });
    }

    if(data.states.find(RasterizerState::DisableColorWrite) != npos) {
        info.enable_color_write = false;
    }
    if(data.states.find(RasterizerState::DisableAlphaWrite) != npos) {
        info.enable_alpha_write = false;
    }

    const auto& pass_data = rendergraph.get_metadata_for_renderpass(data.pass);
    if(!pass_data) {
        logger->error("Could not retrieve metadata for renderpass %s. Why can we retrieve the renderpass but not its metadata?",
                      data.pass);
        return rx::nullopt;
    }
    info.color_attachments = pass_data->data.texture_outputs;
    info.depth_texture = pass_data->data.depth_texture;

    return info;
}
}; // namespace nova::renderer::renderpack
| 12,416
|
C++
|
.cpp
| 263
| 34.250951
| 133
| 0.595546
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,197
|
render_graph_builder.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/render_graph_builder.cpp
|
#include "render_graph_builder.hpp"
#include <Tracy.hpp>
#include <rx/core/algorithm/max.h>
#include <rx/core/algorithm/min.h>
#include <rx/core/log.h>
#include "nova_renderer/constants.hpp"
namespace nova::renderer::renderpack {
RX_LOG("RenderGraphBuilder", logger);
/*!
* \brief Adds all the passes that `pass_name` depends on to the list of ordered passes
*
* This method performs a depth-first traversal of the pass tree. It shouldn't matter whether we do depth or
* breadth first, but depth first feels cleaner
*
* \param pass_name The passes that were just added to the list of ordered passes
* \param passes A map from pass name to pass. Useful for the explicit dependencies of a pass
* \param ordered_passes The passes in submissions order... almost. When this function adds to ordered_passes the
* list has a lot of duplicates. They're removed in a later step
* \param resource_to_write_pass A map from resource name to list of passes that write to that resource. Useful for
* resolving the implicit dependencies of a pass
* \param depth The depth in the tree that we're at. If this number ever grows bigger than the total number of
* passes, there's a circular dependency somewhere in the render graph. This is Bad and we hate it
*/
void add_dependent_passes(const std::string& pass_name,
const std::unordered_map<std::string, RenderPassCreateInfo>& passes,
std::vector<std::string>& ordered_passes,
const std::unordered_map<std::string, std::vector<std::string>>& resource_to_write_pass,
uint32_t depth);
// A Range has a writer when its write interval is non-degenerate (first <= last)
bool Range::has_writer() const { return first_write_pass <= last_write_pass; }
// Likewise for the read interval
bool Range::has_reader() const { return first_read_pass <= last_read_pass; }
// A resource is used if any pass reads or writes it
bool Range::is_used() const { return has_writer() || has_reader(); }
bool Range::can_alias() const {
    // If we read before we have completely written to a resource we need to preserve it, so no alias is possible.
    return !(has_reader() && has_writer() && first_read_pass <= first_write_pass);
}
unsigned Range::last_used_pass() const {
    // The latest pass index that touches this resource, over both reads and writes.
    // Returns 0 when the resource is never used
    unsigned last_pass = 0;
    if(has_writer() && last_write_pass > last_pass) {
        last_pass = last_write_pass;
    }
    if(has_reader() && last_read_pass > last_pass) {
        last_pass = last_read_pass;
    }
    return last_pass;
}
unsigned Range::first_used_pass() const {
    // The earliest pass index that touches this resource, over both reads and
    // writes. Returns ~0U when the resource is never used
    unsigned first_pass = ~0U;
    if(has_writer() && first_write_pass < first_pass) {
        first_pass = first_write_pass;
    }
    if(has_reader() && first_read_pass < first_pass) {
        first_pass = first_read_pass;
    }
    return first_pass;
}
bool Range::is_disjoint_with(const Range& other) const {
    // Two ranges may only be considered disjoint when both resources are actually
    // used and both are aliasable in the first place
    if(!is_used() || !other.is_used() || !can_alias() || !other.can_alias()) {
        return false;
    }

    // Disjoint when one range ends strictly before the other begins
    const bool this_ends_first = last_used_pass() < other.first_used_pass();
    const bool other_ends_first = other.last_used_pass() < first_used_pass();
    return this_ends_first || other_ends_first;
}
ntl::Result<std::vector<RenderPassCreateInfo>> order_passes(const std::vector<RenderPassCreateInfo>& passes) {
    // Produces the render passes in a valid submission order: starts from the
    // passes that write to the backbuffer, walks their dependencies depth-first,
    // then de-duplicates the resulting list. Fails if nothing writes the backbuffer.
    // NOTE(review): this file mixes rx-style container APIs (each_fwd, two-argument
    // insert, pointer-returning find, operator+= on vectors) with std type names -
    // verify these calls against the project's actual container types
    ZoneScoped;
    logger->debug("Executing Pass Scheduler");
    // Name -> pass lookup used while resolving dependencies
    std::unordered_map<std::string, RenderPassCreateInfo> render_passes_to_order;
    passes.each_fwd([&](const RenderPassCreateInfo& create_info) { render_passes_to_order.insert(create_info.name, create_info); });
    std::vector<std::string> ordered_passes;
    ordered_passes.reserve(passes.size());
    /*
     * Build some acceleration structures
     */
    logger->debug("Collecting passes that write to each resource...");
    // Maps from resource name to pass that writes to that resource, then from resource name to pass that reads from
    // that resource
    auto resource_to_write_pass = std::unordered_map<std::string, std::vector<std::string>>{};
    passes.each_fwd([&](const RenderPassCreateInfo& pass) {
        // Record this pass as a writer of each of its texture outputs...
        pass.texture_outputs.each_fwd([&](const TextureAttachmentInfo& output) {
            auto* write_pass_list = resource_to_write_pass.find(output.name);
            if(!write_pass_list) {
                write_pass_list = resource_to_write_pass.insert(output.name, {});
            }
            write_pass_list->push_back(pass.name);
        });
        // ...and of each of its output buffers
        pass.output_buffers.each_fwd([&](const std::string& buffer_name) {
            auto* write_pass_list = resource_to_write_pass.find(buffer_name);
            if(!write_pass_list) {
                write_pass_list = resource_to_write_pass.insert(buffer_name, {});
            }
            write_pass_list->push_back(pass.name);
        });
    });
    /*
     * Initial ordering of passes
     */
    logger->debug("First pass at ordering passes...");
    // The passes, in simple dependency order
    if(resource_to_write_pass.find(BACKBUFFER_NAME) == nullptr) {
        logger->error(
            "This render graph does not write to the backbuffer. Unable to load this renderpack because it can't render anything");
        return ntl::Result<std::vector<RenderPassCreateInfo>>(ntl::NovaError("Failed to order passes because no backbuffer was found"));
    }
    // Seed the ordering with every backbuffer writer, then pull in all of their
    // transitive dependencies (duplicates are allowed at this stage)
    const auto& backbuffer_writes = *resource_to_write_pass.find(BACKBUFFER_NAME);
    ordered_passes += backbuffer_writes;
    backbuffer_writes.each_fwd([&](const std::string& pass_name) {
        add_dependent_passes(pass_name, render_passes_to_order, ordered_passes, resource_to_write_pass, 1);
    });
    // We're going to loop through the original list of passes and remove them from the original list of passes
    // We want to keep the original passes around
    // This code taken from `RenderGraph::filter_passes` in the Granite engine
    // It loops through the ordered passes. When it sees the name of a new pass, it writes the pass to
    // ordered_passes and increments the write position. After all the passes are written, we remove all the
    // passes after the last one we wrote to, shrinking the list of ordered passes to only include the exact passes we want
    std::vector<std::string> unique_passes;
    // Walk in reverse so that the LAST occurrence of each pass wins, keeping each
    // pass after everything it depends on
    ordered_passes.each_rev([&](const std::string& pass_name) {
        if(unique_passes.find(pass_name) == std::vector<std::string>::k_npos) {
            unique_passes.push_back(pass_name);
        }
    });
    ordered_passes = unique_passes;
    // Granite does some reordering to try and find a submission order that has the fewest pipeline barriers. Not
    // gonna worry about that now
    std::vector<RenderPassCreateInfo> passes_in_submission_order;
    passes_in_submission_order.reserve(ordered_passes.size());
    ordered_passes.each_fwd(
        [&](const std::string& pass_name) { passes_in_submission_order.push_back(*render_passes_to_order.find(pass_name)); });
    return ntl::Result(passes_in_submission_order);
}
void add_dependent_passes(const std::string& pass_name,
                          const std::unordered_map<std::string, RenderPassCreateInfo>& passes,
                          std::vector<std::string>& ordered_passes,
                          const std::unordered_map<std::string, std::vector<std::string>>& resource_to_write_pass,
                          const uint32_t depth) {
    // Depth-first traversal of the pass dependency tree (see the declaration's
    // doc comment). For each resource this pass reads, append all of that
    // resource's writers and recurse into them
    if(depth > passes.size()) {
        logger->error("Circular render graph detected! Please fix your render graph to not have circular dependencies");
        // Bail out instead of recursing further - the old code logged the error but
        // kept going, which would recurse forever and overflow the stack on a
        // cyclic graph
        return;
    }
    const auto& pass = *passes.find(pass_name);
    // Implicit dependencies from texture reads
    pass.texture_inputs.each_fwd([&](const std::string& texture_name) {
        if(const auto write_passes = resource_to_write_pass.find(texture_name); write_passes == nullptr) {
            // TODO: Ignore the implicitly defined resources
            logger->error("Pass %s reads from resource %s, but nothing writes to it", pass_name, texture_name);
        } else {
            ordered_passes += *write_passes;
            write_passes->each_fwd([&](const std::string& write_pass) {
                add_dependent_passes(write_pass, passes, ordered_passes, resource_to_write_pass, depth + 1);
            });
        }
    });
    // Implicit dependencies from buffer reads
    pass.input_buffers.each_fwd([&](const std::string& buffer_name) {
        if(const auto& write_passes = resource_to_write_pass.find(buffer_name); write_passes == nullptr) {
            logger->error("Pass %s reads from buffer %s, but no passes write to it", pass_name, buffer_name);
        } else {
            ordered_passes += *write_passes;
            write_passes->each_fwd([&](const std::string& write_pass) {
                add_dependent_passes(write_pass, passes, ordered_passes, resource_to_write_pass, depth + 1);
            });
        }
    });
}
void determine_usage_order_of_textures(const std::vector<RenderPassCreateInfo>& passes,
                                       std::unordered_map<std::string, Range>& resource_used_range,
                                       std::vector<std::string>& resources_in_order) {
    // Walks the passes in submission order, recording for each texture the range
    // of pass indices where it's read and where it's written, plus the order in
    // which textures first appear. Feeds determine_aliasing_of_textures.
    // NOTE(review): assumes every texture already has an entry in
    // resource_used_range (find's result is dereferenced unchecked) - confirm
    uint32_t pass_idx = 0;
    passes.each_fwd([&](const RenderPassCreateInfo& pass) {
        // Texture inputs are reads - track them in the READ range. The old code
        // updated the write range here, which corrupted Range's reader/writer
        // bookkeeping and thus the aliasing analysis
        pass.texture_inputs.each_fwd([&](const std::string& input) {
            const auto tex_range = resource_used_range.find(input);
            if(pass_idx < tex_range->first_read_pass) {
                tex_range->first_read_pass = pass_idx;
            } else if(pass_idx > tex_range->last_read_pass) {
                tex_range->last_read_pass = pass_idx;
            }
            if(resources_in_order.find(input) == std::vector<std::string>::k_npos) {
                resources_in_order.push_back(input);
            }
        });
        // Texture outputs are writes
        pass.texture_outputs.each_fwd([&](const TextureAttachmentInfo& output) {
            const auto tex_range = resource_used_range.find(output.name);
            if(pass_idx < tex_range->first_write_pass) {
                tex_range->first_write_pass = pass_idx;
            } else if(pass_idx > tex_range->last_write_pass) {
                tex_range->last_write_pass = pass_idx;
            }
            if(resources_in_order.find(output.name) == std::vector<std::string>::k_npos) {
                resources_in_order.push_back(output.name);
            }
        });
        pass_idx++;
    });
}
// Computes which render targets can share memory: two textures may alias when
// their usage ranges are disjoint and their formats match. Returns a map from
// texture name to the (earlier-indexed) texture it aliases.
std::unordered_map<std::string, std::string> determine_aliasing_of_textures(const std::unordered_map<std::string, TextureCreateInfo>& textures,
                                                                            const std::unordered_map<std::string, Range>& resource_used_range,
                                                                            const std::vector<std::string>& resources_in_order) {
    std::unordered_map<std::string, std::string> aliases;
    for(size_t i = 0; i < resources_in_order.size(); i++) {
        const auto& to_alias_name = resources_in_order[i];
        logger->debug("Determining if we can alias `%s`. Does it exist? %d",
                      to_alias_name,
                      (textures.find(to_alias_name) != nullptr));
        if(to_alias_name == BACKBUFFER_NAME || to_alias_name == SCENE_OUTPUT_RT_NAME) {
            // Yay special cases!
            continue;
        }
        // NOTE(review): find's result is dereferenced unchecked here and below -
        // assumes every in-order resource is present in `textures`; confirm
        const auto& to_alias_format = textures.find(to_alias_name)->format;
        // Only try to alias with lower-indexed resources
        for(size_t j = 0; j < i; j++) {
            logger->debug("Trying to alias it with resource at index %zu out of %zu", j, resources_in_order.size());
            const std::string& try_alias_name = resources_in_order[j];
            if(resource_used_range.find(to_alias_name)->is_disjoint_with(*resource_used_range.find(try_alias_name))) {
                // They can be aliased if they have the same format
                const auto& try_alias_format = textures.find(try_alias_name)->format;
                if(to_alias_format == try_alias_format) {
                    // NOTE(review): later j's overwrite earlier ones (rx-style
                    // insert) so the LAST compatible candidate wins - confirm intent
                    aliases.insert(to_alias_name, try_alias_name);
                }
            }
        }
    }
    return aliases;
}
} // namespace nova::renderer::renderpack
| 12,838
|
C++
|
.cpp
| 224
| 44.321429
| 147
| 0.59274
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,198
|
renderpack_loading.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/renderpack_loading.cpp
|
#include "nova_renderer/loading/renderpack_loading.hpp"
#include <rx/core/json.h>
#include <rx/core/log.h>
#include "nova_renderer/util/platform.hpp"
#include "nova_renderer/constants.hpp"
#include "nova_renderer/filesystem/filesystem_helpers.hpp"
#include "nova_renderer/filesystem/folder_accessor.hpp"
#include "nova_renderer/filesystem/virtual_filesystem.hpp"
#include "nova_renderer/loading/shader_includer.hpp"
#include "../json_utils.hpp"
#include "Tracy.hpp"
#include "render_graph_builder.hpp"
#include "renderpack_validator.hpp"
namespace nova::renderer::renderpack {
RX_LOG("RenderpackLoading", logger);
using namespace filesystem;
std::optional<RenderpackResourcesData> load_dynamic_resources_file(FolderAccessorBase* folder_access);
ntl::Result<RendergraphData> load_rendergraph_file(FolderAccessorBase* folder_access);
std::vector<PipelineData> load_pipeline_files(FolderAccessorBase* folder_access);
std::optional<PipelineData> load_single_pipeline(FolderAccessorBase* folder_access, const std::string& pipeline_path);
std::vector<MaterialData> load_material_files(FolderAccessorBase* folder_access);
MaterialData load_single_material(FolderAccessorBase* folder_access, const std::string& material_path);
// Copies each render target's pixel format from the renderpack's dynamic texture
// list onto the render-pass attachments that reference it, so later pipeline
// creation doesn't have to look formats up again. The backbuffer and scene output
// are builtin targets and are skipped.
void fill_in_render_target_formats(RenderpackData& data) {
    const auto& textures = data.resources.render_targets;
    data.graph_data.passes.each_fwd([&](RenderPassCreateInfo& pass) {
        // NOTE(review): these lambdas return bool in the rx each_fwd style, where
        // returning false stops the iteration - confirm against the container API
        pass.texture_outputs.each_fwd([&](TextureAttachmentInfo& output) {
            if(output.name == BACKBUFFER_NAME) {
                // Backbuffer is a special snowflake
                return true;
            } else if(output.name == SCENE_OUTPUT_RT_NAME) {
                // Another special snowflake
                return true;
                // TODO: Figure out how to tell the loader about all the builtin resources
            }
            // Look the attachment's format up in the dynamic texture list
            std::optional<rhi::PixelFormat> pixel_format;
            textures.each_fwd([&](const TextureCreateInfo& texture_info) {
                if(texture_info.name == output.name) {
                    pixel_format = texture_info.format.pixel_format;
                    return false;
                }
                return true;
            });
            if(pixel_format) {
                output.pixel_format = *pixel_format;
            } else {
                logger->error("Render pass %s is trying to use texture %s, but it's not in the render graph's dynamic texture list",
                              pass.name,
                              output.name);
            }
            return true;
        });
        // The depth attachment gets the same treatment, but a missing format is
        // silently ignored here
        if(pass.depth_texture) {
            std::optional<rhi::PixelFormat> pixel_format;
            textures.each_fwd([&](const TextureCreateInfo& texture_info) {
                if(texture_info.name == pass.depth_texture->name) {
                    pixel_format = texture_info.format.pixel_format;
                    return false;
                }
                return true;
            });
            if(pixel_format) {
                pass.depth_texture->pixel_format = *pixel_format;
            }
        }
    });
}
void cache_pipelines_by_renderpass(RenderpackData& data);
RenderpackData load_renderpack_data(const std::string& renderpack_name) {
    // Loads every part of a renderpack from the VFS: dynamic resources, the render
    // graph, all pipeline descriptions, and all material descriptions, then wires
    // formats and pipeline caches together. Individual load failures are logged
    // and leave that part of the returned data default-constructed
    ZoneScoped;
    FolderAccessorBase* folder_access = VirtualFilesystem::get_instance()->get_folder_accessor(renderpack_name);
    // The renderpack has a number of items: There's the shaders themselves, of course, but there's so, so much more
    // What else is there?
    // - resources.json, to describe the dynamic resources that a renderpack needs
    // - passes.json, to describe the frame graph itself
    // - All the pipeline descriptions
    // - All the material descriptions
    //
    // All these things are loaded from the filesystem
    RenderpackData data{};
    // Don't blindly dereference the optional - resource loading fails when the
    // resources file doesn't validate (the old code dereferenced unconditionally)
    if(const auto resources = load_dynamic_resources_file(folder_access)) {
        data.resources = *resources;
    } else {
        logger->error("Could not load renderpack resources file for renderpack %s", renderpack_name);
    }
    const auto& graph_data = load_rendergraph_file(folder_access);
    if(graph_data) {
        data.graph_data = *graph_data;
    } else {
        logger->error("Could not load render graph file. Error: %s", graph_data.error.to_string());
    }
    data.pipelines = load_pipeline_files(folder_access);
    data.materials = load_material_files(folder_access);
    fill_in_render_target_formats(data);
    cache_pipelines_by_renderpass(data);
    return data;
}
std::optional<RenderpackResourcesData> load_dynamic_resources_file(FolderAccessorBase* folder_access) {
    // Reads and validates the renderpack's resources file, returning nullopt if
    // validation reports any errors
    ZoneScoped;
    const std::string resources_string = folder_access->read_text_file(RESOURCES_FILE);
    // Parse the file's text as JSON. The old code passed the string to the json
    // constructor, which wraps it as a single JSON *string value* instead of
    // parsing the document
    auto json_resources = nlohmann::json::parse(resources_string);
    const ValidationReport report = validate_renderpack_resources_data(json_resources);
    print(report);
    if(!report.errors.is_empty()) {
        return rx::nullopt;
    }
    return RenderpackResourcesData::from_json(json_resources);
}
ntl::Result<RendergraphData> load_rendergraph_file(FolderAccessorBase* folder_access) {
    // Loads rendergraph.json and rejects render graphs where no pass writes to the
    // scene output render target, since such a graph can't produce an image
    ZoneScoped;
    const auto passes_bytes = folder_access->read_text_file("rendergraph.json");
    // Parse the document - the old code wrapped the text in a JSON string value
    // instead of parsing it
    const auto json_passes = nlohmann::json::parse(passes_bytes);
    // NOTE(review): `decode` is an rx-json-style API, not nlohmann - confirm the
    // project provides this extension or switch to RendergraphData::from_json
    auto rendergraph_file = json_passes.decode<RendergraphData>({});
    bool writes_to_scene_output_rt = false;
    rendergraph_file.passes.each_fwd([&](const RenderPassCreateInfo& pass) {
        // Check if this pass writes to the scene output RT
        pass.texture_outputs.each_fwd([&](const TextureAttachmentInfo& tex) {
            if(tex.name == SCENE_OUTPUT_RT_NAME) {
                writes_to_scene_output_rt = true;
                return false;
            }
            return true;
        });
        // Stop scanning passes as soon as one writer is found
        if(writes_to_scene_output_rt) {
            return false;
        } else {
            return true;
        }
    });
    if(writes_to_scene_output_rt) {
        return ntl::Result<RendergraphData>(rendergraph_file);
    } else {
        return ntl::Result<RendergraphData>(
            MAKE_ERROR("At least one pass must write to the render target named %s", SCENE_OUTPUT_RT_NAME));
    }
}
// Scans the materials folder for `.pipeline` files and loads each one. Pipelines
// that fail to load are logged by load_single_pipeline and simply skipped here.
std::vector<PipelineData> load_pipeline_files(FolderAccessorBase* folder_access) {
    ZoneScoped;
    std::vector<std::string> potential_pipeline_files = folder_access->get_all_items_in_folder("materials");
    std::vector<PipelineData> output;
    // The resize will make this vector about twice as big as it should be, but there won't be any reallocating
    // so I'm into it
    output.reserve(potential_pipeline_files.size());
    potential_pipeline_files.each_fwd([&](const std::string& potential_file) {
        if(potential_file.ends_with(".pipeline")) {
            // Pipeline file!
            // NOTE(review): std::string has no static `format` - this is an
            // rx::string-style call; confirm the project's string type
            const auto pipeline_relative_path = std::string::format("%s/%s", "materials", potential_file);
            const auto& pipeline = load_single_pipeline(folder_access, pipeline_relative_path);
            if(pipeline) {
                output.push_back(*pipeline);
            }
        }
    });
    return output;
}
std::optional<PipelineData> load_single_pipeline(FolderAccessorBase* folder_access, const std::string& pipeline_path) {
    // Loads and validates one `.pipeline` file, then compiles every shader stage
    // it references. Returns nullopt when validation reports errors
    ZoneScoped;
    const auto pipeline_bytes = folder_access->read_text_file(pipeline_path);
    // Parse the document. The old code used `nlohmann::json{pipeline_bytes}`,
    // whose braced-init-list constructor builds a one-element JSON *array*
    // containing the raw text instead of parsing it
    auto json_pipeline = nlohmann::json::parse(pipeline_bytes);
    const ValidationReport report = validate_graphics_pipeline(json_pipeline);
    print(report);
    if(!report.errors.is_empty()) {
        logger->error("Loading pipeline file %s failed", pipeline_path);
        return rx::nullopt;
    }
    // NOTE(review): `decode` is an rx-json-style API, not nlohmann - confirm the
    // project provides this extension
    auto new_pipeline = json_pipeline.decode<PipelineData>({});
    // Compile each referenced shader stage; only the vertex shader is mandatory
    new_pipeline.vertex_shader.source = load_shader_file(new_pipeline.vertex_shader.filename,
                                                         folder_access,
                                                         rhi::ShaderStage::Vertex,
                                                         new_pipeline.defines);
    if(new_pipeline.geometry_shader) {
        (*new_pipeline.geometry_shader).source = load_shader_file((*new_pipeline.geometry_shader).filename,
                                                                  folder_access,
                                                                  rhi::ShaderStage::Geometry,
                                                                  new_pipeline.defines);
    }
    if(new_pipeline.tessellation_control_shader) {
        (*new_pipeline.tessellation_control_shader).source = load_shader_file((*new_pipeline.tessellation_control_shader).filename,
                                                                              folder_access,
                                                                              rhi::ShaderStage::TessellationControl,
                                                                              new_pipeline.defines);
    }
    if(new_pipeline.tessellation_evaluation_shader) {
        (*new_pipeline.tessellation_evaluation_shader)
            .source = load_shader_file((*new_pipeline.tessellation_evaluation_shader).filename,
                                       folder_access,
                                       rhi::ShaderStage::TessellationEvaluation,
                                       new_pipeline.defines);
    }
    if(new_pipeline.fragment_shader) {
        (*new_pipeline.fragment_shader).source = load_shader_file((*new_pipeline.fragment_shader).filename,
                                                                  folder_access,
                                                                  rhi::ShaderStage::Pixel,
                                                                  new_pipeline.defines);
    }
    logger->debug("Load of pipeline %s succeeded", pipeline_path);
    return new_pipeline;
}
// Loads a shader from the renderpack: precompiled `.spirv` files are returned
// as-is, HLSL is compiled via DXC, and anything else is treated as GLSL.
// `defines` is currently unused by the compile path visible here.
std::vector<uint32_t> load_shader_file(const std::string& filename,
                                       FolderAccessorBase* folder_access,
                                       const rhi::ShaderStage stage,
                                       const std::vector<std::string>& defines) {
    ZoneScoped;
    if(filename.ends_with(".spirv")) {
        // SPIR-V file!
        // NOTE(review): `disown()` is an rx::vector API and the byte-to-uint32
        // reinterpretation here depends on its semantics - confirm this actually
        // reinterprets the bytes rather than widening each byte to a word
        std::vector<uint8_t> bytes = folder_access->read_file(filename);
        const auto view = bytes.disown();
        return std::vector<uint32_t>{view};
    }
    std::string shader_source = folder_access->read_text_file(filename);
    // Pick the source language from the file extension; default to GLSL
    const auto& compiled_shader = [&] {
        if(filename.ends_with(".hlsl")) {
            return compile_shader(shader_source, stage, rhi::ShaderLanguage::Hlsl, folder_access);
        } else {
            return compile_shader(shader_source, stage, rhi::ShaderLanguage::Glsl, folder_access);
        }
    }();
    if(compiled_shader.is_empty()) {
        logger->error("Could not compile shader file %s", filename);
    }
    return compiled_shader;
}
// Scans the materials folder for `.mat` files and loads each one. Failed loads
// come back as default-constructed MaterialData (see load_single_material) and
// are still pushed into the output.
std::vector<MaterialData> load_material_files(FolderAccessorBase* folder_access) {
    ZoneScoped;
    std::vector<std::string> potential_material_files = folder_access->get_all_items_in_folder("materials");
    // The resize will make this vector about twice as big as it should be, but there won't be any reallocating
    // so I'm into it
    std::vector<MaterialData> output;
    output.reserve(potential_material_files.size());
    potential_material_files.each_fwd([&](const std::string& potential_file) {
        if(potential_file.ends_with(".mat")) {
            // NOTE(review): std::string has no static `format` - this is an
            // rx::string-style call; confirm the project's string type
            const auto material_filename = std::string::format("%s/%s", MATERIALS_DIRECTORY, potential_file);
            const MaterialData& material = load_single_material(folder_access, material_filename);
            output.push_back(material);
        }
    });
    return output;
}
MaterialData load_single_material(FolderAccessorBase* folder_access, const std::string& material_path) {
    // Loads and validates one `.mat` file. Returns a default-constructed
    // MaterialData when validation fails. The material's name is derived from the
    // file name with the ".mat" extension stripped
    ZoneScoped;
    const std::string material_text = folder_access->read_text_file(material_path);
    // Parse the document. The old code used `nlohmann::json{material_text}`,
    // whose braced-init-list constructor builds a one-element JSON *array*
    // containing the raw text instead of parsing it
    const auto json_material = nlohmann::json::parse(material_text);
    const auto report = validate_material(json_material);
    print(report);
    if(!report.errors.is_empty()) {
        // There were errors, this material can't be loaded
        logger->error("Load of material %s failed", material_path);
        return {};
    }
    const auto material_file_name = get_file_name(material_path);
    const auto material_extension_begin_idx = material_file_name.size() - 4; // ".mat"
    auto material = json_material.decode<MaterialData>({});
    material.name = material_file_name.substring(0, material_extension_begin_idx);
    // Every pass needs to know which material it belongs to
    material.passes.each_fwd([&](MaterialPass& pass) { pass.material_name = material.name; });
    // Fixed format-string typo: the old message used "&s" instead of "%s"
    logger->debug("Load of material %s succeeded - name %s", material_path, material.name);
    return material;
}
void cache_pipelines_by_renderpass(RenderpackData& data) {
    // For each renderpass, collect the names of the pipelines that render with it
    data.pipelines.each_fwd([&](const PipelineData& pipeline_info) {
        data.graph_data.passes.each_fwd([&](RenderPassCreateInfo& renderpass_info) {
            if(pipeline_info.pass == renderpass_info.name) {
                // Cache the pipeline's own name. The old code stored
                // pipeline_info.pass, which - given the equality check above - just
                // duplicated the renderpass's own name into pipeline_names
                renderpass_info.pipeline_names.emplace_back(pipeline_info.name);
            }
        });
    });
}
LPCWSTR to_hlsl_profile(const rhi::ShaderStage stage) {
    // Maps a shader stage to its DXC target profile string (Shader Model 6.4).
    // Raytracing stages have no standalone profile here and fall to the error path
    switch(stage) {
        case rhi::ShaderStage::Vertex:
            return L"vs_6_4";

        case rhi::ShaderStage::TessellationControl:
            return L"hs_6_4";

        case rhi::ShaderStage::TessellationEvaluation:
            return L"ds_6_4";

        case rhi::ShaderStage::Geometry:
            return L"gs_6_4";

        case rhi::ShaderStage::Pixel:
            return L"ps_6_4";

        case rhi::ShaderStage::Compute:
            return L"cs_6_4";

        case rhi::ShaderStage::Task:
            return L"as_6_4";

        case rhi::ShaderStage::Mesh:
            return L"ms_6_4";

        default:
            // Raygen, AnyHit, ClosestHit, Miss, Intersection, and anything unknown
            logger->error("Unsupported shader stage %u", stage);
            return {};
    }
}
std::vector<uint32_t> compile_shader(const std::string& source,
                                     const rhi::ShaderStage stage,
                                     const rhi::ShaderLanguage source_language,
                                     FolderAccessorBase* folder_accessor) {
    /*
     * Compile HLSL -> SPIR-V, using delicious DXC
     *
     * We use the old interface IDxcCompiler instead of IDxcCompiler3 because IDxcCompiler3 does not work, at all. It tells me that it
     * has a result, then it won't give me that result. I asked on the DirectX server and many other places, but apparently not even
     * Microsoft knows how to the new API for their compiler. Thus, I'm using the old and deprecated API - because it actually works
     */
    // NOTE(review): source_language is unused in this body - the GLSL/HLSL split
    // happens in the caller. Also, lib/compiler/encoding/includer are never
    // Release()d on the early-error paths below; a COM smart pointer (CComPtr /
    // winrt::com_ptr) would fix the leaks wholesale
    IDxcLibrary* lib;
    auto hr = DxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&lib));
    if(FAILED(hr)) {
        logger->error("Could not create DXC Library instance");
        return {};
    }
    IDxcCompiler* compiler;
    hr = DxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&compiler));
    if(FAILED(hr)) {
        logger->error("Could not create DXC instance");
        return {};
    }
    // Wrap the shader text in a DXC blob (pinned - DXC does not copy the text)
    IDxcBlobEncoding* encoding;
    hr = lib->CreateBlobWithEncodingFromPinned(source.data(), static_cast<UINT32>(source.size()), CP_UTF8, &encoding);
    if(FAILED(hr)) {
        logger->error("Could not create blob from shader");
        return {};
    }
    const auto profile = to_hlsl_profile(stage);
    // Emit SPIR-V for Vulkan 1.1 and keep reflection info
    std::vector<LPCWSTR> args = std::array{L"-spirv", L"-fspv-target-env=vulkan1.1", L"-fspv-reflect"};
    auto* includer = new NovaDxcIncludeHandler{*(&rx::memory::g_system_allocator), *lib, folder_accessor};
    IDxcOperationResult* compile_result;
    hr = compiler->Compile(encoding,
                           L"unknown", // File name, for error messages
                           L"main",    // Entry point
                           profile,
                           args.data(),
                           static_cast<UINT32>(args.size()),
                           nullptr,
                           0,
                           includer,
                           &compile_result);
    if(FAILED(hr)) {
        logger->error("Could not compile shader");
        return {};
    }
    compile_result->GetStatus(&hr);
    if(SUCCEEDED(hr)) {
        IDxcBlob* result_blob;
        hr = compile_result->GetResult(&result_blob);
        // Size the vector with (), not {}: brace-init would create a ONE-element
        // vector holding the word count, and the memcpy below would then write
        // past the end of its buffer
        std::vector<uint32_t> spirv(result_blob->GetBufferSize() / sizeof(uint32_t));
        memcpy(spirv.data(), result_blob->GetBufferPointer(), result_blob->GetBufferSize());
        result_blob->Release();
        return spirv;
    } else {
        IDxcBlobEncoding* error_buffer;
        compile_result->GetErrorBuffer(&error_buffer);
        logger->error("Error compiling shader:\n%s\n", static_cast<char const*>(error_buffer->GetBufferPointer()));
        error_buffer->Release();
        return {};
    }
}
} // namespace nova::renderer::renderpack
| 18,529
|
C++
|
.cpp
| 357
| 36.988796
| 138
| 0.568742
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,199
|
renderpack_data.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/renderpack_data.cpp
|
#include "nova_renderer/renderpack_data.hpp"
#include <rx/core/log.h>
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "../json_utils.hpp"
#define FILL_REQUIRED_FIELD(field, expr) \
[&] { \
const auto val = expr; \
if(val) { \
(field) = *val; \
} \
}();
namespace nova::renderer::renderpack {
RX_LOG("RenderpackData", logger);
bool TextureFormat::operator==(const TextureFormat& other) const {
    // Two formats match only when every field matches
    if(pixel_format != other.pixel_format) {
        return false;
    }
    if(dimension_type != other.dimension_type) {
        return false;
    }
    return width == other.width && height == other.height;
}

// Defined in terms of operator== so the two can never disagree
bool TextureFormat::operator!=(const TextureFormat& other) const { return !(*this == other); }
TextureFormat TextureFormat::from_json(const nlohmann::json& json) {
TextureFormat format = {};
format.pixel_format = get_json_value(json, "pixelFormat", rhi::PixelFormat::Rgba8, pixel_format_enum_from_json);
format.dimension_type = get_json_value<TextureDimensionType>(json,
"dimensionType",
TextureDimensionType::ScreenRelative,
texture_dimension_type_enum_from_json);
format.width = get_json_value<float>(json, "width", 0);
format.height = get_json_value<float>(json, "height", 0);
return format;
}
    /*!
     * \brief Deserializes a texture create info; `name` and `format` are required
     * (FILL_REQUIRED_FIELD only assigns when the value is present in the JSON)
     */
    TextureCreateInfo TextureCreateInfo::from_json(const nlohmann::json& json) {
        TextureCreateInfo info = {};
        FILL_REQUIRED_FIELD(info.name, get_json_opt<std::string>(json, "name"));
        FILL_REQUIRED_FIELD(info.format, get_json_opt<TextureFormat>(json, "format"));
        return info;
    }
    /*!
     * \brief Deserializes the resources file of a renderpack: dynamic render
     * targets ("textures") and samplers
     */
    RenderpackResourcesData RenderpackResourcesData::from_json(const nlohmann::json& json) {
        RenderpackResourcesData data;
        data.render_targets = get_json_array<TextureCreateInfo>(json, "textures");
        data.samplers = get_json_array<SamplerCreateInfo>(json, "samplers");
        // TODO: buffers
        // TODO: arbitrary images
        return data;
    }
bool TextureAttachmentInfo::operator==(const TextureAttachmentInfo& other) const { return other.name == name; }
TextureAttachmentInfo TextureAttachmentInfo::from_json(const nlohmann::json& json) {
TextureAttachmentInfo info = {};
FILL_REQUIRED_FIELD(info.name, get_json_opt<std::string>(json, "name"));
info.clear = get_json_value(json, "clear", false);
return info;
}
RenderPassCreateInfo RenderPassCreateInfo::from_json(const nlohmann::json& json) {
RenderPassCreateInfo info = {};
info.texture_inputs = get_json_array<std::string>(json, "textureInputs");
info.texture_outputs = get_json_array<TextureAttachmentInfo>(json, "textureOutputs");
info.depth_texture = get_json_opt<TextureAttachmentInfo>(json, "depthTexture");
info.input_buffers = get_json_array<std::string>(json, "inputBuffers");
info.output_buffers = get_json_array<std::string>(json, "outputBuffers");
info.name = get_json_value<std::string>(json, "name", "<NAME_MISSING>");
return info;
}
RendergraphData RendergraphData::from_json(const nlohmann::json& json) {
RendergraphData data;
data.passes = get_json_array<RenderPassCreateInfo>(json, "passes");
data.builtin_passes = get_json_array<std::string>(json, "builtinPasses");
return data;
}
SamplerCreateInfo SamplerCreateInfo::from_json(const nlohmann::json& json) {
SamplerCreateInfo info = {};
info.filter = get_json_value(json, "filter", TextureFilter::Point, texture_filter_enum_from_json);
info.wrap_mode = get_json_value(json, "wrapMode", WrapMode::Clamp, wrap_mode_enum_from_json);
return info;
}
    /*!
     * \brief Deserializes a stencil-op state; every field is required and is only
     * assigned when present in the JSON
     */
    StencilOpState StencilOpState::from_json(const nlohmann::json& json) {
        StencilOpState state = {};
        FILL_REQUIRED_FIELD(state.fail_op, get_json_opt<RPStencilOp>(json, "failOp", stencil_op_enum_from_json));
        FILL_REQUIRED_FIELD(state.pass_op, get_json_opt<RPStencilOp>(json, "passOp", stencil_op_enum_from_json));
        FILL_REQUIRED_FIELD(state.depth_fail_op, get_json_opt<RPStencilOp>(json, "depthFailOp", stencil_op_enum_from_json));
        FILL_REQUIRED_FIELD(state.compare_op, get_json_opt<RPCompareOp>(json, "compareOp", compare_op_enum_from_json));
        FILL_REQUIRED_FIELD(state.compare_mask, get_json_opt<uint32_t>(json, "compareMask"));
        FILL_REQUIRED_FIELD(state.write_mask, get_json_opt<uint32_t>(json, "writeMask"));
        return state;
    }
PipelineData PipelineData::from_json(const nlohmann::json& json) {
PipelineData pipeline = {};
FILL_REQUIRED_FIELD(pipeline.name, get_json_opt<std::string>(json, "name"));
FILL_REQUIRED_FIELD(pipeline.pass, get_json_opt<std::string>(json, "pass"));
pipeline.parent_name = get_json_value(json, "parent", "");
pipeline.defines = get_json_array<std::string>(json, "defined");
pipeline.states = get_json_array<RasterizerState>(json, "states", state_enum_from_json);
pipeline.front_face = get_json_opt<StencilOpState>(json, "frontFace");
pipeline.back_face = get_json_opt<StencilOpState>(json, "backFace");
pipeline.fallback = get_json_value<std::string>(json, "fallback", {});
pipeline.depth_bias = get_json_value<float>(json, "depthBias", 0);
pipeline.slope_scaled_depth_bias = get_json_value<float>(json, "slopeScaledDepthBias", 0);
pipeline.stencil_ref = get_json_value<uint32_t>(json, "stencilRef", 0);
pipeline.stencil_read_mask = get_json_value<uint32_t>(json, "stencilReadMask", 0);
pipeline.stencil_write_mask = get_json_value<uint32_t>(json, "stencilWriteMask", 0);
pipeline.msaa_support = get_json_value<MsaaSupport>(json, "msaaSupport", MsaaSupport::None, msaa_support_enum_from_json);
pipeline.primitive_mode = get_json_value<RPPrimitiveTopology>(json,
"primitiveMode",
RPPrimitiveTopology::Triangles,
primitive_topology_enum_from_json);
pipeline.source_color_blend_factor = get_json_value<RPBlendFactor>(json,
"sourceBlendFactor",
RPBlendFactor::One,
blend_factor_enum_from_json);
pipeline.destination_color_blend_factor = get_json_value<RPBlendFactor>(json,
"destBlendFactor",
RPBlendFactor::Zero,
blend_factor_enum_from_json);
pipeline.source_alpha_blend_factor = get_json_value<RPBlendFactor>(json,
"alphaSrc",
RPBlendFactor::One,
blend_factor_enum_from_json);
pipeline.destination_alpha_blend_factor = get_json_value<RPBlendFactor>(json,
"alphaDest",
RPBlendFactor::Zero,
blend_factor_enum_from_json);
pipeline.depth_func = get_json_value<RPCompareOp>(json, "depthFunc", RPCompareOp::Less, compare_op_enum_from_json);
pipeline.render_queue = get_json_value<RenderQueue>(json, "renderQueue", RenderQueue::Opaque, render_queue_enum_from_json);
pipeline.scissor_mode = get_json_value<ScissorTestMode>(json, "scissorMode", ScissorTestMode::Off, scissor_test_mode_from_json);
pipeline.vertex_shader.filename = get_json_value<std::string>(json, "vertexShader", "<NAME_MISSING>");
const auto geometry_shader_name = get_json_opt<std::string>(json, "geometryShader");
if(geometry_shader_name) {
pipeline.geometry_shader = RenderpackShaderSource{};
pipeline.geometry_shader->filename = *geometry_shader_name;
}
const auto tess_control_shader_name = get_json_opt<std::string>(json, "tessellationControlShader");
if(tess_control_shader_name) {
pipeline.tessellation_control_shader = RenderpackShaderSource{};
pipeline.tessellation_control_shader->filename = *tess_control_shader_name;
}
const auto tess_eval_shader_name = get_json_opt<std::string>(json, "tessellationEvalShader");
if(tess_eval_shader_name) {
pipeline.tessellation_evaluation_shader = RenderpackShaderSource{};
pipeline.tessellation_evaluation_shader->filename = *tess_eval_shader_name;
}
const auto fragment_shader_name = get_json_opt<std::string>(json, "fragmentShader");
if(fragment_shader_name) {
pipeline.fragment_shader = RenderpackShaderSource{};
pipeline.fragment_shader->filename = *fragment_shader_name;
}
return pipeline;
}
glm::uvec2 TextureFormat::get_size_in_pixels(const glm::uvec2& screen_size) const {
float pixel_width = width;
float pixel_height = height;
if(dimension_type == TextureDimensionType::ScreenRelative) {
pixel_width *= static_cast<float>(screen_size.x);
pixel_height *= static_cast<float>(screen_size.y);
}
return {std::round(pixel_width), std::round(pixel_height)};
}
std::optional<std::unordered_map<std::string, std::string>> map_from_json_object(const nlohmann::json& json) {
std::unordered_map<std::string, std::string> map;
json.each([&](const nlohmann::json& elem) {
std::string shader_variable;
FILL_REQUIRED_FIELD(shader_variable, get_json_opt<std::string>(elem, "variable"));
std::string resource_name;
FILL_REQUIRED_FIELD(resource_name, get_json_opt<std::string>(elem, "resource"));
map.insert(shader_variable, resource_name);
});
return map;
}
MaterialPass MaterialPass::from_json(const nlohmann::json& json) {
MaterialPass pass = {};
FILL_REQUIRED_FIELD(pass.name, get_json_opt<std::string>(json, "name"));
FILL_REQUIRED_FIELD(pass.pipeline, get_json_opt<std::string>(json, "pipeline"));
const auto val = get_json_opt<std::unordered_map<std::string, std::string>>(json, "bindings", map_from_json_object);
if(val) {
pass.bindings = *val;
}
// FILL_REQUIRED_FIELD(pass.bindings, get_json_opt<std::unordered_map<std::string, std::string>>(json, "bindings", map_from_json_object));
return pass;
}
MaterialData MaterialData::from_json(const nlohmann::json& json) {
MaterialData data = {};
FILL_REQUIRED_FIELD(data.name, get_json_opt<std::string>(json, "name"));
data.passes = get_json_array<MaterialPass>(json, "passes");
FILL_REQUIRED_FIELD(data.geometry_filter, get_json_opt<std::string>(json, "filter"));
return data;
}
rhi::PixelFormat pixel_format_enum_from_string(const std::string& str) {
if(str == "RGBA8") {
return rhi::PixelFormat::Rgba8;
}
if(str == "RGBA16F") {
return rhi::PixelFormat::Rgba16F;
}
if(str == "RGBA32F") {
return rhi::PixelFormat::Rgba32F;
}
if(str == "Depth") {
return rhi::PixelFormat::Depth32;
}
if(str == "DepthStencil") {
return rhi::PixelFormat::Depth24Stencil8;
}
logger->error("Unsupported pixel format %s", str);
return {};
}
TextureDimensionType texture_dimension_type_enum_from_string(const std::string& str) {
if(str == "ScreenRelative") {
return TextureDimensionType ::ScreenRelative;
}
if(str == "Absolute") {
return TextureDimensionType::Absolute;
}
logger->error("Unsupported texture dimension type %s", str);
return {};
}
TextureFilter texture_filter_enum_from_string(const std::string& str) {
if(str == "TexelAA") {
return TextureFilter::TexelAA;
}
if(str == "Bilinear") {
return TextureFilter::Bilinear;
}
if(str == "Point") {
return TextureFilter::Point;
}
logger->error("Unsupported texture filter %s", str);
return {};
}
WrapMode wrap_mode_enum_from_string(const std::string& str) {
if(str == "Repeat") {
return WrapMode::Repeat;
}
if(str == "Clamp") {
return WrapMode::Clamp;
}
logger->error("Unsupported wrap mode %s", str);
return {};
}
RPStencilOp stencil_op_enum_from_string(const std::string& str) {
if(str == "Keep") {
return RPStencilOp::Keep;
}
if(str == "Zero") {
return RPStencilOp::Zero;
}
if(str == "Replace") {
return RPStencilOp::Replace;
}
if(str == "Incr") {
return RPStencilOp::Increment;
}
if(str == "IncrWrap") {
return RPStencilOp::IncrementAndWrap;
}
if(str == "Decr") {
return RPStencilOp::Decrement;
}
if(str == "DecrWrap") {
return RPStencilOp::DecrementAndWrap;
}
if(str == "Invert") {
return RPStencilOp::Invert;
}
logger->error("Unsupported stencil op %s", str);
return {};
}
RPCompareOp compare_op_enum_from_string(const std::string& str) {
if(str == "Never") {
return RPCompareOp::Never;
}
if(str == "Less") {
return RPCompareOp::Less;
}
if(str == "LessEqual") {
return RPCompareOp::LessEqual;
}
if(str == "Greater") {
return RPCompareOp::Greater;
}
if(str == "GreaterEqual") {
return RPCompareOp::GreaterEqual;
}
if(str == "Equal") {
return RPCompareOp::Equal;
}
if(str == "NotEqual") {
return RPCompareOp::NotEqual;
}
if(str == "Always") {
return RPCompareOp::Always;
}
logger->error("Unsupported compare op ", str);
return {};
}
MsaaSupport msaa_support_enum_from_string(const std::string& str) {
if(str == "MSAA") {
return MsaaSupport::MSAA;
}
if(str == "Both") {
return MsaaSupport::Both;
}
if(str == "None") {
return MsaaSupport::None;
}
logger->error("Unsupported antialiasing mode %s", str);
return {};
}
RPPrimitiveTopology primitive_topology_enum_from_string(const std::string& str) {
if(str == "Triangles") {
return RPPrimitiveTopology::Triangles;
}
if(str == "Lines") {
return RPPrimitiveTopology::Lines;
}
logger->error("Unsupported primitive mode %s", str);
return {};
}
RPBlendFactor blend_factor_enum_from_string(const std::string& str) {
if(str == "One") {
return RPBlendFactor::One;
}
if(str == "Zero") {
return RPBlendFactor::Zero;
}
if(str == "SrcColor") {
return RPBlendFactor::SrcColor;
}
if(str == "DstColor") {
return RPBlendFactor::DstColor;
}
if(str == "OneMinusSrcColor") {
return RPBlendFactor::OneMinusSrcColor;
}
if(str == "OneMinusDstColor") {
return RPBlendFactor::OneMinusDstColor;
}
if(str == "SrcAlpha") {
return RPBlendFactor::SrcAlpha;
}
if(str == "DstAlpha") {
return RPBlendFactor::DstAlpha;
}
if(str == "OneMinusSrcAlpha") {
return RPBlendFactor::OneMinusSrcAlpha;
}
if(str == "OneMinusDstAlpha") {
return RPBlendFactor::OneMinusDstAlpha;
}
logger->error("Unsupported blend factor %s", str);
return {};
}
RenderQueue render_queue_enum_from_string(const std::string& str) {
if(str == "Transparent") {
return RenderQueue::Transparent;
}
if(str == "Opaque") {
return RenderQueue::Opaque;
}
if(str == "Cutout") {
return RenderQueue::Cutout;
}
logger->error("Unsupported render queue %s", str);
return {};
}
ScissorTestMode scissor_test_mode_from_string(const std::string& str) {
if(str == "Off") {
return ScissorTestMode::Off;
} else if(str == "StaticScissorRect") {
return ScissorTestMode::StaticScissorRect;
} else if(str == "DynamicScissorRect") {
return ScissorTestMode::DynamicScissorRect;
}
logger->error("Unsupported scissor mode %s", str);
return {};
}
RasterizerState state_enum_from_string(const std::string& str) {
if(str == "Blending") {
return RasterizerState::Blending;
}
if(str == "InvertCulling") {
return RasterizerState::InvertCulling;
}
if(str == "DisableCulling") {
return RasterizerState::DisableCulling;
}
if(str == "DisableDepthWrite") {
return RasterizerState::DisableDepthWrite;
}
if(str == "DisableDepthTest") {
return RasterizerState::DisableDepthTest;
}
if(str == "EnableStencilTest") {
return RasterizerState::EnableStencilTest;
}
if(str == "StencilWrite") {
return RasterizerState::StencilWrite;
}
if(str == "DisableColorWrite") {
return RasterizerState::DisableColorWrite;
}
if(str == "EnableAlphaToCoverage") {
return RasterizerState::EnableAlphaToCoverage;
}
if(str == "DisableAlphaWrite") {
return RasterizerState::DisableAlphaWrite;
}
logger->error("Unsupported state enum %s", str);
return {};
}
    // Thin adapters so the *_from_string parsers can be plugged into
    // get_json_value/get_json_opt as deserializer callbacks.
    // NOTE(review): j.as_string() is not part of nlohmann::json's public API —
    // presumably the json type used here is (or wraps) a different library;
    // confirm against json_utils.hpp
    rhi::PixelFormat pixel_format_enum_from_json(const nlohmann::json& j) { return pixel_format_enum_from_string(j.as_string()); }
    TextureDimensionType texture_dimension_type_enum_from_json(const nlohmann::json& j) {
        return texture_dimension_type_enum_from_string(j.as_string());
    }
    TextureFilter texture_filter_enum_from_json(const nlohmann::json& j) { return texture_filter_enum_from_string(j.as_string()); }
    WrapMode wrap_mode_enum_from_json(const nlohmann::json& j) { return wrap_mode_enum_from_string(j.as_string()); }
    RPStencilOp stencil_op_enum_from_json(const nlohmann::json& j) { return stencil_op_enum_from_string(j.as_string()); }
    RPCompareOp compare_op_enum_from_json(const nlohmann::json& j) { return compare_op_enum_from_string(j.as_string()); }
    MsaaSupport msaa_support_enum_from_json(const nlohmann::json& j) { return msaa_support_enum_from_string(j.as_string()); }
    RPPrimitiveTopology primitive_topology_enum_from_json(const nlohmann::json& j) { return primitive_topology_enum_from_string(j.as_string()); }
    RPBlendFactor blend_factor_enum_from_json(const nlohmann::json& j) { return blend_factor_enum_from_string(j.as_string()); }
    RenderQueue render_queue_enum_from_json(const nlohmann::json& j) { return render_queue_enum_from_string(j.as_string()); }
    ScissorTestMode scissor_test_mode_from_json(const nlohmann::json& j) { return scissor_test_mode_from_string(j.as_string()); }
    RasterizerState state_enum_from_json(const nlohmann::json& j) { return state_enum_from_string(j.as_string()); }
std::string to_string(const rhi::PixelFormat val) {
switch(val) {
case rhi::PixelFormat::Rgba8:
return "RGBA8";
case rhi::PixelFormat::Rgba16F:
return "RGBA16F";
case rhi::PixelFormat::Rgba32F:
return "RGBA32F";
case rhi::PixelFormat::Depth32:
return "Depth";
case rhi::PixelFormat::Depth24Stencil8:
return "DepthStencil";
}
return "Unknown value";
}
std::string to_string(const TextureDimensionType val) {
switch(val) {
case TextureDimensionType::ScreenRelative:
return "ScreenRelative";
case TextureDimensionType::Absolute:
return "Absolute";
}
return "Unknown value";
}
std::string to_string(const TextureFilter val) {
switch(val) {
case TextureFilter::TexelAA:
return "TexelAA";
case TextureFilter::Bilinear:
return "Bilinear";
case TextureFilter::Point:
return "Point";
}
return "Unknown value";
}
std::string to_string(const WrapMode val) {
switch(val) {
case WrapMode::Repeat:
return "Repeat";
case WrapMode::Clamp:
return "Clamp";
}
return "Unknown value";
}
std::string to_string(const RPStencilOp val) {
switch(val) {
case RPStencilOp::Keep:
return "Keep";
case RPStencilOp::Zero:
return "Zero";
case RPStencilOp::Replace:
return "Replace";
case RPStencilOp::Increment:
return "Incr";
case RPStencilOp::IncrementAndWrap:
return "IncrWrap";
case RPStencilOp::Decrement:
return "Decr";
case RPStencilOp::DecrementAndWrap:
return "DecrWrap";
case RPStencilOp::Invert:
return "Invert";
}
return "Unknown value";
}
std::string to_string(const RPCompareOp val) {
switch(val) {
case RPCompareOp::Never:
return "Never";
case RPCompareOp::Less:
return "Less";
case RPCompareOp::LessEqual:
return "LessEqual";
case RPCompareOp::Greater:
return "Greater";
case RPCompareOp::GreaterEqual:
return "GreaterEqual";
case RPCompareOp::Equal:
return "Equal";
case RPCompareOp::NotEqual:
return "NotEqual";
case RPCompareOp::Always:
return "Always";
}
return "Unknown value";
}
std::string to_string(const MsaaSupport val) {
switch(val) {
case MsaaSupport::MSAA:
return "MSAA";
case MsaaSupport::Both:
return "Both";
case MsaaSupport::None:
return "None";
}
return "Unknown value";
}
std::string to_string(const RPPrimitiveTopology val) {
switch(val) {
case RPPrimitiveTopology::Triangles:
return "Triangles";
case RPPrimitiveTopology::Lines:
return "Lines";
}
return "Unknown value";
}
std::string to_string(const RPBlendFactor val) {
switch(val) {
case RPBlendFactor::One:
return "One";
case RPBlendFactor::Zero:
return "Zero";
case RPBlendFactor::SrcColor:
return "SrcColor";
case RPBlendFactor::DstColor:
return "DstColor";
case RPBlendFactor::OneMinusSrcColor:
return "OneMinusSrcColor";
case RPBlendFactor::OneMinusDstColor:
return "OneMinusDstColor";
case RPBlendFactor::SrcAlpha:
return "SrcAlpha";
case RPBlendFactor::DstAlpha:
return "DstAlpha";
case RPBlendFactor::OneMinusSrcAlpha:
return "OneMinusSrcAlpha";
case RPBlendFactor::OneMinusDstAlpha:
return "OneMinusDstAlpha";
}
return "Unknown value";
}
std::string to_string(const RenderQueue val) {
switch(val) {
case RenderQueue::Transparent:
return "Transparent";
case RenderQueue::Opaque:
return "Opaque";
case RenderQueue::Cutout:
return "Cutout";
}
return "Unknown value";
}
std::string to_string(const RasterizerState val) {
switch(val) {
case RasterizerState::Blending:
return "Blending";
case RasterizerState::InvertCulling:
return "InvertCulling";
case RasterizerState::DisableCulling:
return "DisableCulling";
case RasterizerState::DisableDepthWrite:
return "DisableDepthWrite";
case RasterizerState::DisableDepthTest:
return "DisableDepthTest";
case RasterizerState::EnableStencilTest:
return "EnableStencilTest";
case RasterizerState::StencilWrite:
return "StencilWrite";
case RasterizerState::DisableColorWrite:
return "DisableColorWrite";
case RasterizerState::EnableAlphaToCoverage:
return "EnableAlphaToCoverage";
case RasterizerState::DisableAlphaWrite:
return "DisableAlphaWrite";
}
return "Unknown value";
}
uint32_t pixel_format_to_pixel_width(const rhi::PixelFormat format) {
switch(format) {
case rhi::PixelFormat::Rgba8:
return 4 * 8;
case rhi::PixelFormat::Rgba16F:
return 4 * 16;
case rhi::PixelFormat::Rgba32F:
return 4 * 32;
case rhi::PixelFormat::Depth32:
return 32;
case rhi::PixelFormat::Depth24Stencil8:
return 32;
default:
return 32;
}
}
} // namespace nova::renderer::renderpack
| 27,612
|
C++
|
.cpp
| 603
| 33.477612
| 146
| 0.560532
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,200
|
renderpack_validator.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/renderpack_validator.cpp
|
#include "renderpack_validator.hpp"
#include <array>
#include <array>
#include <rx/core/log.h>
#include "nova_renderer/util/utils.hpp"
#include "../json_utils.hpp"
namespace nova::renderer::renderpack {
RX_LOG("RenderpackValidator", logger);
constexpr uint32_t NUM_REQUIRED_FIELDS = 23;
/*!
* \brief All the default values for a JSON pipeline
*
* If a field is in `pipeline_data` but not in this structure, it is a required field and cannot be given a
* default value. It will thus cause an exception
*/
const char* required_fields[NUM_REQUIRED_FIELDS] = {"parentName",
"defines",
"states",
"frontFace",
"backFace",
"fallback",
"depthBias",
"slopeScaledDepthBias",
"stencilRef",
"stencilReadMask",
"stencilWriteMask",
"msaaSupport",
"primitiveMode",
"sourceBlendFactor",
"destinationBlendFactor",
"alphaSrc",
"alphaDst",
"depthFunc",
"renderQueue",
"fragmentShader",
"tessellationControlShader",
"tessellationEvaluationShader",
"geometryShader"};
;
const std::array<std::string[3]> required_graphics_pipeline_fields = {"name", "pass", "vertexShader"};
const std::array<std::string[2]> required_texture_fields = {"pixelFormat", "dimensionType"};
void ensure_field_exists(
nlohmann::json& j, const std::string& field_name, const std::string& context, const nlohmann::json& default_value, ValidationReport& report);
static std::string pipeline_msg(const std::string& name, const std::string& field_name) {
return std::string::format("Pipeline %s: Missing field %s", name, field_name);
}
    /*!
     * \brief Validates one pipeline object: warns about missing optional fields,
     * errors on missing required fields (name, pass, vertexShader)
     *
     * NOTE(review): std::string::format does not exist in the standard library,
     * and `name_json ? ...` relies on the json type being truthiness-testable —
     * both look like leftovers from a different string/json API; confirm
     */
    ValidationReport validate_graphics_pipeline(nlohmann::json& pipeline_json) {
        ValidationReport report;
        const auto name_json = pipeline_json["name"];
        const auto name = name_json ? name_json.as_string() : "<NAME_MISSING>";
        // Don't need to check for the name's existence here, it'll be checked with the rest of the required fields
        const std::string pipeline_context = std::string::format("Pipeline %s", name);
        // Check non-required fields first
        for(uint32_t i = 0; i < NUM_REQUIRED_FIELDS; i++) {
            if(!pipeline_json[required_fields[i]]) {
                report.warnings.emplace_back(std::string::format("%s: Missing optional field %s", pipeline_context, required_fields[i]));
            }
        }
        // Check required items
        report.errors.reserve(required_graphics_pipeline_fields.size());
        for(uint32_t i = 0; i < required_graphics_pipeline_fields.size(); i++) {
            const auto& field_name = required_graphics_pipeline_fields[i];
            if(!pipeline_json[field_name.data()]) {
                report.errors.emplace_back(pipeline_msg(name, field_name));
            }
        }
        return report;
    }
static std::string resources_msg(const std::string& msg) { return std::string::format("Resources file: %s", msg); }
    /*!
     * \brief Validates the resources file: textures are optional (a warning when
     * absent/empty), samplers are optional but must be an array when present
     */
    ValidationReport validate_renderpack_resources_data(nlohmann::json& resources_json) {
        ValidationReport report;
        bool missing_textures = false;
        const auto textures_itr = resources_json["textures"];
        if(!textures_itr) {
            missing_textures = true;
        } else {
            // A non-array or empty "textures" entry counts as missing too
            if(!textures_itr.is_array() || textures_itr.is_empty()) {
                missing_textures = true;
            } else {
                textures_itr.each([&](const nlohmann::json& tex) {
                    const ValidationReport texture_report = validate_texture_data(tex);
                    report.merge_in(texture_report);
                });
            }
        }
        if(missing_textures) {
            report.warnings.emplace_back(
                resources_msg("Missing dynamic resources. If you ONLY use the backbuffer in your renderpack, you can ignore this message"));
        }
        // NOTE(review): .each/.is_empty are not nlohmann::json API — confirm what
        // json type is actually in use here
        const nlohmann::json samplers_itr = resources_json["samplers"];
        if(samplers_itr) {
            if(!samplers_itr.is_array()) {
                report.errors.emplace_back(resources_msg("Samplers array must be an array, but like it isn't"));
            } else {
                samplers_itr.each([&](const nlohmann::json& sampler) {
                    const ValidationReport sampler_report = validate_sampler_data(sampler);
                    report.merge_in(sampler_report);
                });
            }
        }
        return report;
    }
static std::string texture_msg(const std::string& name, const std::string& msg) { return std::string::format("Texture %s: %s", name, msg); }
    /*!
     * \brief Validates a single texture entry: requires `name` and a valid `format`
     */
    ValidationReport validate_texture_data(const nlohmann::json& texture_json) {
        ValidationReport report;
        const auto name_json = texture_json["name"];
        std::string name;
        if(name_json) {
            name = name_json.as_string();
        } else {
            name = "<NAME_MISSING>";
            // NOTE(review): this writes into a const reference — it cannot compile
            // as written, and mutating the input during validation looks
            // unintended; confirm whether the parameter should be non-const
            texture_json["name"] = name.data();
            report.errors.emplace_back(texture_msg(name, "Missing field name"));
        }
        auto format = texture_json["format"];
        if(!format) {
            report.errors.emplace_back(texture_msg(name, "Missing field format"));
        } else {
            // Validate the nested format object and fold its findings in
            const ValidationReport format_report = validate_texture_format(format, name);
            report.merge_in(format_report);
        }
        return report;
    }
static std::string format_msg(const std::string& tex_name, const std::string& msg) {
return std::string::format("Format of texture %s: %s", tex_name, msg);
}
    /*!
     * \brief Validates a texture format object: pixelFormat/dimensionType have
     * defaults (warnings), width/height are required (errors)
     *
     * NOTE(review): std::string::format does not exist — leftover from a
     * different string API; confirm
     */
    ValidationReport validate_texture_format(const nlohmann::json& format_json, const std::string& texture_name) {
        ValidationReport report;
        const std::string context = std::string::format("Format of texture %s", texture_name);
        for(uint32_t i = 0; i < required_texture_fields.size(); i++) {
            if(!format_json[required_texture_fields[i].data()]) {
                report.warnings.emplace_back(std::string::format("%s: Missing required field %s", context, required_texture_fields[i]));
            }
        }
        const bool missing_width = !format_json["width"];
        if(missing_width) {
            report.errors.emplace_back(format_msg(texture_name, "Missing field width"));
        }
        const bool missing_height = !format_json["height"];
        if(missing_height) {
            report.errors.emplace_back(format_msg(texture_name, "Missing field height"));
        }
        return report;
    }
static std::string sampler_msg(const std::string& name, const std::string& msg) { return std::string::format("Sampler %s: %s", name, msg); }
    /*!
     * \brief Validates a sampler entry: `name`, `filter`, and `wrapMode` are all
     * required
     */
    ValidationReport validate_sampler_data(const nlohmann::json& sampler_json) {
        ValidationReport report;
        // get_json_value's sentinel default doubles as the missing-name marker
        const std::string name = get_json_value<std::string>(sampler_json, "name", "<NAME_MISSING>");
        if(name == "<NAME_MISSING>") {
            report.errors.emplace_back(sampler_msg(name, "Missing field name"));
        }
        const bool missing_filter = !sampler_json["filter"];
        if(missing_filter) {
            report.errors.emplace_back(sampler_msg(name, "Missing field filter"));
        }
        const bool missing_wrap_mode = !sampler_json["wrapMode"];
        if(missing_wrap_mode) {
            report.errors.emplace_back(sampler_msg(name, "Missing field wrapMode"));
        }
        return report;
    }
static std::string material_msg(const std::string& name, const std::string& msg) {
return std::string::format("Material %s: %s", name, msg);
}
static std::string material_pass_msg(const std::string& mat_name, const std::string& pass_name, const std::string& error) {
return std::string::format("Material pass %s in material %s: %s", pass_name, mat_name, error);
}
    /*!
     * \brief Validates one material: requires a name, a geometry filter, and a
     * non-empty passes array; each pass needs a name and a pipeline, and missing
     * or empty bindings only warn
     */
    ValidationReport validate_material(const nlohmann::json& material_json) {
        ValidationReport report;
        const auto name_maybe = material_json["name"];
        std::string name = "<NAME_MISSING>";
        if(!name_maybe) {
            report.errors.emplace_back(material_msg("<NAME_MISSING>", "Missing material name"));
        } else {
            name = name_maybe.as_string();
        }
        const bool missing_geometry_filter = !material_json["filter"];
        if(missing_geometry_filter) {
            report.errors.emplace_back(material_msg(name, "Missing geometry filter"));
        }
        const bool missing_passes = !material_json["passes"];
        if(missing_passes) {
            report.errors.emplace_back(material_msg(name, "Missing material passes"));
        } else {
            const nlohmann::json& passes_json = material_json["passes"];
            // A malformed passes field makes the rest of the checks meaningless,
            // so bail out early
            if(!passes_json.is_array()) {
                report.errors.emplace_back(material_msg(name, "Passes field must be an array"));
                return report;
            }
            if(passes_json.is_empty()) {
                report.errors.emplace_back(material_msg(name, "Passes field must have at least one item"));
                return report;
            }
            passes_json.each([&](const nlohmann::json& pass_json) {
                const auto pass_name_maybe = pass_json["name"];
                std::string pass_name = "<NAME_MISSING>";
                if(!pass_name_maybe) {
                    report.errors.emplace_back(material_pass_msg(name, pass_name, "Missing field name"));
                } else {
                    pass_name = pass_name_maybe.as_string();
                }
                if(!pass_json["pipeline"]) {
                    report.errors.emplace_back(material_pass_msg(name, pass_name, "Missing field pipeline"));
                }
                const auto bindings = pass_json["bindings"];
                if(!bindings) {
                    report.warnings.emplace_back(material_pass_msg(name, pass_name, "Missing field bindings"));
                } else {
                    if(bindings.is_empty()) {
                        report.warnings.emplace_back(material_pass_msg(name, pass_name, "Field bindings exists but it's empty"));
                    }
                }
            });
        }
        return report;
    }
void ensure_field_exists(
nlohmann::json& j, const char* field_name, const std::string& context, const nlohmann::json& default_value, ValidationReport& report) {
if(!j[field_name]) {
j[field_name] = default_value[field_name];
size_t out_size;
// const char* json_string = reinterpret_cast<const char*>(json_write_minified(j[field_name].raw(), &out_size));
report.warnings.emplace_back(context + ": Missing field " + field_name + ". A default value will be used");
}
}
void print(const ValidationReport& report) {
report.errors.each_fwd([&](const std::string& error) { logger->error("%s", error); });
report.warnings.each_fwd([&](const std::string& warning) { logger->debug("%s", warning); });
}
void ValidationReport::merge_in(const ValidationReport& other) {
errors += other.errors;
warnings += other.warnings;
}
} // namespace nova::renderer::renderpack
| 12,357
|
C++
|
.cpp
| 234
| 37.901709
| 149
| 0.551184
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,201
|
shader_includer.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/shader_includer.cpp
|
#include "nova_renderer/loading/shader_includer.hpp"
#include <rx/core/log.h>
#include "nova_renderer/filesystem/folder_accessor.hpp"
namespace nova::renderer {
RX_LOG("NovaDxcIncludeHandler", logger);
constexpr const char* STANDARD_PIPELINE_LAYOUT_FILE_NAME = "./nova/standard_pipeline_layout.hlsl";
NovaDxcIncludeHandler::NovaDxcIncludeHandler(rx::memory::allocator& allocator,
IDxcLibrary& library,
filesystem::FolderAccessorBase* folder_accessor)
: allocator{allocator}, library{library}, folder_accessor{folder_accessor}, builtin_files{&allocator} {
const auto standard_pipeline_layout_hlsl = R"(
struct Camera {
float4x4 view;
float4x4 projection;
float4x4 previous_view;
float4x4 previous_projection;
};
/*!
* \brief All the push constants that are available to a shader that uses the standard pipeline layout
*/
[[vk::push_constant]]
struct StandardPushConstants {
/*!
* \brief Index of the camera that will render this draw
*/
uint camera_index;
/*!
* \brief Index of the material data for the current draw
*/
uint material_index;
} constants;
/*!
* \brief Array of all the materials
*/
[[vk::binding(0, 0)]]
StructuredBuffer<Camera> cameras : register (t0);
/*!
* \brief Array of all the materials
*/
[[vk::binding(1, 0)]]
StructuredBuffer<MaterialData> material_buffer : register (t1);
/*!
* \brief Point sampler you can use to sample any texture
*/
[[vk::binding(2, 0)]]
SamplerState point_sampler : register(s0);
/*!
* \brief Bilinear sampler you can use to sample any texture
*/
[[vk::binding(3, 0)]]
SamplerState bilinear_filter : register(s1);
/*!
* \brief Trilinear sampler you can use to sample any texture
*/
[[vk::binding(4, 0)]]
SamplerState trilinear_filter : register(s3);
/*!
* \brief Array of all the textures that are available for a shader to sample from
*/
[[vk::binding(5, 0)]]
Texture2D textures[] : register(t3);
)";
builtin_files.insert(STANDARD_PIPELINE_LAYOUT_FILE_NAME, standard_pipeline_layout_hlsl);
}
    /*!
     * \brief Minimal COM QueryInterface: only IDxcIncludeHandler is supported
     */
    HRESULT NovaDxcIncludeHandler::QueryInterface(const REFIID class_id, void** output_object) {
        if(!output_object) {
            return E_INVALIDARG;
        }
        *output_object = nullptr;
        if(class_id == __uuidof(IDxcIncludeHandler)) {
            *output_object = reinterpret_cast<LPVOID>(this);
            // QueryInterface hands out a new reference, so bump the refcount
            AddRef();
            return 0;
        }
        return E_NOINTERFACE;
    }
#if NOVA_WINDOWS
    // Manual COM-style reference counting; the mutex keeps the count consistent
    // when calls come from multiple threads
    ULONG NovaDxcIncludeHandler::AddRef() {
        rx::concurrency::scope_lock l{mtx};
        num_refs++;
        return num_refs;
    }
    // Destroys the handler when the last reference is released
    ULONG NovaDxcIncludeHandler::Release() {
        rx::concurrency::scope_lock l{mtx};
        const auto ref_count = --num_refs;
        if(ref_count == 0) {
            // TODO: Figure out how to use a Rex allocator instead of forcing things to be on the heap
            delete this;
        }
        return ref_count;
    }
#endif
HRESULT NovaDxcIncludeHandler::LoadSource(const LPCWSTR wide_filename, IDxcBlob** included_source) {
const rx::wide_string wide_filename_str{&allocator, reinterpret_cast<const rx_u16*>(wide_filename)};
const auto filename = wide_filename_str.to_utf8();
logger->debug("Trying to include file (%s)", filename);
if(const auto* file = builtin_files.find(filename)) {
IDxcBlobEncoding* encoding;
library.CreateBlobWithEncodingFromPinned(file->data(), static_cast<uint32_t>(file->size()), CP_UTF8, &encoding);
*included_source = encoding;
logger->debug("Included %s from builtin snippets", filename);
return 0;
} else if(folder_accessor != nullptr && folder_accessor->does_resource_exist(filename)) {
const auto included_shader = folder_accessor->read_text_file(filename);
IDxcBlobEncoding* encoding;
library.CreateBlobWithEncodingFromPinned(filename.data(), static_cast<uint32_t>(filename.size()), CP_UTF8, &encoding);
*included_source = encoding;
logger->debug("Included %s from renderpack", filename);
return 0;
}
return ERROR_FILE_NOT_FOUND;
}
} // namespace nova::renderer
| 4,358
|
C++
|
.cpp
| 113
| 31.79646
| 130
| 0.658513
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,202
|
json_interop.cpp
|
NovaMods_nova-renderer/src/loading/renderpack/json_interop.cpp
|
#include "json_interop.hpp"
#include "../json_utils.hpp"
namespace nova::renderer::shaderpack {
    // Deserializes a TextureFormat. Every field has a default: RGBA8 pixels,
    // screen-relative dimensions, and a 0x0 size.
    void from_json(const nlohmann::json& j, TextureFormat& format) {
        format.pixel_format = get_json_value<PixelFormatEnum>(j, "pixelFormat", PixelFormatEnum::RGBA8, pixel_format_enum_from_string);
        format.dimension_type = get_json_value<TextureDimensionTypeEnum>(j,
                                                                         "dimensionType",
                                                                         TextureDimensionTypeEnum::ScreenRelative,
                                                                         texture_dimension_type_enum_from_string);
        format.width = get_json_value<float>(j, "width", 0);
        format.height = get_json_value<float>(j, "height", 0);
    }
    // Deserializes a TextureCreateInfo. "name" and "format" are required — the
    // optionals returned by get_json_value are dereferenced unchecked here.
    void from_json(const nlohmann::json& j, TextureCreateInfo& tex) {
        tex.name = *get_json_value<rx::string>(j, "name");
        tex.format = *get_json_value<TextureFormat>(j, "format");
    }
    // Deserializes a SamplerCreateInfo; defaults to point filtering with clamped wrapping
    void from_json(const nlohmann::json& j, SamplerCreateInfo& sampler) {
        sampler.filter = get_json_value<TextureFilterEnum>(j, "filter", TextureFilterEnum::Point, texture_filter_enum_from_string);
        sampler.wrap_mode = get_json_value<WrapModeEnum>(j, "wrapMode", WrapModeEnum::Clamp, wrap_mode_enum_from_string);
    }
    // Deserializes the renderpack's resource manifest: render targets come from
    // the "textures" array, samplers from "samplers"
    void from_json(const nlohmann::json& j, ShaderpackResourcesData& res) {
        res.render_targets = get_json_array<TextureCreateInfo>(j, "textures");
        res.samplers = get_json_array<SamplerCreateInfo>(j, "samplers");
    }
void from_json(const nlohmann::json& j, RenderPassCreateInfo& pass) {
// Something something string bad
const auto& std_vec = get_json_array<std::string>(j, "textureInputs").each_fwd([&](const std::string& str) {
pass.texture_inputs.emplace_back(str.c_str());
});
pass.texture_outputs = get_json_array<TextureAttachmentInfo>(j, "textureOutputs");
pass.depth_texture = get_json_value<TextureAttachmentInfo>(j, "depthTexture");
get_json_array<std::string>(j, "inputBuffers").each_fwd([&](const std::string& str) {
pass.input_buffers.emplace_back(str.c_str());
});
get_json_array<std::string>(j, "outputBuffers").each_fwd([&](const std::string& str) {
pass.output_buffers.emplace_back(str.c_str());
});
pass.name = get_json_value<std::string>(j, "name", std::string{"<NAME_MISSING>"}).c_str();
}
    // Deserializes one face's stencil state. All ops default to Keep, the compare
    // op to Equal, and both masks to 0.
    void from_json(const nlohmann::json& j, StencilOpState& stencil_op) {
        stencil_op.fail_op = get_json_value<StencilOpEnum>(j, "failOp", StencilOpEnum::Keep, stencil_op_enum_from_string);
        stencil_op.pass_op = get_json_value<StencilOpEnum>(j, "passOp", StencilOpEnum::Keep, stencil_op_enum_from_string);
        stencil_op.depth_fail_op = get_json_value<StencilOpEnum>(j, "depthFailOp", StencilOpEnum::Keep, stencil_op_enum_from_string);
        stencil_op.compare_op = get_json_value<CompareOpEnum>(j, "compareOp", CompareOpEnum::Equal, compare_op_enum_from_string);
        stencil_op.compare_mask = get_json_value<uint32_t>(j, "compareMask", 0);
        stencil_op.write_mask = get_json_value<uint32_t>(j, "writeMask", 0);
    }
void from_json(const nlohmann::json& j, PipelineCreateInfo& pipeline) {
pipeline.name = get_json_value<std::string>(j, "name")->c_str();
pipeline.parent_name = get_json_value<std::string>(j, "parent", std::string{}).c_str();
pipeline.pass = get_json_value<std::string>(j, "pass")->c_str();
get_json_array<std::string>(j, "defines").each_fwd([&](const std::string& str) { pipeline.defines.emplace_back(str.c_str()); });
pipeline.states = get_json_array<StateEnum>(j, "states", state_enum_from_string);
pipeline.front_face = get_json_value<StencilOpState>(j, "frontFace");
pipeline.back_face = get_json_value<StencilOpState>(j, "backFace");
pipeline.fallback = get_json_value<std::string>(j, "fallback", std::string{}).c_str();
pipeline.depth_bias = get_json_value<float>(j, "depthBias", 0);
pipeline.slope_scaled_depth_bias = get_json_value<float>(j, "slopeScaledDepthBias", 0);
pipeline.stencil_ref = get_json_value<uint32_t>(j, "stencilRef", 0);
pipeline.stencil_read_mask = get_json_value<uint32_t>(j, "stencilReadMask", 0);
pipeline.stencil_write_mask = get_json_value<uint32_t>(j, "stencilWriteMask", 0);
pipeline.msaa_support = get_json_value<MsaaSupportEnum>(j, "msaaSupport", MsaaSupportEnum::None, msaa_support_enum_from_string);
pipeline.primitive_mode = get_json_value<PrimitiveTopologyEnum>(j,
"primitiveMode",
PrimitiveTopologyEnum::Triangles,
primitive_topology_enum_from_string);
pipeline.source_color_blend_factor = get_json_value<BlendFactorEnum>(j,
"sourceBlendFactor",
BlendFactorEnum::One,
blend_factor_enum_from_string);
pipeline.destination_color_blend_factor = get_json_value<BlendFactorEnum>(j,
"destBlendFactor",
BlendFactorEnum::Zero,
blend_factor_enum_from_string);
pipeline.source_alpha_blend_factor = get_json_value<BlendFactorEnum>(j,
"alphaSrc",
BlendFactorEnum::One,
blend_factor_enum_from_string);
pipeline.destination_alpha_blend_factor = get_json_value<BlendFactorEnum>(j,
"alphaDest",
BlendFactorEnum::Zero,
blend_factor_enum_from_string);
pipeline.depth_func = get_json_value<CompareOpEnum>(j, "depthFunc", CompareOpEnum::Less, compare_op_enum_from_string);
pipeline.render_queue = get_json_value<RenderQueueEnum>(j, "renderQueue", RenderQueueEnum::Opaque, render_queue_enum_from_string);
pipeline.vertex_shader.filename = get_json_value<std::string>(j, "vertexShader", std::string{"<NAME_MISSING>"}).c_str();
rx::optional<std::string> geometry_shader_name = get_json_value<std::string>(j, "geometryShader");
if(geometry_shader_name) {
pipeline.geometry_shader = rx::optional<ShaderSource>();
pipeline.geometry_shader->filename = geometry_shader_name->c_str();
}
rx::optional<std::string> tess_control_shader_name = get_json_value<std::string>(j, "tessellationControlShader");
if(tess_control_shader_name) {
pipeline.tessellation_control_shader = rx::optional<ShaderSource>();
pipeline.tessellation_control_shader->filename = tess_control_shader_name->c_str();
}
rx::optional<std::string> tess_eval_shader_name = get_json_value<std::string>(j, "tessellationEvalShader");
if(tess_eval_shader_name) {
pipeline.tessellation_evaluation_shader = rx::optional<ShaderSource>();
pipeline.tessellation_evaluation_shader->filename = tess_eval_shader_name->c_str();
}
rx::optional<std::string> fragment_shader_name = get_json_value<std::string>(j, "fragmentShader");
if(fragment_shader_name) {
pipeline.fragment_shader = rx::optional<ShaderSource>();
pipeline.fragment_shader->filename = fragment_shader_name->c_str();
}
pipeline.scissor_mode = get_json_value<ScissorTestMode>(j, "scissorMode", ScissorTestMode::Off, scissor_test_mode_from_string);
}
    // Deserializes a MaterialPass. "name", "pipeline", and "bindings" are all
    // required; their optionals are dereferenced unchecked.
    void from_json(const nlohmann::json& j, MaterialPass& pass) {
        pass.name = *get_json_value<rx::string>(j, "name");
        pass.pipeline = *get_json_value<rx::string>(j, "pipeline");
        // std allowed for JSON interop
        const auto& bindings_map = *get_json_value<std::unordered_map<std::string, std::string>>(j, "bindings");
        for(const auto& [binding_name, bound_resource] : bindings_map) {
            // NOTE(review): two-argument insert is the Rex map API — confirm
            // `pass.bindings` is an rx::map and not a std::unordered_map
            pass.bindings.insert(binding_name.c_str(), bound_resource.c_str());
        }
    }
    // Deserializes a MaterialData, then stamps the material's name onto every
    // parsed pass so each pass knows which material owns it
    void from_json(const nlohmann::json& j, MaterialData& mat) {
        mat.name = *get_json_value<rx::string>(j, "name");
        mat.passes = get_json_array<MaterialPass>(j, "passes");
        mat.geometry_filter = *get_json_value<rx::string>(j, "filter");
        mat.passes.each_fwd([&](MaterialPass& pass) { pass.material_name = mat.name; });
    }
void from_json(const nlohmann::json& j, TextureAttachmentInfo& tex) {
tex.name = *get_json_value<rx::string>(j, "name");
tex.clear = get_json_value<bool>(j, "clear", false);
}
void from_json(const nlohmann::json& j, rx::vector<RenderPassCreateInfo>& passes) {
for(const auto& node : j) {
passes.push_back(node.get<RenderPassCreateInfo>());
}
}
    // Deserializes the top-level rendergraph: renderpack-defined passes plus the
    // names of builtin passes the pack wants enabled
    void from_json(const nlohmann::json& j, RendergraphData& data) {
        data.passes = get_json_array<RenderPassCreateInfo>(j, "passes");
        get_json_array<std::string>(j, "builtin_passes").each_fwd([&](const std::string& str) {
            data.builtin_passes.emplace_back(str.c_str());
        });
    }
} // namespace nova::renderer::shaderpack
| 10,050
|
C++
|
.cpp
| 138
| 55.130435
| 138
| 0.580172
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,203
|
console_log_stream.cpp
|
NovaMods_nova-renderer/src/logging/console_log_stream.cpp
|
#include "console_log_stream.hpp"
#include <stdio.h>
namespace nova {
    // A Rex log stream that forwards everything to the process's stdout
    StdoutStream::StdoutStream() : rx::stream(k_flush | k_write) {}

    // Writes `size` bytes to stdout.
    // Bug fix: the old version returned `size` unconditionally even when fwrite
    // failed; writing byte-sized items lets us report the actual count written.
    rx_u64 StdoutStream::on_write(const uint8_t* data, const rx_u64 size) {
        return fwrite(data, 1, size, stdout);
    }

    // Flushes stdout; reports whether the flush actually succeeded (fflush
    // returns 0 on success) instead of always claiming it did
    bool StdoutStream::on_flush() { return fflush(stdout) == 0; }

    const std::string& StdoutStream::name() const& { return my_name; }
} // namespace nova
| 463
|
C++
|
.cpp
| 14
| 27.714286
| 75
| 0.641892
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,204
|
resource_loader.cpp
|
NovaMods_nova-renderer/src/renderer/resource_loader.cpp
|
#include "nova_renderer/resource_loader.hpp"

#include <array>

#include "nova_renderer/nova_renderer.hpp"
using namespace nova::mem;
namespace nova::renderer {
RX_LOG("DeviceResources", logger);
using namespace rhi;
using namespace renderpack;
constexpr size_t STAGING_BUFFER_ALIGNMENT = 2048;
constexpr size_t STAGING_BUFFER_TOTAL_MEMORY_SIZE = 8388608;
constexpr size_t UNIFORM_BUFFER_ALIGNMENT = 64; // TODO: Get a real value
constexpr size_t UNIFORM_BUFFER_TOTAL_MEMORY_SIZE = 8096; // TODO: Get a real value
size_t size_in_bytes(PixelFormat pixel_format);
    // Sets up per-device resource tracking and immediately creates the builtin
    // white/black/gray fallback textures.
    // NOTE(review): the container members are initialized with an allocator
    // pointer, which is the Rex container API — this file appears mid-migration
    // between rx:: and std:: types; confirm against the header.
    DeviceResources::DeviceResources(NovaRenderer& renderer)
        : renderer{renderer},
          device{renderer.get_device()},
          internal_allocator{renderer.get_global_allocator()},
          textures{&internal_allocator},
          staging_buffers{&internal_allocator},
          uniform_buffers{&internal_allocator} {
        create_default_textures();
    }
    // Creates a GPU uniform buffer of `size` bytes and registers it under `name`.
    // Returns an accessor on success, or nullopt (with an error log) when the RHI
    // cannot create the buffer.
    std::optional<BufferResourceAccessor> DeviceResources::create_uniform_buffer(const std::string& name, const Bytes size) {
        // Leftover profiling label; only ZoneScoped actually feeds Tracy here
        const auto event_name = std::string::format("create_uniform_buffer(%s)", name);
        ZoneScoped; BufferResource resource = {};
        resource.name = name;
        resource.size = size;
        const RhiBufferCreateInfo create_info = {std::string::format("UniformBuffer%s", name), size.b_count(), BufferUsage::UniformBuffer};
        resource.buffer = device.create_buffer(create_info, internal_allocator);
        if(resource.buffer == nullptr) {
            logger->error("Could not create uniform buffer %s", name);
            return rx::nullopt;
        }
        uniform_buffers.insert(name, resource);
        return BufferResourceAccessor{&uniform_buffers, name};
    }
std::optional<BufferResourceAccessor> DeviceResources::get_uniform_buffer(const std::string& name) {
if(uniform_buffers.find(name) != nullptr) {
return BufferResourceAccessor{&uniform_buffers, name};
}
return rx::nullopt;
}
    // Removes `name` from the uniform-buffer map. The RHI buffer itself is
    // currently leaked (see the TODO) — only the bookkeeping entry is erased.
    void DeviceResources::destroy_uniform_buffer(const std::string& name) {
        if(const BufferResource* res = uniform_buffers.find(name)) {
            // TODO device.destroy_buffer(res->buffer);
        }
        uniform_buffers.erase(name);
    }
    // Creates a sampled image and, when `data` is non-null, synchronously uploads
    // width * height * bytes-per-pixel bytes of `data` into it through a pooled
    // staging buffer on the transfer queue. Blocks on a fence until the copy is
    // complete, so the caller may free `data` as soon as this returns. The new
    // texture is appended to the textures array and indexed in the name->index
    // map; an accessor to it is returned.
    std::optional<TextureResourceAccessor> DeviceResources::create_texture(const std::string& name,
                                                                           const std::size_t width,
                                                                           const std::size_t height,
                                                                           const PixelFormat pixel_format,
                                                                           const void* data,
                                                                           rx::memory::allocator& allocator) {
        // Leftover profiling label; only ZoneScoped feeds Tracy
        const auto event_name = std::string::format("create_texture(%s)", name);
        ZoneScoped;
        TextureResource resource = {};
        resource.name = name;
        resource.width = width;
        resource.height = height;
        resource.format = pixel_format;
        const size_t pixel_size = size_in_bytes(pixel_format);
        renderpack::TextureCreateInfo info = {};
        info.name = name;
        info.usage = ImageUsage::SampledImage;
        info.format.pixel_format = pixel_format;
        info.format.dimension_type = TextureDimensionType::Absolute;
        info.format.width = static_cast<float>(width);
        info.format.height = static_cast<float>(height);
        resource.image = device.create_image(info, allocator);
        resource.image->is_dynamic = false;
        if(data != nullptr) {
            ZoneScoped; RhiBuffer* staging_buffer = get_staging_buffer_with_size(width * height * pixel_size);
            RhiRenderCommandList* cmds = device.create_command_list(0,
                                                                    QueueType::Transfer,
                                                                    RhiRenderCommandList::Level::Primary,
                                                                    allocator);
            cmds->set_debug_name(std::string::format("UploadTo%s", name));
            // Undefined -> CopyDestination on the transfer queue so the image can receive the upload
            RhiResourceBarrier initial_texture_barrier = {};
            initial_texture_barrier.resource_to_barrier = resource.image;
            initial_texture_barrier.access_before_barrier = ResourceAccess::CopyRead;
            initial_texture_barrier.access_after_barrier = ResourceAccess::CopyWrite;
            initial_texture_barrier.old_state = ResourceState::Undefined;
            initial_texture_barrier.new_state = ResourceState::CopyDestination;
            initial_texture_barrier.source_queue = QueueType::Transfer;
            initial_texture_barrier.destination_queue = QueueType::Transfer;
            initial_texture_barrier.image_memory_barrier.aspect = ImageAspect::Color;
            std::vector<RhiResourceBarrier> initial_barriers{&allocator};
            initial_barriers.push_back(initial_texture_barrier);
            cmds->resource_barriers(PipelineStage::Transfer, PipelineStage::Transfer, initial_barriers);
            cmds->upload_data_to_image(resource.image, width, height, pixel_size, staging_buffer, data);
            // CopyDestination -> ShaderRead, handing ownership from the transfer
            // queue to the graphics queue so shaders can sample the texture
            RhiResourceBarrier final_texture_barrier = {};
            final_texture_barrier.resource_to_barrier = resource.image;
            final_texture_barrier.access_before_barrier = ResourceAccess::CopyWrite;
            final_texture_barrier.access_after_barrier = ResourceAccess::ShaderRead;
            final_texture_barrier.old_state = ResourceState::CopyDestination;
            final_texture_barrier.new_state = ResourceState::ShaderRead;
            final_texture_barrier.source_queue = QueueType::Transfer;
            final_texture_barrier.destination_queue = QueueType::Graphics;
            final_texture_barrier.image_memory_barrier.aspect = ImageAspect::Color;
            std::vector<RhiResourceBarrier> final_barriers{&allocator};
            final_barriers.push_back(final_texture_barrier);
            cmds->resource_barriers(PipelineStage::Transfer, PipelineStage::VertexShader, final_barriers);
            RhiFence* upload_done_fence = device.create_fence(false, allocator);
            device.submit_command_list(cmds, QueueType::Transfer, upload_done_fence);
            // Be sure that the data copy is complete, so that this method doesn't return before the GPU is done with the staging buffer
            std::vector<RhiFence*> upload_done_fences{&allocator};
            upload_done_fences.push_back(upload_done_fence);
            device.wait_for_fences(upload_done_fences);
            device.destroy_fences(upload_done_fences, allocator);
            return_staging_buffer(staging_buffer);
            logger->debug("Uploaded texture data to texture %s", name);
        }
        auto idx = textures.size();
        textures.push_back(resource);
        texture_name_to_idx.insert(name, static_cast<uint32_t>(idx));
        logger->debug("Added texture %s to the textures array, there's now %u textures total", name, textures.size());
        return TextureResourceAccessor{&textures, idx};
    }
std::optional<uint32_t> DeviceResources::get_texture_idx_for_name(const std::string& name) const {
if(auto* idx = texture_name_to_idx.find(name); idx != nullptr) {
return *idx;
}
// Return a default value so the user sees something beautiful
return 0_u32;
}
    // Looks up a texture by name, returning an accessor when found.
    // NOTE(review): get_texture_idx_for_name always returns an engaged optional
    // (it falls back to index 0 for unknown names), so the error/nullopt path
    // below appears unreachable — confirm the intended behavior.
    std::optional<TextureResourceAccessor> DeviceResources::get_texture(const std::string& name) {
        if(auto idx = get_texture_idx_for_name(name); idx) {
            return TextureResourceAccessor{&textures, *idx};
        }
#if NOVA_DEBUG
        else {
            logger->error("Could not find image \"%s\"", name);
        }
#endif
        return rx::nullopt;
    }
    // Creates a render-target image and immediately transitions it out of the
    // Undefined layout on the graphics queue — into DepthWrite for depth formats,
    // RenderTarget otherwise — blocking on a fence until the transition has
    // executed. The target is registered under `name`; returns nullopt (with an
    // error log) when image creation fails. `can_be_sampled` is accepted but not
    // yet supported.
    std::optional<RenderTargetAccessor> DeviceResources::create_render_target(const std::string& name,
                                                                              const size_t width,
                                                                              const size_t height,
                                                                              const PixelFormat pixel_format,
                                                                              rx::memory::allocator& allocator,
                                                                              const bool /* can_be_sampled // Not yet supported */) {
        // Leftover profiling label; only ZoneScoped feeds Tracy
        const auto event_name = std::string::format("create_render_target(%s)", name);
        ZoneScoped;
        renderpack::TextureCreateInfo create_info;
        create_info.name = name;
        create_info.usage = ImageUsage::RenderTarget;
        create_info.format.pixel_format = pixel_format;
        create_info.format.dimension_type = TextureDimensionType::Absolute;
        create_info.format.width = static_cast<float>(width);
        create_info.format.height = static_cast<float>(height);
        auto* image = device.create_image(create_info, allocator);
        if(image) {
            // Barrier it into the correct format and return it
            image->is_dynamic = true;
            TextureResource resource = {};
            resource.name = name;
            resource.format = pixel_format;
            resource.height = height;
            resource.width = width;
            resource.image = image;
            {
                ZoneScoped;
                RhiRenderCommandList* cmds = device.create_command_list(0,
                                                                        QueueType::Graphics,
                                                                        RhiRenderCommandList::Level::Primary,
                                                                        allocator);
                cmds->set_debug_name(std::string::format("ChangeFormatOf%s", name));
                RhiResourceBarrier initial_texture_barrier = {};
                initial_texture_barrier.resource_to_barrier = resource.image;
                initial_texture_barrier.old_state = ResourceState::Undefined;
                initial_texture_barrier.source_queue = QueueType::Graphics;
                initial_texture_barrier.destination_queue = QueueType::Graphics;
                PipelineStage stage_after_barrier;
                // Depth formats get the depth aspect/layout; color formats get render-target state
                if(is_depth_format(pixel_format)) {
                    initial_texture_barrier.image_memory_barrier.aspect = ImageAspect::Depth;
                    initial_texture_barrier.new_state = ResourceState::DepthWrite;
                    initial_texture_barrier.access_before_barrier = ResourceAccess::MemoryWrite;
                    initial_texture_barrier.access_after_barrier = ResourceAccess::DepthStencilAttachmentRead;
                    stage_after_barrier = PipelineStage::EarlyFragmentTests;
                } else {
                    initial_texture_barrier.image_memory_barrier.aspect = ImageAspect::Color;
                    initial_texture_barrier.new_state = ResourceState::RenderTarget;
                    initial_texture_barrier.access_before_barrier = ResourceAccess::MemoryWrite;
                    initial_texture_barrier.access_after_barrier = ResourceAccess::ColorAttachmentRead;
                    stage_after_barrier = PipelineStage::ColorAttachmentOutput;
                }
                std::vector<RhiResourceBarrier> initial_barriers{&allocator};
                initial_barriers.push_back(initial_texture_barrier);
                cmds->resource_barriers(PipelineStage::TopOfPipe, stage_after_barrier, initial_barriers);
                RhiFence* upload_done_fence = device.create_fence(false, allocator);
                device.submit_command_list(cmds, QueueType::Graphics, upload_done_fence);
                // Be sure that the data copy is complete, so that this method doesn't return before the GPU is done with the staging buffer
                std::vector<RhiFence*> upload_done_fences{&allocator};
                upload_done_fences.push_back(upload_done_fence);
                device.wait_for_fences(upload_done_fences);
                device.destroy_fences(upload_done_fences, allocator);
            }
            render_targets.insert(name, resource);
            return RenderTargetAccessor{&render_targets, name};
        } else {
            logger->error("Could not create render target %s", name);
            return rx::nullopt;
        }
    }
std::optional<RenderTargetAccessor> DeviceResources::get_render_target(const std::string& name) {
if(render_targets.find(name) != nullptr) {
return RenderTargetAccessor{&render_targets, name};
} else {
return rx::nullopt;
}
}
    // Destroys the image backing `texture_name` and removes its entry from the
    // textures array.
    // NOTE(review): `texture_name_to_idx` is not updated here, and erasing from
    // the array shifts the indices of every later texture, so the name->index map
    // goes stale — looks like a latent bug. Also confirm the rx::vector::erase
    // semantics: the (idx, idx) range used here appears to be inclusive.
    void DeviceResources::destroy_render_target(const std::string& texture_name, rx::memory::allocator& allocator) {
        if(const auto idx = get_texture_idx_for_name(texture_name); idx) {
            const auto texture = textures[*idx];
            device.destroy_texture(texture.image, allocator);
            textures.erase(*idx, *idx);
        }
#if NOVA_DEBUG
        else {
            logger->error("Could not delete texture %s, are you sure you spelled it correctly?", texture_name);
        }
#endif
    }
RhiBuffer* DeviceResources::get_staging_buffer_with_size(const Bytes size) {
// Align the size so we can bin the staging buffers
// TODO: Experiment and find a good alignment
const auto a = size.b_count() % STAGING_BUFFER_ALIGNMENT;
const auto needed_size = STAGING_BUFFER_ALIGNMENT + a;
const auto actual_size = size.b_count() + needed_size;
if(auto* staging_buffer = staging_buffers.find(actual_size); staging_buffer != nullptr) {
auto& buffer_list = *staging_buffer;
if(buffer_list.size() > 0) {
auto* buffer = buffer_list.last();
buffer_list.erase(buffer_list.size() - 1, buffer_list.size() - 1);
return buffer;
}
}
const RhiBufferCreateInfo info = {"GenericStagingBuffer", actual_size, BufferUsage::StagingBuffer};
RhiBuffer* buffer = device.create_buffer(info, internal_allocator);
return buffer;
}
    // Returns `buffer` to the free list for its size bin so a later
    // get_staging_buffer_with_size call of the same size can reuse it
    void DeviceResources::return_staging_buffer(RhiBuffer* buffer) {
        const auto size = buffer->size.b_count();
        auto* buffers = staging_buffers.find(size);
        if(!buffers) {
            // NOTE(review): find/insert returning pointers is the Rex map API —
            // confirm `staging_buffers` is an rx::map
            buffers = staging_buffers.insert(size, {});
        }
        buffers->push_back(buffer);
    }
    // Read-only view of every texture this device currently tracks
    const std::vector<TextureResource>& DeviceResources::get_all_textures() const { return textures; }
void DeviceResources::create_default_textures() {
ZoneScoped;
const auto make_color_tex = [&](const std::string& name, const uint32_t color) {
std::array<uint8_t[64 * 4]> tex_data;
for(uint32_t i = 0; i < tex_data.size(); i++) {
tex_data[i] = color;
}
if(!create_texture(name, 8, 8, PixelFormat::Rgba8, tex_data.data(), internal_allocator)) {
logger->error("Could not create texture %s", name);
}
};
make_color_tex(WHITE_TEXTURE_NAME, 0xFFFFFFFF);
make_color_tex(BLACK_TEXTURE_NAME, 0x00000000);
make_color_tex(GRAY_TEXTURE_NAME, 0x80808080);
}
size_t size_in_bytes(const PixelFormat pixel_format) {
switch(pixel_format) {
case PixelFormat::Rgba8:
return 4;
case PixelFormat::Rgba16F:
return 8;
case PixelFormat::Rgba32F:
return 16;
case PixelFormat::Depth32:
return 4;
case PixelFormat::Depth24Stencil8:
return 4;
default:
return 4;
}
}
} // namespace nova::renderer
| 16,020
|
C++
|
.cpp
| 283
| 41.908127
| 140
| 0.599323
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,205
|
camera.cpp
|
NovaMods_nova-renderer/src/renderer/camera.cpp
|
#include "nova_renderer/camera.hpp"
namespace nova::renderer {
    // Cameras start out active; all projection parameters are copied straight
    // from the create info
    Camera::Camera(const CameraCreateInfo& create_info)
        : is_active(true),
          aspect_ratio{create_info.aspect_ratio},
          field_of_view{create_info.field_of_view},
          near_plane{create_info.near_plane},
          far_plane{create_info.far_plane},
          name{create_info.name} {}

    const std::string& Camera::get_name() const { return name; }
} // namespace nova::renderer
| 471
|
C++
|
.cpp
| 11
| 35.636364
| 64
| 0.661572
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,206
|
material_data_buffer.cpp
|
NovaMods_nova-renderer/src/renderer/material_data_buffer.cpp
|
#include "material_data_buffer.hpp"
#include <rx/core/utility/move.h>
namespace nova::renderer {
    // NOTE(review): this constructor move-initializes `buffer` from itself — a
    // member that has not been constructed yet — and never uses `num_bytes`.
    // This is almost certainly a bug: it should presumably allocate a buffer of
    // `num_bytes` bytes. Confirm the buffer member's type and fix.
    MaterialDataBuffer::MaterialDataBuffer(size_t num_bytes) : buffer(std::move(buffer)) {}

    // Raw pointer to the start of the material-data bytes
    uint8_t* MaterialDataBuffer::data() const { return buffer.data; }
} // namespace nova::renderer
| 292
|
C++
|
.cpp
| 6
| 45.833333
| 91
| 0.749117
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,207
|
pipeline_reflection.cpp
|
NovaMods_nova-renderer/src/renderer/pipeline_reflection.cpp
|
#include "pipeline_reflection.hpp"
#include <rx/core/log.h>
#include <spirv_cross.hpp>
namespace nova::renderer {
using namespace rhi;
RX_LOG("PipelineReflection", logger);
    // Reflects over every shader stage of a pipeline and merges their descriptor
    // bindings into a single name->binding map. The vertex stage is always
    // present; geometry and pixel stages are reflected only when attached.
    std::unordered_map<std::string, RhiResourceBindingDescription> get_all_descriptors(const RhiGraphicsPipelineState& pipeline_state) {
        std::unordered_map<std::string, RhiResourceBindingDescription> bindings;
        get_shader_module_descriptors(pipeline_state.vertex_shader.source, ShaderStage::Vertex, bindings);
        if(pipeline_state.geometry_shader) {
            get_shader_module_descriptors(pipeline_state.geometry_shader->source, ShaderStage::Geometry, bindings);
        }
        if(pipeline_state.pixel_shader) {
            get_shader_module_descriptors(pipeline_state.pixel_shader->source, ShaderStage::Pixel, bindings);
        }
        return bindings;
    }
void get_shader_module_descriptors(const std::vector<uint32_t>& spirv,
const ShaderStage shader_stage,
std::unordered_map<std::string, RhiResourceBindingDescription>& bindings) {
const spirv_cross::Compiler shader_compiler{spirv.data(), spirv.size()};
const spirv_cross::ShaderResources& resources = shader_compiler.get_shader_resources();
for(const auto& resource : resources.separate_images) {
add_resource_to_bindings(bindings, shader_stage, shader_compiler, resource, DescriptorType::Texture);
}
for(const auto& resource : resources.separate_samplers) {
add_resource_to_bindings(bindings, shader_stage, shader_compiler, resource, DescriptorType::Sampler);
}
for(const auto& resource : resources.uniform_buffers) {
add_resource_to_bindings(bindings, shader_stage, shader_compiler, resource, DescriptorType::UniformBuffer);
}
for(const auto& resource : resources.storage_buffers) {
add_resource_to_bindings(bindings, shader_stage, shader_compiler, resource, DescriptorType::StorageBuffer);
}
}
void add_resource_to_bindings(std::unordered_map<std::string, RhiResourceBindingDescription>& bindings,
const ShaderStage shader_stage,
const spirv_cross::Compiler& shader_compiler,
const spirv_cross::Resource& resource,
const DescriptorType type) {
const uint32_t set_idx = shader_compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
const uint32_t binding_idx = shader_compiler.get_decoration(resource.id, spv::DecorationBinding);
RhiResourceBindingDescription new_binding = {};
new_binding.set = set_idx;
new_binding.binding = binding_idx;
new_binding.type = type;
new_binding.count = 1;
new_binding.stages = shader_stage;
logger->debug("Pipeline reflection found resource %s of type %s in binding %u.%u",
resource.name.c_str(),
descriptor_type_to_string(type),
set_idx,
binding_idx);
const spirv_cross::SPIRType& type_information = shader_compiler.get_type(resource.type_id);
if(!type_information.array.empty()) {
new_binding.count = type_information.array[0];
// All arrays are unbounded until I figure out how to use SPIRV-Cross to detect unbounded arrays
new_binding.is_unbounded = true;
}
const std::string& resource_name = resource.name.c_str();
if(auto* binding = bindings.find(resource_name)) {
// Existing binding. Is it the same as our binding?
RhiResourceBindingDescription& existing_binding = *binding;
if(existing_binding != new_binding) {
// They have two different bindings with the same name. Not allowed
logger->error("You have two different uniforms named %s in different shader stages. This is not allowed. Use unique names",
resource.name);
} else {
// Same binding, probably at different stages - let's fix that
existing_binding.stages |= shader_stage;
}
} else {
// Totally new binding!
bindings.insert(resource_name, new_binding);
}
}
} // namespace nova::renderer
| 4,488
|
C++
|
.cpp
| 77
| 45.441558
| 139
| 0.636529
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,208
|
visibility_cache.cpp
|
NovaMods_nova-renderer/src/renderer/visibility_cache.cpp
|
#include "visibility_cache.hpp"
namespace nova::renderer {
    // Both caches draw all of their storage from the caller-provided allocator
    VisibilityCache::VisibilityCache(rx::memory::allocator& allocator) : cached_cameras{&allocator}, visibility_cache{&allocator} {}
} // namespace nova::renderer
| 223
|
C++
|
.cpp
| 4
| 53.5
| 132
| 0.775229
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,209
|
rendergraph.cpp
|
NovaMods_nova-renderer/src/renderer/rendergraph.cpp
|
#include "nova_renderer/rendergraph.hpp"
#include <utility>
#include <Tracy.hpp>
#include "nova_renderer/nova_renderer.hpp"
#include "nova_renderer/rhi/command_list.hpp"
#include "../loading/renderpack/render_graph_builder.hpp"
#include "pipeline_reflection.hpp"
namespace nova::renderer {
using namespace renderpack;
RX_LOG("Rendergraph", logger);
    // `is_builtin` marks passes that Nova itself provides, as opposed to passes defined by a renderpack
    Renderpass::Renderpass(std::string name, const bool is_builtin) : name(std::move(name)), is_builtin(is_builtin) {}
    // Records this renderpass into `cmds`: pre-barriers, renderpass setup/begin,
    // the contents of every attached pipeline, renderpass end, then post-barriers
    void Renderpass::execute(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) {
        // Leftover profiling label; only ZoneScoped actually feeds Tracy
        const auto& profiling_event_name = std::string::format("Execute %s", name);
        ZoneScoped; // TODO: Figure if any of these barriers are implicit
        // TODO: Use shader reflection to figure our the stage that the pipelines in this renderpass need access to this resource instead of
        // using a robust default
        record_pre_renderpass_barriers(cmds, ctx);
        setup_renderpass(cmds, ctx);
        const auto framebuffer = get_framebuffer(ctx);
        cmds.begin_renderpass(renderpass, framebuffer);
        record_renderpass_contents(cmds, ctx);
        cmds.end_renderpass();
        record_post_renderpass_barriers(cmds, ctx);
    }
    // Emits the barriers that must run before this pass: transitions for textures
    // the pass reads and writes and, when the pass targets the backbuffer, a
    // PresentSource -> RenderTarget transition of the swapchain image
    void Renderpass::record_pre_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const {
        ZoneScoped; if(read_texture_barriers.size() > 0) {
            // TODO: Use shader reflection to figure our the stage that the pipelines in this renderpass need access to this resource
            // instead of using a robust default
            cmds.resource_barriers(rhi::PipelineStage::ColorAttachmentOutput, rhi::PipelineStage::FragmentShader, read_texture_barriers);
        }
        if(write_texture_barriers.size() > 0) {
            // TODO: Use shader reflection to figure our the stage that the pipelines in this renderpass need access to this resource
            // instead of using a robust default
            cmds.resource_barriers(rhi::PipelineStage::ColorAttachmentOutput, rhi::PipelineStage::FragmentShader, write_texture_barriers);
        }
        if(writes_to_backbuffer) {
            rhi::RhiResourceBarrier backbuffer_barrier{};
            backbuffer_barrier.resource_to_barrier = ctx.swapchain_image;
            backbuffer_barrier.access_before_barrier = rhi::ResourceAccess::MemoryRead;
            backbuffer_barrier.access_after_barrier = rhi::ResourceAccess::ColorAttachmentWrite;
            backbuffer_barrier.old_state = rhi::ResourceState::PresentSource;
            backbuffer_barrier.new_state = rhi::ResourceState::RenderTarget;
            backbuffer_barrier.source_queue = rhi::QueueType::Graphics;
            backbuffer_barrier.destination_queue = rhi::QueueType::Graphics;
            backbuffer_barrier.image_memory_barrier.aspect = rhi::ImageAspect::Color;
            // TODO: Use shader reflection to figure our the stage that the pipelines in this renderpass need access to this resource
            // instead of using a robust default
            // NOTE(review): std::vector constructed with an allocator pointer is
            // the Rex container API — confirm which vector type this really is
            std::vector<rhi::RhiResourceBarrier> barriers{&rx::memory::g_system_allocator};
            barriers.push_back(backbuffer_barrier);
            cmds.resource_barriers(rhi::PipelineStage::TopOfPipe, rhi::PipelineStage::ColorAttachmentOutput, barriers);
        }
    }
void Renderpass::record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) {
ZoneScoped;
pipeline_names.each_fwd([&](const std::string& pipeline_name) {
const auto* pipeline = ctx.nova->find_pipeline(pipeline_name);
if(pipeline) {
pipeline->record(cmds, ctx);
}
});
}
    // Emits the barriers that must run after this pass; currently only the
    // RenderTarget -> PresentSource transition of the swapchain image for passes
    // that wrote to the backbuffer
    void Renderpass::record_post_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const {
        ZoneScoped; if(writes_to_backbuffer) {
            rhi::RhiResourceBarrier backbuffer_barrier{};
            backbuffer_barrier.resource_to_barrier = ctx.swapchain_image;
            backbuffer_barrier.access_before_barrier = rhi::ResourceAccess::ColorAttachmentWrite;
            backbuffer_barrier.access_after_barrier = rhi::ResourceAccess::MemoryRead;
            backbuffer_barrier.old_state = rhi::ResourceState::RenderTarget;
            backbuffer_barrier.new_state = rhi::ResourceState::PresentSource;
            backbuffer_barrier.source_queue = rhi::QueueType::Graphics;
            backbuffer_barrier.destination_queue = rhi::QueueType::Graphics;
            backbuffer_barrier.image_memory_barrier.aspect = rhi::ImageAspect::Color;
            // NOTE(review): std::vector constructed with an allocator pointer is
            // the Rex container API — confirm which vector type this really is
            std::vector<rhi::RhiResourceBarrier> barriers{&rx::memory::g_system_allocator};
            barriers.push_back(backbuffer_barrier);
            cmds.resource_barriers(rhi::PipelineStage::ColorAttachmentOutput, rhi::PipelineStage::BottomOfPipe, barriers);
        }
    }
    // No-op: SceneRenderpass records nothing here itself.
    void SceneRenderpass::record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) {}

    // A GlobalRenderpass runs exactly one pipeline over exactly one mesh.
    GlobalRenderpass::GlobalRenderpass(const std::string& name, std::unique_ptr<rhi::RhiPipeline> pipeline, const MeshId mesh, const bool is_builtin)
        : Renderpass{name, is_builtin}, pipeline{std::move(pipeline)}, mesh{mesh} {}

    void GlobalRenderpass::record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) {
        cmds.set_pipeline(*pipeline);
        cmds.bind_resources(*resource_binder);
        const auto mesh_data = ctx.nova->get_mesh(mesh);
        cmds.bind_index_buffer(mesh_data->index_buffer, rhi::IndexType::Uint32);
        cmds.bind_vertex_buffers(std::array{mesh_data->vertex_buffer});
        // Draws three indices of the bound mesh — presumably a single fullscreen
        // triangle; TODO confirm against the mesh registered for this pass.
        cmds.draw_indexed_mesh(3);
    }
    Rendergraph::Rendergraph(rhi::RenderDevice& device) : device(device) {}

    // Destroys the named renderpass's GPU objects (framebuffer, if any, and the
    // renderpass itself), drops its bookkeeping, and marks the cached execution
    // order as stale. No-op if the pass is unknown.
    void Rendergraph::destroy_renderpass(const std::string& name) {
        if(Renderpass** renderpass = renderpasses.find(name)) {
            if((*renderpass)->framebuffer) {
                device.destroy_framebuffer((*renderpass)->framebuffer, allocator);
            }
            device.destroy_renderpass((*renderpass)->renderpass, allocator);
            renderpasses.erase(name);
            renderpass_metadatas.erase(name);
            is_dirty = true;
        }
    }

    // Returns the renderpass names in execution order. The order is recomputed
    // (from each pass's create info, via order_passes) only when the graph has
    // changed since the last call; otherwise the cached order is returned.
    std::vector<std::string> Rendergraph::calculate_renderpass_execution_order() {
        ZoneScoped;
        if(is_dirty) {
            const auto create_infos = [&]() {
                std::vector<RenderPassCreateInfo> create_info_temp{&allocator};
                create_info_temp.reserve(renderpass_metadatas.size());
                renderpass_metadatas.each_value([&](const RenderpassMetadata& metadata) { create_info_temp.emplace_back(metadata.data); });
                return create_info_temp;
            }();
            order_passes(create_infos)
                .map([&](const std::vector<RenderPassCreateInfo>& order) {
                    cached_execution_order.clear();
                    cached_execution_order.reserve(order.size());
                    order.each_fwd([&](const RenderPassCreateInfo& create_info) { cached_execution_order.emplace_back(create_info.name); });
                    return true;
                })
                .on_error([&](const auto& err) { rg_log->error("Could not determine renderpass execution order: %s", err.to_string()); });
            // NOTE: the dirty flag is cleared even when ordering failed, so the
            // (possibly stale) cached order is returned until the graph changes again.
            is_dirty = false;
        }
        return cached_execution_order;
    }
Renderpass* Rendergraph::get_renderpass(const std::string& name) const {
if(Renderpass* const* renderpass = renderpasses.find(name)) {
return *renderpass;
}
return nullptr;
}
std::optional<RenderpassMetadata> Rendergraph::get_metadata_for_renderpass(const std::string& name) const {
if(const auto* metadata = renderpass_metadatas.find(name)) {
return *metadata;
}
return rx::nullopt;
}
rhi::RhiFramebuffer* Renderpass::get_framebuffer(const FrameContext& ctx) const {
if(!writes_to_backbuffer) {
return framebuffer;
} else {
return ctx.swapchain_framebuffer;
}
}
    // Default hook: the base renderpass needs no per-frame setup before recording.
    void Renderpass::setup_renderpass(rhi::RhiRenderCommandList& /* cmds */, FrameContext& /* ctx */) {}
    // Binds this material pass's descriptor sets, then records every static and
    // procedural mesh batch that uses the material.
    void renderer::MaterialPass::record(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const {
        ZoneScoped;
        cmds.bind_descriptor_sets(descriptor_sets, pipeline_interface);
        static_mesh_draws.each_fwd(
            [&](const MeshBatch<StaticMeshRenderCommand>& batch) { record_rendering_static_mesh_batch(batch, cmds, ctx); });
        static_procedural_mesh_draws.each_fwd(
            [&](const ProceduralMeshBatch<StaticMeshRenderCommand>& batch) { record_rendering_static_mesh_batch(batch, cmds, ctx); });
    }

    // Records one static mesh batch. Walks the batch's commands, advancing the
    // per-frame model matrix cursor for each visible command (the actual matrix
    // upload is currently commented out), and issues a single indexed draw if
    // anything in the batch was visible.
    void renderer::MaterialPass::record_rendering_static_mesh_batch(const MeshBatch<StaticMeshRenderCommand>& batch,
                                                                    rhi::RhiRenderCommandList& cmds,
                                                                    FrameContext& ctx) {
        ZoneScoped;
        const uint64_t start_index = ctx.cur_model_matrix_index;
        auto model_matrix_buffer = ctx.nova->get_resource_manager().get_uniform_buffer(MODEL_MATRIX_BUFFER_NAME);
        batch.commands.each_fwd([&](const StaticMeshRenderCommand& command) {
            if(command.is_visible) {
                /* ctx.nova->get_device().write_data_to_buffer(&command.model_matrix,
                                                             sizeof(glm::mat4),
                                                             ctx.cur_model_matrix_index * sizeof(glm::mat4),
                                                             (*model_matrix_buffer)->buffer);*/
                ctx.cur_model_matrix_index++;
            }
        });
        // Only draw if at least one command in the batch was visible.
        if(start_index != ctx.cur_model_matrix_index) {
            // TODO: There's probably a better way to do this
            // The same buffer is bound once per vertex attribute.
            std::vector<rhi::RhiBuffer*> vertex_buffers{ctx.allocator};
            vertex_buffers.reserve(batch.num_vertex_attributes);
            for(uint32_t i = 0; i < batch.num_vertex_attributes; i++) {
                vertex_buffers.push_back(batch.vertex_buffer);
            }
            cmds.bind_vertex_buffers(vertex_buffers);
            cmds.bind_index_buffer(batch.index_buffer, rhi::IndexType::Uint32);
            cmds.draw_indexed_mesh(batch.num_indices);
        }
    }

    // Procedural-mesh variant of the batch recorder: the vertex/index buffers come
    // from the procedural mesh's per-frame buffer pair instead of the batch itself.
    void renderer::MaterialPass::record_rendering_static_mesh_batch(const ProceduralMeshBatch<StaticMeshRenderCommand>& batch,
                                                                    rhi::RhiRenderCommandList& cmds,
                                                                    FrameContext& ctx) {
        ZoneScoped;
        const uint64_t start_index = ctx.cur_model_matrix_index;
        auto model_matrix_buffer = ctx.nova->get_resource_manager().get_uniform_buffer(MODEL_MATRIX_BUFFER_NAME);
        batch.commands.each_fwd([&](const StaticMeshRenderCommand& command) {
            if(command.is_visible) {
                /*ctx.nova->get_device().write_data_to_buffer(&command.model_matrix,
                                                            sizeof(glm::mat4),
                                                            ctx.cur_model_matrix_index * sizeof(glm::mat4),
                                                            (*model_matrix_buffer)->buffer);*/
                ctx.cur_model_matrix_index++;
            }
        });
        if(start_index != ctx.cur_model_matrix_index) {
            const auto& [vertex_buffer, index_buffer] = batch.mesh->get_buffers_for_frame(ctx.frame_idx);
            // TODO: There's probably a better way to do this
            // NOTE(review): 7 is a hard-coded vertex attribute count here, unlike
            // the MeshBatch overload which uses batch.num_vertex_attributes — verify.
            std::vector<rhi::RhiBuffer*> vertex_buffers;
            vertex_buffers.reserve(7);
            for(uint32_t i = 0; i < 7; i++) {
                vertex_buffers.push_back(vertex_buffer);
            }
            cmds.bind_vertex_buffers(vertex_buffers);
            cmds.bind_index_buffer(index_buffer, rhi::IndexType::Uint32);
            // NOTE(review): unlike the MeshBatch overload above, no draw call is
            // issued after binding — looks like a missing draw_indexed_mesh(...);
            // confirm intended behavior.
        }
    }

    // Sets this pipeline on the command list and records every material pass that
    // uses it.
    void Pipeline::record(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const {
        ZoneScoped;
        cmds.set_pipeline(*pipeline);
        const auto& passes = ctx.nova->get_material_passes_for_pipeline(pipeline->name);
        passes.each_fwd([&](const renderer::MaterialPass& pass) { pass.record(cmds, ctx); });
    }
} // namespace nova::renderer
| 12,468
|
C++
|
.cpp
| 206
| 47.368932
| 149
| 0.627899
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,210
|
backbuffer_output_pass.cpp
|
NovaMods_nova-renderer/src/renderer/builtin/backbuffer_output_pass.cpp
|
#include "backbuffer_output_pass.hpp"
#include "nova_renderer/loading/renderpack_loading.hpp"
#include "nova_renderer/nova_renderer.hpp"
namespace nova::renderer {
RX_LOG("BackbufferOut", logger);
    // Create info for the builtin pass that composites the UI and scene render
    // targets into the backbuffer.
    struct RX_HINT_EMPTY_BASES BackbufferOutputRenderpassCreateInfo : renderpack::RenderPassCreateInfo {
        BackbufferOutputRenderpassCreateInfo();
    };

    BackbufferOutputRenderpassCreateInfo::BackbufferOutputRenderpassCreateInfo() {
        name = BACKBUFFER_OUTPUT_RENDER_PASS_NAME;
        // Reads the UI and scene outputs, writes only the backbuffer.
        texture_inputs.reserve(2);
        texture_inputs.emplace_back(UI_OUTPUT_RT_NAME);
        texture_inputs.emplace_back(SCENE_OUTPUT_RT_NAME);
        texture_outputs.reserve(1);
        texture_outputs.emplace_back(BACKBUFFER_NAME, rhi::PixelFormat::Rgba8, false);
        pipeline_names.reserve(1);
        pipeline_names.emplace_back(BACKBUFFER_OUTPUT_PIPELINE_NAME);
    }

    rx::global<BackbufferOutputRenderpassCreateInfo> backbuffer_output_create_info{"Nova", "BackbufferOutputCreateInfo"};

    // Binds the two input images and a sampler, and pre-builds the barrier lists
    // used around the pass: before the pass the inputs are moved RenderTarget ->
    // ShaderRead, after it they are moved back ShaderRead -> RenderTarget.
    BackbufferOutputRenderpass::BackbufferOutputRenderpass(rhi::RhiImage* ui_output,
                                                          rhi::RhiImage* scene_output,
                                                          rhi::RhiSampler* point_sampler,
                                                          std::unique_ptr<rhi::RhiPipeline> pipeline,
                                                          MeshId mesh,
                                                          rhi::RenderDevice& device)
        : GlobalRenderpass(BACKBUFFER_OUTPUT_RENDER_PASS_NAME, std::move(pipeline), mesh, true) {
        resource_binder = device.create_resource_binder_for_pipeline(*(this->pipeline), device.get_allocator());
        resource_binder->bind_image("ui_output", ui_output);
        resource_binder->bind_image("scene_output", scene_output);
        resource_binder->bind_sampler("tex_sampler", point_sampler);
        // Barrier template for the inputs before the pass runs.
        rhi::RhiResourceBarrier pre_pass_barrier;
        pre_pass_barrier.access_before_barrier = rhi::ResourceAccess::ColorAttachmentWrite;
        pre_pass_barrier.access_after_barrier = rhi::ResourceAccess::ShaderRead;
        pre_pass_barrier.old_state = rhi::ResourceState::RenderTarget;
        pre_pass_barrier.new_state = rhi::ResourceState::ShaderRead;
        pre_pass_barrier.source_queue = rhi::QueueType::Graphics;
        pre_pass_barrier.destination_queue = rhi::QueueType::Graphics;
        pre_pass_barrier.image_memory_barrier.aspect = rhi::ImageAspect::Color;
        read_texture_barriers.reserve(2);
        // The template is reused for both inputs, swapping the target resource.
        pre_pass_barrier.resource_to_barrier = ui_output;
        read_texture_barriers.push_back(pre_pass_barrier);
        pre_pass_barrier.resource_to_barrier = scene_output;
        read_texture_barriers.push_back(pre_pass_barrier);
        // Barrier template for returning the inputs to render-target state afterwards.
        rhi::RhiResourceBarrier post_pass_barrier;
        post_pass_barrier.access_before_barrier = rhi::ResourceAccess::ShaderRead;
        post_pass_barrier.access_after_barrier = rhi::ResourceAccess::ColorAttachmentWrite;
        post_pass_barrier.old_state = rhi::ResourceState::ShaderRead;
        post_pass_barrier.new_state = rhi::ResourceState::RenderTarget;
        post_pass_barrier.source_queue = rhi::QueueType::Graphics;
        post_pass_barrier.destination_queue = rhi::QueueType::Graphics;
        post_pass_barrier.image_memory_barrier.aspect = rhi::ImageAspect::Color;
        post_pass_barriers.reserve(2);
        post_pass_barrier.resource_to_barrier = ui_output;
        post_pass_barriers.push_back(post_pass_barrier);
        post_pass_barrier.resource_to_barrier = scene_output;
        post_pass_barriers.push_back(post_pass_barrier);
    }

    const renderpack::RenderPassCreateInfo& BackbufferOutputRenderpass::get_create_info() { return *backbuffer_output_create_info; }

    // Adds this pass's input-restoring barriers on top of the base class's
    // backbuffer present transition.
    void BackbufferOutputRenderpass::record_post_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const {
        Renderpass::record_post_renderpass_barriers(cmds, ctx);
        // TODO: Figure out how to make the backend deal with the barriers
        cmds.resource_barriers(rhi::PipelineStage::FragmentShader, rhi::PipelineStage::ColorAttachmentOutput, post_pass_barriers);
    }
} // namespace nova::renderer
| 4,249
|
C++
|
.cpp
| 64
| 54.40625
| 132
| 0.684034
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,211
|
ui_renderer.cpp
|
NovaMods_nova-renderer/src/renderer/ui/ui_renderer.cpp
|
#include "nova_renderer/ui_renderer.hpp"
#include "nova_renderer/rhi/render_device.hpp"
namespace nova::renderer {
    // Create info for the builtin UI pass: it writes a single render target, the
    // UI output.
    struct RX_HINT_EMPTY_BASES UiRenderpassCreateInfo : renderpack::RenderPassCreateInfo {
        UiRenderpassCreateInfo();
    };

    UiRenderpassCreateInfo::UiRenderpassCreateInfo() {
        name = UI_RENDER_PASS_NAME;
        texture_outputs.emplace_back(UI_OUTPUT_RT_NAME, rhi::PixelFormat::Rgba8, true);
    }

    rx::global<UiRenderpassCreateInfo> ui_create_info{"Nova", "UiRenderpassCreateInfo"};

    UiRenderpass::UiRenderpass() : Renderpass(UI_RENDER_PASS_NAME, true) {}

    // Delegates to the subclass's render_ui hook.
    void UiRenderpass::record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) { render_ui(cmds, ctx); }

    const renderpack::RenderPassCreateInfo& UiRenderpass::get_create_info() {
        return *ui_create_info;
    }

    // Null implementation used when the host application provides no UI renderer.
    void NullUiRenderpass::render_ui(rhi::RhiRenderCommandList& /* cmds */, FrameContext& /* ctx */) {
        // Intentionally empty
    }
} // namespace nova::renderer
| 1,019
|
C++
|
.cpp
| 20
| 45.35
| 127
| 0.724521
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,212
|
virtual_filesystem.cpp
|
NovaMods_nova-renderer/src/filesystem/virtual_filesystem.cpp
|
#include "nova_renderer/filesystem/virtual_filesystem.hpp"
#include <rx/core/log.h>
#include "regular_folder_accessor.hpp"
namespace nova::filesystem {
RX_LOG("VirtualFilesystem", logger);
    VirtualFilesystem* VirtualFilesystem::instance = nullptr;

    // Lazily-created singleton. The instance is allocated on first use and never
    // freed (intentionally leaked for process lifetime — TODO confirm).
    VirtualFilesystem* VirtualFilesystem::get_instance() {
        if(!instance) {
            rx::memory::allocator* allocator = &rx::memory::g_system_allocator;
            instance = allocator->create<VirtualFilesystem>();
        }
        return instance;
    }

    // Registers a new resource root, either by path (an accessor is created for
    // it) or as an already-constructed accessor.
    void VirtualFilesystem::add_resource_root(const rx::string& root) { resource_roots.emplace_back(FolderAccessorBase::create(root)); }

    void VirtualFilesystem::add_resource_root(FolderAccessorBase* root_accessor) { resource_roots.push_back(root_accessor); }

    // Searches the registered roots in registration order and returns a subfolder
    // accessor for the first root that contains `path`, or nullptr if none does.
    FolderAccessorBase* VirtualFilesystem::get_folder_accessor(const rx::string& path) const {
        if(resource_roots.is_empty()) {
            logger->error("No resource roots available in the virtual filesystem! You must register at least one resource root path");
            return nullptr;
        }
        FolderAccessorBase* ret_val = nullptr;
        resource_roots.each_fwd([&](FolderAccessorBase* root) {
            if(root && root->does_resource_exist(path)) {
                ret_val = root->create_subfolder_accessor(path);
                // Returning false stops the each_fwd iteration at the first match.
                return false;
            }
            return true;
        });
        if(ret_val == nullptr) {
            logger->error("Could not find folder %s", path);
        }
        return ret_val;
    }
} // namespace nova::filesystem
| 1,589
|
C++
|
.cpp
| 34
| 38
| 136
| 0.657792
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,213
|
zip_folder_accessor.cpp
|
NovaMods_nova-renderer/src/filesystem/zip_folder_accessor.cpp
|
#include "zip_folder_accessor.hpp"
#include <array>
#include <memory>
#include <sstream>
#include <rx/core/array.h>
#include <rx/core/log.h>
#include "nova_renderer/util/utils.hpp"
namespace nova::filesystem {
RX_LOG("ZipFilesystem", logger);
    // Opens the zip archive at `folder` and indexes its contents. On open failure
    // only an error is logged; the accessor is still constructed.
    ZipFolderAccessor::ZipFolderAccessor(const rx::string& folder) : FolderAccessorBase(folder) {
        if(mz_zip_reader_init_file(&zip_archive, folder.data(), 0) == 0) {
            logger->error("Could not open zip archive %s", folder);
        }
        build_file_tree();
    }

    ZipFolderAccessor::~ZipFolderAccessor() { mz_zip_reader_end(&zip_archive); }
rx::vector<uint8_t> ZipFolderAccessor::read_file(const rx::string& path) {
const auto full_path = rx::string::format("%s/%s", root_folder, path);
if(!does_resource_exist_on_filesystem(full_path)) {
logger->error("Resource at path %s does not exist", full_path);
return {};
}
const auto file_idx = resource_indexes.find(full_path);
mz_zip_archive_file_stat file_stat = {};
const mz_bool has_file_stat = mz_zip_reader_file_stat(&zip_archive, *file_idx, &file_stat);
if(has_file_stat == 0) {
const mz_zip_error err_code = mz_zip_get_last_error(&zip_archive);
const rx::string err = mz_zip_get_error_string(err_code);
logger->error("Could not get information for file %s:%s", full_path, err);
}
rx::vector<uint8_t> resource_buffer;
resource_buffer.reserve(static_cast<uint64_t>(file_stat.m_uncomp_size));
const mz_bool file_extracted = mz_zip_reader_extract_to_mem(&zip_archive,
*file_idx,
resource_buffer.data(),
resource_buffer.size(),
0);
if(file_extracted == 0) {
const mz_zip_error err_code = mz_zip_get_last_error(&zip_archive);
const rx::string err = mz_zip_get_error_string(err_code);
logger->error("Could not extract file %s:%s", full_path, err);
}
return resource_buffer;
}
    // Lists the immediate children of `folder` by walking the pre-built file tree.
    // If any path component cannot be found, an error is logged and the children
    // of the last node reached are returned.
    rx::vector<rx::string> ZipFolderAccessor::get_all_items_in_folder(const rx::string& folder) {
        const rx::vector<rx::string> folder_path_parts = folder.split('/');
        FileTreeNode* cur_node = &files;
        // Get the node at this path
        folder_path_parts.each_fwd([&](const rx::string& part) {
            bool found_node = false;
            cur_node->children.each_fwd([&](FileTreeNode& child) {
                if(child.name == part) {
                    cur_node = &child;
                    found_node = true;
                    return false;
                }
                return true;
            });
            if(!found_node) {
                logger->error("Couldn't find node %s", folder);
            }
        });
        rx::vector<rx::string> children_paths;
        children_paths.reserve(cur_node->children.size());
        cur_node->children.each_fwd([&](const FileTreeNode& child) {
            rx::string s = child.get_full_path();
            children_paths.emplace_back(s);
        });
        return children_paths;
    }

    // Opens a new accessor rooted at a subpath, sharing the same (copied) archive
    // handle state.
    FolderAccessorBase* ZipFolderAccessor::create_subfolder_accessor(const rx::string& path) const {
        rx::memory::allocator* allocator = &rx::memory::g_system_allocator;
        return allocator->create<ZipFolderAccessor>(rx::string::format("%s/%s", root_folder, path), zip_archive);
    }

    // Private constructor used by create_subfolder_accessor: adopts an already-
    // initialized archive instead of opening the file again.
    ZipFolderAccessor::ZipFolderAccessor(const rx::string& folder, const mz_zip_archive archive)
        : FolderAccessorBase(folder), zip_archive(archive) {
        build_file_tree();
    }
void ZipFolderAccessor::build_file_tree() {
const uint32_t num_files = mz_zip_reader_get_num_files(&zip_archive);
rx::vector<rx::string> all_file_names;
all_file_names.resize(num_files);
char filename_buffer[1024];
for(uint32_t i = 0; i < num_files; i++) {
const uint32_t num_bytes_in_filename = mz_zip_reader_get_filename(&zip_archive, i, filename_buffer, 1024);
filename_buffer[num_bytes_in_filename] = '\0';
all_file_names.emplace_back(filename_buffer);
}
// Build a tree from all the files
all_file_names.each_fwd([&](const rx::string& filename) {
const rx::vector<rx::string> filename_parts = filename.split('/');
FileTreeNode* cur_node = &files;
filename_parts.each_fwd([&](const rx::string& part) {
bool node_found = false;
cur_node->children.each_fwd([&](FileTreeNode& child) {
if(child.name == part) {
// We already have a node for the current folder. Set this node as the current one and go to the
// next iteration of the loop
cur_node = &child;
node_found = true;
return false;
}
return true;
});
if(!node_found) {
// We didn't find a node for the current part of the path, so let's add one
FileTreeNode new_node;
new_node.name = part;
new_node.parent = cur_node;
cur_node->children.push_back(new_node);
cur_node = &cur_node->children.last();
}
});
});
}
    // Checks whether `resource_path` exists inside the archive, consulting and
    // updating the existence cache. On a hit, the file's archive index is also
    // cached for later read_file calls.
    bool ZipFolderAccessor::does_resource_exist_on_filesystem(const rx::string& resource_path) {
        const auto existence_maybe = does_resource_exist_in_map(resource_path);
        if(existence_maybe) {
            return *existence_maybe;
        }
        const int32_t ret_val = mz_zip_reader_locate_file(&zip_archive, resource_path.data(), "", 0);
        if(ret_val != -1) {
            // resource found!
            resource_indexes.insert(resource_path, ret_val);
            resource_existence.insert(resource_path, true);
            return true;
        }
        // resource not found
        resource_existence.insert(resource_path, false);
        return false;
    }

    // Debug helper: logs the tree rooted at `folder`, indenting by depth.
    void print_file_tree(const FileTreeNode& folder, const uint32_t depth) {
        std::stringstream ss;
        for(uint32_t i = 0; i < depth; i++) {
            ss << " ";
        }
        ss << folder.name.data();
        logger->info(ss.str().c_str());
        folder.children.each_fwd([&](const FileTreeNode& child) { print_file_tree(child, depth + 1); });
    }

    // Reconstructs this node's path by walking parent links to the root, then
    // joining the components root-first.
    // NOTE(review): `is_first` is initialized to false and only ever assigned
    // false, so the `!is_first` guards are always true — it was likely meant to
    // start as true. Also note the join skips both the component at
    // cur_path_part == 0 and the final component (cur_path_part < num_path_parts - 1);
    // verify this matches the intended "skip the resourcepack root" behavior.
    rx::string FileTreeNode::get_full_path() const {
        rx::vector<rx::string> names;
        const FileTreeNode* cur_node = this;
        while(cur_node != nullptr) {
            names.push_back(cur_node->name);
            cur_node = cur_node->parent;
        }
        // Skip the last string in the vector, since it's the resourcepack root node
        bool is_first = false;
        const uint32_t num_path_parts = static_cast<uint32_t>(names.size() - 1);
        uint32_t cur_path_part = 0;
        rx::string joined;
        names.each_rev([&](rx::string& str) {
            if(!is_first && cur_path_part > 0 && cur_path_part < num_path_parts - 1) {
                joined = rx::string::format("%s/%s", joined, str);
            }
            if(!is_first) {
                cur_path_part++;
            }
            is_first = false;
            return true;
        });
        return joined;
    }
} // namespace nova::filesystem
| 7,705
|
C++
|
.cpp
| 165
| 34.10303
| 120
| 0.550367
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,214
|
regular_folder_accessor.cpp
|
NovaMods_nova-renderer/src/filesystem/regular_folder_accessor.cpp
|
#include "regular_folder_accessor.hpp"
#include <rx/core/concurrency/scope_lock.h>
#include <rx/core/filesystem/directory.h>
#include <rx/core/filesystem/file.h>
#include <rx/core/log.h>
namespace nova::filesystem {
RX_LOG("RegularFilesystem", logger);
    RegularFolderAccessor::RegularFolderAccessor(const rx::string& folder) : FolderAccessorBase(folder) {}

    // Reads the raw bytes of `path` from disk. Accepts either a path relative to
    // the accessor's root or one that already includes the root prefix. Returns an
    // empty vector if the file is missing or unreadable.
    rx::vector<uint8_t> RegularFolderAccessor::read_file(const rx::string& path) {
        // Guards the shared existence cache used by does_resource_exist_on_filesystem.
        rx::concurrency::scope_lock l(*resource_existence_mutex);
        const auto full_path = [&] {
            if(has_root(path, root_folder)) {
                return path;
            } else {
                return rx::string::format("%s/%s", root_folder, path);
            }
        }();
        if(!does_resource_exist_on_filesystem(full_path)) {
            logger->error("Resource at path %s doesn't exist", full_path);
            return {};
        }
        if(const auto bytes = rx::filesystem::read_binary_file(full_path)) {
            return *bytes;
        }
        return {};
    }

    // Lists the names (not full paths) of the entries directly inside `folder`.
    rx::vector<rx::string> RegularFolderAccessor::get_all_items_in_folder(const rx::string& folder) {
        const auto full_path = rx::string::format("%s/%s", root_folder, folder);
        rx::vector<rx::string> paths = {};
        if(rx::filesystem::directory dir{full_path}) {
            dir.each([&](const rx::filesystem::directory::item& item) { paths.push_back(item.name()); });
        }
        return paths;
    }

    // Checks whether `resource_path` names an existing file or directory,
    // consulting and updating the existence cache. Caller must hold
    // resource_existence_mutex (see read_file).
    bool RegularFolderAccessor::does_resource_exist_on_filesystem(const rx::string& resource_path) {
        const auto existence_maybe = does_resource_exist_in_map(resource_path);
        if(existence_maybe) {
            // NOVA_LOG(TRACE) << "Does " << resource_path << " exist? " << *existence_maybe;
            return *existence_maybe;
        }
        if(const rx::filesystem::file file{resource_path, "r"}) {
            // logger->verbose("%s exists", resource_path);
            resource_existence.insert(resource_path, true);
            return true;
        } else if(const rx::filesystem::directory dir{resource_path}) {
            resource_existence.insert(resource_path, true);
            return true;
        }
        // NOVA_LOG(TRACE) << resource_path << " does not exist";
        resource_existence.insert(resource_path, false);
        return false;
    }

    // Builds a new accessor rooted at root_folder/path.
    FolderAccessorBase* RegularFolderAccessor::create_subfolder_accessor(const rx::string& path) const {
        return create(rx::string::format("%s/%s", root_folder, path));
    }
} // namespace nova::filesystem
| 2,587
|
C++
|
.cpp
| 56
| 37.339286
| 106
| 0.61829
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,215
|
folder_accessor.cpp
|
NovaMods_nova-renderer/src/filesystem/folder_accessor.cpp
|
#include "nova_renderer/filesystem/folder_accessor.hpp"
#include <rx/core/concurrency/scope_lock.h>
#include <rx/core/log.h>
#include "regular_folder_accessor.hpp"
#include "zip_folder_accessor.hpp"
namespace nova::filesystem {
RX_LOG("filesystem", logger);
    // A "zip folder" is identified purely by its .zip extension.
    bool is_zip_folder(const rx::string& path_to_folder) { return path_to_folder.ends_with(".zip"); }

    // Factory: picks the accessor type from the path — a zip accessor for .zip
    // files, a regular accessor for directories, nullptr (with an error logged)
    // for anything else.
    FolderAccessorBase* FolderAccessorBase::create(const rx::string& path) {
        rx::memory::allocator* allocator = &rx::memory::g_system_allocator;
        // Where is the renderpack, and what kind of folder is it in ?
        if(is_zip_folder(path)) {
            // zip folder in renderpacks folder
            return allocator->create<ZipFolderAccessor>(path);
        } else if(const rx::filesystem::directory directory(path); directory) {
            // regular folder in renderpacks folder
            return allocator->create<RegularFolderAccessor>(path);
        }
        logger->error("Could not create folder accessor for path %s", path);
        return nullptr;
    }

    FolderAccessorBase::FolderAccessorBase(rx::string folder)
        : root_folder(rx::utility::move(folder)), resource_existence_mutex(new rx::concurrency::mutex) {}

    // Thread-safe existence check for a path relative to this accessor's root.
    bool FolderAccessorBase::does_resource_exist(const rx::string& resource_path) {
        rx::concurrency::scope_lock l(*resource_existence_mutex);
        const auto full_path = rx::string::format("%s/%s", root_folder, resource_path);
        return does_resource_exist_on_filesystem(full_path);
    }

    // Reads a file and reinterprets its byte buffer as a string.
    // NOTE(review): disown() hands the raw buffer to the string without appending
    // a NUL terminator — confirm rx::string handles that correctly.
    rx::string FolderAccessorBase::read_text_file(const rx::string& resource_path) {
        auto buf = read_file(resource_path);
        return buf.disown();
    }

    // Cache lookup only: returns the cached existence answer, or nullopt if this
    // path has never been checked.
    rx::optional<bool> FolderAccessorBase::does_resource_exist_in_map(const rx::string& resource_string) const {
        if(const auto* val = resource_existence.find(resource_string); val != nullptr) {
            return rx::optional<bool>(*val);
        }
        return rx::nullopt;
    }

    const rx::string& FolderAccessorBase::get_root() const { return root_folder; }

    // True when `path` already starts with `root` (used to avoid double-prefixing).
    bool has_root(const rx::string& path, const rx::string& root) { return path.begins_with(root); }
} // namespace nova::filesystem
| 2,210
|
C++
|
.cpp
| 41
| 46.658537
| 112
| 0.684162
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,216
|
constants.hpp
|
NovaMods_nova-renderer/include/nova_renderer/constants.hpp
|
//! \brief A collection of constants for Nova to use
#pragma once
#include "nova_renderer/util/bytes.hpp"
using namespace nova::mem::operators;
namespace nova::renderer {
    constexpr const char* MODEL_MATRIX_BUFFER_NAME = "NovaModelMatrixUBO";

    constexpr const char* PER_FRAME_DATA_NAME = "NovaPerFrameUBO";

    constexpr const char* MATERIAL_DATA_BUFFER_NAME = "NovaMaterialData";

    constexpr const char* CAMERA_MATRIX_BUFFER_NAME = "NovaCameraMatrixBuffer";

    constexpr mem::Bytes MATERIAL_BUFFER_SIZE = 64_kb;

    // PCI vendor IDs, as reported by the device.
    // NOTE(review): 0x1022 is AMD's CPU/chipset vendor ID; AMD/ATI GPUs typically
    // report 0x1002 — verify against how this constant is matched.
    constexpr uint32_t AMD_PCI_VENDOR_ID = 0x1022;
    // Fix: Intel's PCI vendor ID is hexadecimal 0x8086 (a nod to the 8086 CPU);
    // the previous decimal literal 8086 (= 0x1F96) would never match any device.
    constexpr uint32_t INTEL_PCI_VENDOR_ID = 0x8086;
    constexpr uint32_t NVIDIA_PCI_VENDOR_ID = 0x10DE;

    constexpr uint32_t MAX_NUM_CAMERAS = 256;

    /*!
     * \brief Maximum number of textures that Nova can handle
     */
    constexpr uint32_t MAX_NUM_TEXTURES = 1024;

    constexpr mem::Bytes PER_FRAME_MEMORY_SIZE = 2_mb;

    constexpr const char* RENDERPACK_DIRECTORY = "renderpacks";

    constexpr const char* MATERIALS_DIRECTORY = "materials";

    constexpr const char* SHADERS_DIRECTORY = "shaders";

    constexpr const char* RENDERPACK_DESCRIPTOR_FILE = "renderpack.json";

    constexpr const char* RESOURCES_FILE = "resources.json";

    constexpr const char* MATERIAL_FILE_EXTENSION = ".mat";

    /*!
     * \brief Name of Nova's white texture
     *
     * The white texture is a 4x4 texture where each texel has the RGBA value of (1, 1, 1, 1)
     */
    constexpr const char* WHITE_TEXTURE_NAME = "NovaWhiteTexture";

    /*!
     * \brief Name of Nova's gray texture
     *
     * The gray texture is a 4x4 texture where each texel has the RGBA value of (0.5, 0.5, 0.5, 0.5)
     */
    constexpr const char* GRAY_TEXTURE_NAME = "NovaGrayTexture";

    /*!
     * \brief Name of Nova's black texture
     *
     * The black texture is a 4x4 texture where each texel has the RGBA value of (0, 0, 0, 0)
     */
    constexpr const char* BLACK_TEXTURE_NAME = "NovaBlackTexture";

    /*!
     * \brief Name of the builtin pass Nova uses to render UI
     *
     * This pass reads from and writes to the backbuffer. UI renderpasses are expected to use something like blending or the stencil
     * buffer to layer the UI on top of the 3D scene.
     */
    constexpr const char* UI_RENDER_PASS_NAME = "NovaUI";

    /*!
     * \brief Name of the renderpass that outputs to the backbuffer
     */
    constexpr const char* BACKBUFFER_OUTPUT_RENDER_PASS_NAME = "BackbufferOutput";

    constexpr const char* BACKBUFFER_OUTPUT_PIPELINE_NAME = "BackbufferOutput";

    constexpr const char* BACKBUFFER_OUTPUT_MATERIAL_NAME = "BackbufferOutput";

    /*!
     * \brief Name of the render target that renderpacks must render to
     */
    constexpr const char* SCENE_OUTPUT_RT_NAME = "NovaSceneOutput";

    /*!
     * \brief Name of the UI render target
     *
     * All UI renderpasses MUST render to this render target
     */
    constexpr const char* UI_OUTPUT_RT_NAME = "NovaUiOutput";

    /*!
     * \brief Name of the backbuffer
     *
     * Nova presents the backbuffer to the screen every frame. The builtin UI render pass adds the UI to the backbuffer after the rest of
     * the rendergraph has finished
     */
    constexpr const char* BACKBUFFER_NAME = "NovaBackbuffer";

    constexpr const char* POINT_SAMPLER_NAME = "NovaPointSampler";
} // namespace nova::renderer
| 3,388
|
C++
|
.h
| 75
| 39.786667
| 139
| 0.701397
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,217
|
resource_loader.hpp
|
NovaMods_nova-renderer/include/nova_renderer/resource_loader.hpp
|
#pragma once
#include "nova_renderer/rhi/forward_decls.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
#include "nova_renderer/util/container_accessor.hpp"
namespace nova::mem {
class Bytes;
}
namespace nova::renderer {
class NovaRenderer;
namespace rhi {
enum class PixelFormat;
}
    // A named texture plus the RHI image backing it and its dimensions/format.
    struct TextureResource {
        std::string name;

        rhi::RhiImage* image = nullptr;

        size_t width;

        size_t height;

        rhi::PixelFormat format;
    };

    // A named GPU buffer plus its size.
    struct BufferResource {
        std::string name;

        rhi::RhiBuffer* buffer = nullptr;

        mem::Bytes size = 0;
    };

    using TextureResourceAccessor = VectorAccessor<TextureResource>;

    using RenderTargetAccessor = MapAccessor<std::string, TextureResource>;

    using BufferResourceAccessor = MapAccessor<std::string, BufferResource>;
/*!
* \brief Provides a means to access Nova's resources, and also helps in creating resources? IDK yet but that's fine
*
* Basically I need both a high-level API to make resources with, and I want to make those resource easy to access.
*/
    class DeviceResources {
    public:
        explicit DeviceResources(NovaRenderer& renderer);

        [[nodiscard]] std::optional<BufferResourceAccessor> create_uniform_buffer(const std::string& name, mem::Bytes size);

        [[nodiscard]] std::optional<BufferResourceAccessor> get_uniform_buffer(const std::string& name);

        void destroy_uniform_buffer(const std::string& name);

        /*!
         * \brief Creates a new dynamic texture with the provided initial texture data
         *
         * \param name The name of the texture. After the texture has been created, you can use this to refer to it
         * \param width The width of the texture
         * \param height The height of the texture
         * \param pixel_format The format of the pixels in this texture
         * \param data The initial data for this texture. Must be large enough to have all the pixels in the texture
         * \param allocator The allocator to allocate with
         * \return The newly-created image, or nullptr if the image could not be created. Check the Nova logs to find out why
         */
        [[nodiscard]] std::optional<TextureResourceAccessor> create_texture(const std::string& name,
                                                                           size_t width,
                                                                           size_t height,
                                                                           rhi::PixelFormat pixel_format,
                                                                           const void* data,
                                                                           rx::memory::allocator& allocator);

        //! \brief Index of the named texture in the flat texture array, or nullopt if unknown.
        [[nodiscard]] std::optional<uint32_t> get_texture_idx_for_name(const std::string& name) const;

        /*!
         * \brief Retrieves the texture with the specified name
         */
        [[nodiscard]] std::optional<TextureResourceAccessor> get_texture(const std::string& name);

        /*!
         * \brief Creates a new render target with the specified size and format
         *
         * Render targets reside completely on the GPU and are not accessible from the CPU. If you need a shader-writable, CPU-readable
         * texture, create a readback texture instead
         *
         * By default a render target may not be sampled by a shader
         *
         * \param name The name of the render target
         * \param width The width of the render target, in pixels
         * \param height The height of the render target, in pixels
         * \param pixel_format The format of the render target
         * \param allocator The allocator to use for any host memory this method needs to allocate
         * \param can_be_sampled If true, the render target may be sampled by a shader. If false, this render target may only be presented
         * to the screen
         *
         * \return The new render target if it could be created, or an empty optional if it could not
         */
        [[nodiscard]] std::optional<RenderTargetAccessor> create_render_target(const std::string& name,
                                                                              size_t width,
                                                                              size_t height,
                                                                              rhi::PixelFormat pixel_format,
                                                                              rx::memory::allocator& allocator,
                                                                              bool can_be_sampled = false);

        /*!
         * \brief Retrieves the render target with the specified name
         */
        [[nodiscard]] std::optional<RenderTargetAccessor> get_render_target(const std::string& name);

        void destroy_render_target(const std::string& texture_name, rx::memory::allocator& allocator);

        /*!
         * \brief Retrieves a staging buffer at least the specified size
         *
         * The actual buffer returned may be larger than what you need
         *
         * When you're done with the staging buffer, return it to the pool with `return_staging_buffer`
         */
        rhi::RhiBuffer* get_staging_buffer_with_size(mem::Bytes size);

        void return_staging_buffer(rhi::RhiBuffer* buffer);

        [[nodiscard]] const std::vector<TextureResource>& get_all_textures() const;

    private:
        NovaRenderer& renderer;

        rhi::RenderDevice& device;

        rx::memory::allocator& internal_allocator;

        // Flat texture storage; texture_name_to_idx maps names to indices in it.
        std::vector<TextureResource> textures;

        std::unordered_map<std::string, uint32_t> texture_name_to_idx;

        std::unordered_map<std::string, TextureResource> render_targets;

        // Staging buffers pooled by size, so they can be reused across uploads.
        std::unordered_map<size_t, std::vector<rhi::RhiBuffer*>> staging_buffers;

        std::unordered_map<std::string, BufferResource> uniform_buffers;

        void create_default_textures();
    };
} // namespace nova::renderer
| 6,126
|
C++
|
.h
| 111
| 40.612613
| 138
| 0.59284
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,218
|
window.hpp
|
NovaMods_nova-renderer/include/nova_renderer/window.hpp
|
#pragma once
#include <functional>
#include <vector>
#include <glm/vec2.hpp>
#include "nova_renderer/nova_settings.hpp"
#include "nova_renderer/util/platform.hpp"
#include "nova_renderer/util/windows.hpp"
#include "nova_renderer/window.hpp"
// ReSharper disable once CppInconsistentNaming
struct GLFWwindow;
namespace nova::renderer {
/**
* \brief GLFW wrapper that does a few nice things that Nova likes
*/
class NovaWindow {
public:
    explicit NovaWindow(const NovaSettings& options);

    ~NovaWindow();

    // Non-copyable and non-movable: the window owns a GLFW handle and registers
    // `this` as the GLFW user pointer, so its address must remain stable
    NovaWindow(NovaWindow&& other) noexcept = delete;
    NovaWindow& operator=(NovaWindow&& other) noexcept = delete;

    NovaWindow(const NovaWindow& other) = delete;
    NovaWindow& operator=(const NovaWindow& other) = delete;

    /*!
     * \brief Registers a new key input callback
     *
     * This callback will key called for every key press event that this window receives
     *
     * \param key_callback Callback for when a key is received. Intentionally a std::function so I can easily add
     * it to a vector. First parameter to this function is the key code, second is whether the key was pressed
     * this frame, third is if control is down, fourth is if shift is down
     */
    void register_key_callback(std::function<void(uint32_t, bool, bool, bool)> key_callback);

    /*!
     * \brief Registers a new mouse position callback
     *
     * Mouse position callback gets called when the mouse position changed
     *
     * \param mouse_callback Callback for when mouse input is received. The first parameter is the mouse's X
     * position, the second if the Y position
     */
    void register_mouse_callback(std::function<void(double, double)> mouse_callback);

    /*!
     * \brief Registers a new mouse button callback
     *
     * This callback gets invoked whenever the user presses a mouse button
     *
     * \param mouse_callback Callback for when a mouse button is pressed. First parameter is the mouse button,
     * second parameter is if it was pressed
     */
    void register_mouse_button_callback(std::function<void(uint32_t, bool)> mouse_callback);

    /*!
     * \brief Pumps the window's event queue, dispatching input to the registered callbacks
     */
    void poll_input() const;

    /*!
     * \brief True once the user has requested that the window close
     */
    [[nodiscard]] bool should_close() const;

    /*!
     * \brief Gets the size of the framebuffer that this window displays
     */
    [[nodiscard]] glm::uvec2 get_framebuffer_size() const;

    /*!
     * \brief Gets the size of the window itself
     */
    [[nodiscard]] glm::uvec2 get_window_size() const;

    /*!
     * \brief Get the ratio of the size of the framebuffer to the size of the window
     */
    [[nodiscard]] glm::vec2 get_framebuffer_to_window_ratio() const;

    /*!
     * \brief Native (Win32) window handle, needed by RHI backends that create surfaces from an HWND
     */
    [[nodiscard]] HWND get_window_handle() const;

private:
    GLFWwindow* window = nullptr;

    // One list per event type; every registered callback is invoked for every matching event
    std::vector<std::function<void(uint32_t, bool, bool, bool)>> key_callbacks;
    std::vector<std::function<void(double, double)>> mouse_callbacks;
    std::vector<std::function<void(uint32_t, bool)>> mouse_button_callbacks;

    // Static trampolines handed to GLFW; they recover the NovaWindow instance and forward to broadcast_*
    static void glfw_key_callback(GLFWwindow* window, int key, int scancode, int action, int mods);
    static void glfw_mouse_callback(GLFWwindow* window, double x_position, double y_position);
    static void glfw_mouse_button_callback(GLFWwindow* window, int button, int action, int mods);

    void broadcast_key_event(int key, bool is_press, bool is_control_down, bool is_shift_down);
    void broadcast_mouse_position(double x_position, double y_position);
    void broadcast_mouse_button(int button, bool is_pressed);
};
} // namespace nova::renderer
| 3,785
|
C++
|
.h
| 78
| 40.410256
| 117
| 0.665942
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,219
|
frame_context.hpp
|
NovaMods_nova-renderer/include/nova_renderer/frame_context.hpp
|
#pragma once
#include <stddef.h>
#include <rx/core/memory/allocator.h>
#include "nova_renderer/rhi/forward_decls.hpp"
#include "nova_renderer/resource_loader.hpp"
namespace nova::renderer {
class NovaRenderer;
/*!
* \brief All the per-frame data that Nova itself cares about
*/
struct FrameContext {
    /*!
     * \brief Pointer to the NovaRenderer instance that launched this frame
     */
    NovaRenderer* nova;

    /*!
     * \brief The number of frames that were started before this frame
     */
    size_t frame_count;

    /*!
     * \brief Index of the frame currently being rendered
     */
    size_t frame_idx;

    /*!
     * \brief Swapchain image that this frame renders to
     */
    rhi::RhiImage* swapchain_image;

    /*!
     * \brief Swapchain framebuffer that this frame renders to
     */
    rhi::RhiFramebuffer* swapchain_framebuffer;

    /*!
     * \brief Buffer with all the camera matrices to use when rendering this frame
     */
    rhi::RhiBuffer* camera_matrix_buffer;

    // Running index of the next model matrix slot to fill while recording this frame
    size_t cur_model_matrix_index = 0;

    // Allocator for this frame's transient allocations — presumably reset between frames; confirm with caller
    rx::memory::allocator* allocator = nullptr;

    // Accessor for the per-frame material data buffer
    BufferResourceAccessor material_buffer;
};
} // namespace nova::renderer
| 1,311
|
C++
|
.h
| 40
| 25.1
| 86
| 0.621622
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,220
|
nova_settings.hpp
|
NovaMods_nova-renderer/include/nova_renderer/nova_settings.hpp
|
#pragma once
#include <string>
#include <vector>
#include <cstdint>
namespace nova::renderer {
/*!
 * \brief A semantic version number (major.minor.patch)
 */
struct Semver {
    uint32_t major;
    uint32_t minor;
    uint32_t patch;
};
class NovaSettingsAccessManager;

/*!
 * \brief Anything which inherits from this class wants to know about the configuration and any changes to it
 */
class ConfigListener {
public:
    // A user-declared copy constructor suppresses the implicit default constructor, which would make it
    // impossible for derived listener types to be default-constructed. Declare the default explicitly
    ConfigListener() = default;

    ConfigListener(const ConfigListener& other) = default;
    ConfigListener& operator=(const ConfigListener& other) = default;

    ConfigListener(ConfigListener&& old) noexcept = default;
    ConfigListener& operator=(ConfigListener&& old) noexcept = default;

    virtual ~ConfigListener() = default;

    /*!
     * \brief Tells the listeners that there has been a change in the configuration
     *
     * This method is called throughout Nova's lifetime whenever a configuration value changes. This method should
     * handle changing configuration values such as the size of the window and what renderpack the user has loaded
     *
     * Note that this method only receives the read-write config values (the 'settings' node)
     *
     * \param new_config The updated configuration
     */
    virtual void on_config_change(const NovaSettingsAccessManager& new_config) = 0;

    /*!
     * \brief Tells listeners that the configuration has been loaded
     *
     * When Nova starts up, this method is called on all config listeners, then on_config_change is called.
     * on_config_change should be used to listen for any config values that change throughout the program's life, so
     * then this method should be used for any initial configuration whose values will not change throughout the
     * program's lifetime. An example of this is reading in the bind points of the UBOs: the bind points won't change
     * throughout the program's life, so they should be handled in this function
     *
     * We may want to consider two config files: one for read-only values and one for read-write values. Probably a
     * good idea, but I don't feel like implementing that just yet
     *
     * \param config The configuration that was loaded
     */
    virtual void on_config_loaded(const NovaSettingsAccessManager& config) = 0;
};
/*!
 * \brief All user-tunable settings that control how Nova starts up and runs
 */
struct NovaSettings {
    /*!
     * \brief Options for configuring the way mesh memory is allocated
     *
     * Nova tries to be clever and optimize how it draws meshes with indirect rendering. It shoves everything into
     * a handful of giant buffers, to facilitate indirect rendering. These options are how you configure that
     */
    struct BlockAllocatorSettings {
        /*!
         * \brief The total amount of memory that can be used
         *
         * This must be a whole-number multiple of `new_buffer_size`
         */
        uint32_t max_total_allocation = 1024 * 1024 * 1024;

        /*!
         * \brief The size of one buffer
         *
         * Nova doesn't allocate `max_total_allocation` memory initially. It only allocates a single buffer of
         * `new_buffer_size` size, then allocates new buffers as needed
         *
         * This number must be a whole-number multiple of `buffer_part_size`
         */
        uint32_t new_buffer_size = 16 * 1024 * 1024;

        /*!
         * \brief The size of one allocation from a buffer
         *
         * Nova gives meshes one or more allocations from a given buffer.
         */
        uint32_t buffer_part_size = 16 * 1024;
    };

    /*!
     * \brief All options to turn on debugging functionality
     */
    struct DebugOptions {
        /*!
         * \brief If false, all debugging behavior is disabled, even if individual options are turned on
         */
        bool enabled = false;

        /*!
         * \brief Controls if the API-specific validation layers are enabled
         *
         * This should be enabled most of the time for Nova developers and almost never for renderpack authors. Nova developers need it
         * on to debug their Vulkan usage, while Nova should be robust enough that errors that the validation layers would catch never
         * happen in a shipping build
         */
        bool enable_validation_layers = false;

        /*!
         * \brief Should Nova raise SIGINT when the validation layers detect an error?
         */
        bool break_on_validation_errors = true;

        /*!
         * \brief Enables GPU-based validation, which can check more situations then the normal debug layers but can cost a lot of
         * performance
         *
         * GPU-based validation checks for a number of errors like uninitialized descriptors, indexing a descriptor that doesn't exist,
         * or trying to access a resource that's in an incomplete state. These are great errors to check for, but checking for them
         * costs significant GPU time. Unless you're developing Nova, this should probably remain `false`
         */
        bool enable_gpu_based_validation = false;

        // Options for the optional in-process RenderDoc integration
        struct {
            /*!
             * \brief If true, Nova will look for RenderDoc on your computer and will try to load it, letting you
             * debug your renderpack without leaving Nova
             */
            bool enabled = false;

            /*!
             * \brief The path to `renderdoc.dll` on your filesystem
             */
            std::string renderdoc_dll_path = R"(C:\Users\gold1\bin\RenderDoc\RenderDoc_2020_02_06_fe30fa91_64\renderdoc.dll)";

            /*!
             * \brief The base path for RenderDoc captures
             */
            const char* capture_path = "logs/captures";
        } renderdoc;
    } debug;

    /*!
     * \brief Settings that Nova can change, but which are still stored in a config
     */
    struct CacheOptions {
        /*!
         * \brief The renderpack that was most recently loaded
         *
         * Nova requires a renderpack to render anything, so we need to know which one to load on application start
         */
        const char* loaded_renderpack = "DefaultShaderpack";
    } cache;

    /*!
     * \brief Options about the window that Nova will live in
     */
    struct WindowOptions {
        /*!
         * \brief The title of the Window
         */
        const char* title = "Nova Renderer";

        /*!
         * \brief The width of the window
         */
        uint32_t width{};

        /*!
         * \brief The height of the window
         */
        uint32_t height{};
    } window;

    /*!
     * \brief Options that are specific to Nova's Vulkan rendering backend
     */
    struct VulkanOptions {
        /*!
         * \brief The application name to pass to Vulkan
         */
        const char* application_name = "Nova Renderer";

        /*!
         * \brief The application version to pass to Vulkan
         */
        Semver application_version = {0, 8, 4};
    } vulkan;

    /*!
     * \brief Information about the system we're running on
     */
    struct SystemInfo {
        /*!
         * \brief Whether we're on a Unified Memory Architecture
         */
        bool is_uma = false;
    } system_info;

    // Maximum number of frames that may be in flight on the GPU at once
    uint32_t max_in_flight_frames = 3;

    /*!
     * \brief Settings for how Nova should allocate vertex memory
     */
    BlockAllocatorSettings vertex_memory_settings;

    /*!
     * \brief Settings for how Nova should allocate index memory
     */
    BlockAllocatorSettings index_memory_settings;
};
class NovaSettingsAccessManager { // Classes named Manager are an antipattern so yes
public:
    // The settings being observed; mutate this, then call update_config_changed() to notify listeners
    NovaSettings settings;

    explicit NovaSettingsAccessManager(NovaSettings settings);

    /*!
     * \brief Registers the given iconfig_change_listener as an Observer
     */
    void register_change_listener(ConfigListener* new_listener);

    /*!
     * \brief Updates all the change listeners with the current state of the settings
     *
     * This method is public so that whatever changes values can delay calling it. You can set a bunch of options that
     * are pretty computationally intensive to change, the update listeners after all the values are changed
     *
     * Note that this method only send the read-write config values (children of the node 'settings') to the listeners
     */
    void update_config_changed();

    /*!
     * \brief Tells all the config listeners that the configuration has been loaded for the first time
     */
    void update_config_loaded();

    // Why did this take so long omg
    const NovaSettings* operator->() const;

private:
    // Non-owning pointers; listeners must outlive this manager or deregister — TODO confirm lifetime contract
    std::vector<ConfigListener*> config_change_listeners;
};
} // namespace nova::renderer
| 9,388
|
C++
|
.h
| 210
| 33.542857
| 139
| 0.601794
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,221
|
rendergraph.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rendergraph.hpp
|
#pragma once
#include <rx/core/log.h>
#include <unordered_map>
#include <optional>
#include "nova_renderer/frame_context.hpp"
#include "nova_renderer/procedural_mesh.hpp"
#include "nova_renderer/renderables.hpp"
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "nova_renderer/rhi/render_device.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
#include "nova_renderer/rhi/swapchain.hpp"
#include "nova_renderer/util/container_accessor.hpp"
#include "resource_loader.hpp"
namespace nova::renderer {
RX_LOG("rendergraph", rg_log);
class DeviceResources;
namespace renderpack {
struct RenderPassCreateInfo;
}
#pragma region Metadata structs
/*!
 * \brief Fully-qualified name of one pass of one material, usable as a hash-map key
 */
struct FullMaterialPassName {
    std::string material_name;
    std::string pass_name;

    bool operator==(const FullMaterialPassName& other) const;

    size_t hash() const;
};
/*!
 * \brief Locates a material pass within a pipeline: the pipeline's name plus the pass's index
 */
struct MaterialPassKey {
    std::string pipeline_name;
    uint32_t material_pass_index;
};
/*!
 * \brief Metadata recorded for a single material pass — the renderpack data it was created from
 */
struct MaterialPassMetadata {
    renderpack::MaterialPass data;
};
/*!
 * \brief Metadata recorded for a pipeline: its creation state plus metadata for each material pass it renders
 */
struct PipelineMetadata {
    RhiGraphicsPipelineState data;

    std::unordered_map<FullMaterialPassName, MaterialPassMetadata> material_metadatas{};
};
/*!
 * \brief Metadata recorded for a renderpass — the create info it was built from
 */
struct RenderpassMetadata {
    renderpack::RenderPassCreateInfo data;
};
#pragma endregion
#pragma region Structs for rendering
/*!
 * \brief A batch of draws that share one vertex/index buffer pair
 *
 * \tparam RenderCommandType The per-draw command type recorded into `commands`
 */
template <typename RenderCommandType>
struct MeshBatch {
    size_t num_vertex_attributes{};
    uint32_t num_indices{};

    rhi::RhiBuffer* vertex_buffer = nullptr;
    rhi::RhiBuffer* index_buffer = nullptr;

    /*!
     * \brief A buffer to hold all the per-draw data
     *
     * For example, a non-animated mesh just needs a mat4 for its model matrix
     *
     * This buffer gets re-written to every frame, since the number of renderables in this mesh batch might have changed. If there's
     * more renderables than the buffer can hold, it gets reallocated from the RHI
     */
    rhi::RhiBuffer* per_renderable_data = nullptr;

    std::vector<RenderCommandType> commands;
};
/*!
 * \brief A batch of draws whose geometry comes from a procedural (CPU-generated, streamed) mesh
 *
 * \tparam RenderCommandType The per-draw command type recorded into `commands`
 */
template <typename RenderCommandType>
struct ProceduralMeshBatch {
    // Accessor into the owning mesh map, so the batch survives map rehashing
    MapAccessor<MeshId, ProceduralMesh> mesh;

    /*!
     * \brief A buffer to hold all the per-draw data
     *
     * For example, a non-animated mesh just needs a mat4 for its model matrix
     *
     * This buffer gets re-written to every frame, since the number of renderables in this mesh batch might have changed. If there's
     * more renderables than the buffer can hold, it gets reallocated from the RHI
     */
    rhi::RhiBuffer* per_renderable_data = nullptr;

    std::vector<RenderCommandType> commands;

    ProceduralMeshBatch(std::unordered_map<MeshId, ProceduralMesh>* meshes, const MeshId key) : mesh(meshes, key) {}
};
/*!
 * \brief A runtime material pass: everything needed to record its static and procedural mesh draws
 */
struct MaterialPass {
    FullMaterialPassName name;

    std::vector<MeshBatch<StaticMeshRenderCommand>> static_mesh_draws;
    std::vector<ProceduralMeshBatch<StaticMeshRenderCommand>> static_procedural_mesh_draws;

    std::vector<rhi::RhiDescriptorSet*> descriptor_sets;
    const rhi::RhiPipelineInterface* pipeline_interface = nullptr;

    /*!
     * \brief Records all of this pass's draws into the given command list
     */
    void record(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const;

    // Overloads for the two batch flavors — static geometry vs procedural geometry
    static void record_rendering_static_mesh_batch(const MeshBatch<StaticMeshRenderCommand>& batch,
                                                   rhi::RhiRenderCommandList& cmds,
                                                   FrameContext& ctx);
    static void record_rendering_static_mesh_batch(const ProceduralMeshBatch<StaticMeshRenderCommand>& batch,
                                                   rhi::RhiRenderCommandList& cmds,
                                                   FrameContext& ctx);
};
/*!
 * \brief A runtime pipeline: owns the RHI pipeline object and can record itself into a command list
 */
struct Pipeline {
    std::unique_ptr<rhi::RhiPipeline> pipeline{};
    rhi::RhiPipelineInterface* pipeline_interface = nullptr;

    /*!
     * \brief Records this pipeline's work into the given command list
     */
    void record(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const;
};
#pragma endregion
/*!
* \brief Renderpass that's ready to be recorded into a command list
*
* Renderpass has two virtual methods: `record` and `record_inside_renderpass`. `record` records the renderpass in its entirety, while
* `record_inside_renderpass` only records the inside of the renderpass, not the work needed to begin or end it. I expect that most
* subclasses will only want to override `record_inside_renderpass`
*/
class Renderpass {
public:
    explicit Renderpass(std::string name, bool is_builtin = false);

    Renderpass(Renderpass&& old) noexcept = default;
    Renderpass& operator=(Renderpass&& old) noexcept = default;

    Renderpass(const Renderpass& other) = delete;
    Renderpass& operator=(const Renderpass& other) = delete;

    virtual ~Renderpass() = default;

    // Position of this pass in the rendergraph; assigned when the pass is added
    uint32_t id = 0;
    std::string name;

    // True for passes Nova creates itself, false for passes loaded from a renderpack
    bool is_builtin = false;

    rhi::RhiRenderpass* renderpass = nullptr;

    // nullptr when the pass writes to the backbuffer (the swapchain owns that framebuffer)
    rhi::RhiFramebuffer* framebuffer = nullptr;

    /*!
     * \brief Names of all the pipelines which are in this renderpass
     */
    std::vector<std::string> pipeline_names;

    bool writes_to_backbuffer = false;

    // Barriers issued before (reads) and after (writes) the pass executes
    std::vector<rhi::RhiResourceBarrier> read_texture_barriers;
    std::vector<rhi::RhiResourceBarrier> write_texture_barriers;

    /*!
     * \brief Performs the rendering work of this renderpass
     *
     * Custom renderpasses can override this method to perform custom rendering. However, I recommend that you override
     * `render_renderpass_contents` instead. A typical renderpass will need to issue barriers for the resource it uses, and
     * the default renderpass implementation calls `render_renderpass_contents` after issuing those barriers
     *
     * \param cmds The command list that this renderpass should record all its rendering commands into. You may record secondary command
     * lists in multiple threads and execute them with this command list, if you want
     *
     * \param ctx The context for the current frame. Contains information about the available resources, the current frame, and
     * everything you should need to render. If there's something you need that isn't in the frame context, submit an issue on the Nova
     * GitHub
     */
    virtual void execute(rhi::RhiRenderCommandList& cmds, FrameContext& ctx);

    /*!
     * \brief Returns the framebuffer that this renderpass should render to
     */
    [[nodiscard]] rhi::RhiFramebuffer* get_framebuffer(const FrameContext& ctx) const;

protected:
    /*!
     * \brief Records all the resource barriers that need to take place before this renderpass renders anything
     *
     * By default `render` calls this method before calling `setup_renderpass`. If you override `render`, you'll need to call
     * this method yourself before using any of this renderpass's resources
     */
    virtual void record_pre_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const;

    /*!
     * \brief Allows a renderpass to perform work before the recording of the actual renderpass
     *
     * This is useful for e.g. uploading streamed in vertex data
     *
     * The default `render` method calls this after `record_pre_renderpass_barriers` and before `record_renderpass_contents`
     */
    virtual void setup_renderpass(rhi::RhiRenderCommandList& cmds, FrameContext& ctx);

    /*!
     * \brief Renders the contents of this renderpass
     *
     * The default `render` method calls this method after `record_pre_renderpass_barriers` and before
     * `record_post_renderpass_barriers`. Thus, I recommend that you override this method instead of `render` - you'll have fewer things
     * to worry about
     *
     * \param cmds The command list that this renderpass should record all its rendering commands into. You may record secondary command
     * lists in multiple threads and execute them with this command list, if you want
     *
     * \param ctx The context for the current frame. Contains information about the available resources, the current frame, and
     * everything you should need to render. If there's something you need that isn't in the frame context, submit an issue on the Nova
     * GitHub
     */
    virtual void record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx);

    /*!
     * \brief Records all the resource barriers that need to take place after this renderpass renders anything
     *
     * By default `render` calls this method after calling `render_renderpass_contents`. If you override `render`, you'll need to call
     * this method yourself near the end of your `render` method
     */
    virtual void record_post_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const;
};
// Intentionally using a C enum because this is actually a bitmask
// Intentionally using a C enum because this is actually a bitmask — values are OR'd together
enum ObjectType {
    OpaqueSurface = 0x1,
    TransparentSurface = 0x2,
    Particle = 0x4,
    Volume = 0x8,
};
/*!
* \brief A renderpass that draws objects in a scene
*
* Scene renderpasses have some information about which kinds of objects they draw - transparent, opaque, particles, ets
*/
/*!
 * \brief A renderpass that draws objects in a scene
 *
 * Scene renderpasses carry a bitmask of which kinds of objects they draw — transparent, opaque, particles, etc
 */
class SceneRenderpass : public Renderpass {
public:
    /*!
     * \brief Draws this render pass's objects
     */
    void record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) override;

private:
    // Bitmask of ObjectType values. Brace-initialized (to no bits set) so the mask is never
    // read uninitialized — the original left it indeterminate
    ObjectType drawn_objects{};
};
/*!
* \brief A renderpass that doesn't operate on a specific object, but rather on data that's accessible for the whole scene
*
* Examples: light culling in a forward+ renderer, lighting in a deferred renderer, or post-processing
*
* Global renderpasses typically only execute one graphics pipeline, and they do it across the entire scene. They operate on render
* targets like the absolute chads they are
*/
class GlobalRenderpass : public Renderpass {
public:
    /*!
     * \brief Creates a new global render pass that will use the provided pipeline
     *
     * We use shader reflection to figure out which render targets the pipeline wants to use, then cache them from the device resources
     * object. This means that a renderpack's dynamic resources _MUST_ be created before its render graph
     *
     * \param name The name of this renderpass
     * \param pipeline The graphics pipeline state to use when executing this renderpass
     * \param mesh The mesh to execute this renderpass over. Will usually be the fullscreen triangle
     * \param is_builtin Whether this render pass is built in to Nova or comes from a renderpack
     */
    explicit GlobalRenderpass(const std::string& name, std::unique_ptr<rhi::RhiPipeline> pipeline, MeshId mesh, bool is_builtin = false);

protected:
    // The single pipeline this pass executes, and the binder for its resources
    std::unique_ptr<rhi::RhiPipeline> pipeline;
    std::unique_ptr<RhiResourceBinder> resource_binder;

    // The mesh drawn by this pass — usually the fullscreen triangle
    MeshId mesh;

    /*!
     * \brief Issues a fullscreen drawcall that uses its resource binder and pipeline state
     */
    void record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) override;
};
/*!
* \brief Represents Nova's rendergraph
*
* The rendergraph can change a lot over the runtime of Nova. Loading or unloading a renderpack will change the available passes, and
* the order they're executed in
*/
class Rendergraph {
public:
    /*!
     * \brief Constructs a Rendergraph which will allocate its internal memory from the provided allocator, and which will execute on
     * the provided device
     */
    explicit Rendergraph(rhi::RenderDevice& device);

    /*!
     * \brief Creates a new renderpass of the specified type using it's own create info
     *
     * This method calls a static method `RenderpassType::get_create_info` to get the renderpass's create info, and it allocates the new
     * renderpass from the rendergraph's internal allocator. Intended usage is adding renderpasses from C++ code - this method makes it
     * easy to define all your renderpass data in your C++ renderpass class
     *
     * This method creates all the GPU resources needed for the renderpass and it's framebuffer. It does not create any pipelines or
     * materials that may be rendered as part of this renderpass. You may create them through the rendergraph's JSON files, or through
     * the renderpass's constructor
     *
     * This method returns a pointer to the newly-created renderpass if everything went according to plan, or `nullptr` if it didn't
     *
     * Exact name and usage are still under revision, this is the alpha version of this method
     */
    template <typename RenderpassType, typename... Args>
    [[nodiscard]] RenderpassType* create_renderpass(DeviceResources& resource_storage, Args&&... args);

    /*!
     * \brief Adds an already-created renderpass with a specific create info
     *
     * This method initializes all the GPU resources needed for this renderpass and the framebuffer it renders to. It then adds the
     * renderpass to the appropriate places, returning a pointer to the renderpass you provided
     *
     * This method returns `nullptr` if the renderpass's GPU resources can't be initialized
     */
    template <typename RenderpassType>
    [[nodiscard]] RenderpassType* add_renderpass(RenderpassType* renderpass,
                                                 const renderpack::RenderPassCreateInfo& create_info,
                                                 DeviceResources& resource_storage);

    /*!
     * \brief Removes the named renderpass from the graph, if present, and marks the execution order dirty
     */
    void destroy_renderpass(const std::string& name);

    /*!
     * \brief Computes (and caches) the order in which the graph's renderpasses should execute
     */
    [[nodiscard]] std::vector<std::string> calculate_renderpass_execution_order();

    [[nodiscard]] Renderpass* get_renderpass(const std::string& name) const;

    [[nodiscard]] std::optional<RenderpassMetadata> get_metadata_for_renderpass(const std::string& name) const;

private:
    // Set whenever passes are added/removed; signals that cached_execution_order must be recomputed
    bool is_dirty = false;

    rhi::RenderDevice& device;

    // Non-owning? Passes are inserted as raw pointers — confirm ownership/teardown in destroy_renderpass
    std::unordered_map<std::string, Renderpass*> renderpasses;

    std::vector<std::string> cached_execution_order;
    std::unordered_map<std::string, RenderpassMetadata> renderpass_metadatas;
};
template <typename RenderpassType, typename... Args>
RenderpassType* Rendergraph::create_renderpass(DeviceResources& resource_storage, Args&&... args) {
    // NOTE(review): `allocator` is not a member of Rendergraph (see the class definition above) — this looks
    // like a leftover from the rx -> std migration and will not compile if instantiated. Confirm where the
    // rendergraph's allocator is supposed to come from
    auto* renderpass = allocator.create<RenderpassType>(std::forward<Args>(args)...);
    // Each renderpass type exposes its own pass description through a static factory
    const auto& create_info = RenderpassType::get_create_info();
    return add_renderpass(renderpass, create_info, resource_storage);
}
/*!
 * \brief Initializes the GPU resources for an already-constructed renderpass and registers it in the graph
 *
 * Validates the pass's attachments (backbuffer exclusivity, matching sizes), creates the RHI renderpass and —
 * for non-backbuffer passes — its framebuffer, then records the pass and its metadata under its name.
 *
 * \return The renderpass on success, `nullptr` if any attachment is missing/invalid or RHI creation fails
 */
template <typename RenderpassType>
RenderpassType* Rendergraph::add_renderpass(RenderpassType* renderpass,
                                            const renderpack::RenderPassCreateInfo& create_info,
                                            DeviceResources& resource_storage) {
    RenderpassMetadata metadata;
    metadata.data = create_info;

    std::vector<rhi::RhiImage*> color_attachments;
    color_attachments.reserve(create_info.texture_outputs.size());

    glm::uvec2 framebuffer_size(0);

    const auto num_attachments = create_info.depth_texture ? create_info.texture_outputs.size() + 1 :
                                                             create_info.texture_outputs.size();
    std::vector<std::string> attachment_errors;
    attachment_errors.reserve(num_attachments);

    bool missing_render_targets = false;

    // `each_fwd` is an rx-container method; std::vector iterates with a range-for
    for(const auto& attachment_info : create_info.texture_outputs) {
        if(attachment_info.name == BACKBUFFER_NAME) {
            if(create_info.texture_outputs.size() == 1) {
                renderpass->writes_to_backbuffer = true;
                renderpass->framebuffer = nullptr; // Will be resolved when rendering

            } else {
                // std::string has no static format(); build the message with concatenation instead
                attachment_errors.push_back("Pass " + std::string{create_info.name} + " writes to the backbuffer and " +
                                            std::to_string(create_info.texture_outputs.size() - 1) +
                                            " other textures, but that's not allowed. If a pass writes to the backbuffer, it can't write to any other textures");
            }

            framebuffer_size = device.get_swapchain()->get_size();

        } else {
            const auto render_target_opt = resource_storage.get_render_target(attachment_info.name);
            if(render_target_opt) {
                const auto& render_target = *render_target_opt;

                color_attachments.push_back(render_target->image);

                const glm::uvec2 attachment_size = {render_target->width, render_target->height};
                if(framebuffer_size.x > 0) {
                    if(attachment_size.x != framebuffer_size.x || attachment_size.y != framebuffer_size.y) {
                        attachment_errors.push_back(
                            "Attachment " + std::string{attachment_info.name} + " has a size of " +
                            std::to_string(attachment_size.x) + "x" + std::to_string(attachment_size.y) +
                            ", but the framebuffer for pass " + std::string{create_info.name} + " has a size of " +
                            std::to_string(framebuffer_size.x) + "x" + std::to_string(framebuffer_size.y) +
                            " - these must match! All attachments of a single renderpass must have the same size");
                    }

                } else {
                    framebuffer_size = attachment_size;
                }

            } else {
                rg_log->error("No render target named %s", attachment_info.name);
                missing_render_targets = true;
            }
        }
    }

    if(missing_render_targets) {
        return nullptr;
    }

    // Can't combine these if statements and I don't want to `.find` twice
    const auto depth_attachment = [&]() -> std::optional<rhi::RhiImage*> {
        if(create_info.depth_texture) {
            if(const auto depth_tex = resource_storage.get_render_target(create_info.depth_texture->name); depth_tex) {
                return (*depth_tex)->image;
            }
        }

        // std::optional is emptied with std::nullopt; rx::nullopt was an rx-migration leftover
        return std::nullopt;
    }();

    // std::vector spells this `empty()`, not `is_empty()`
    if(!attachment_errors.empty()) {
        for(const auto& err : attachment_errors) {
            rg_log->error("%s", err);
        }

        rg_log->error(
            "Could not create renderpass %s because there were errors in the attachment specification. Look above this message for details",
            create_info.name);

        return nullptr;
    }

    // NOTE(review): `allocator` is not a member of Rendergraph — rx -> std migration leftover. Confirm which
    // allocator the device's create calls are supposed to receive
    ntl::Result<rhi::RhiRenderpass*> renderpass_result = device.create_renderpass(create_info, framebuffer_size, allocator);
    if(renderpass_result) {
        renderpass->renderpass = renderpass_result.value;

    } else {
        rg_log->error("Could not create renderpass %s: %s", create_info.name, renderpass_result.error.to_string());
        return nullptr;
    }

    // Backbuffer framebuffers are owned by the swapchain, not the renderpass that writes to them, so if the
    // renderpass writes to the backbuffer then we don't need to create a framebuffer for it
    if(!renderpass->writes_to_backbuffer) {
        renderpass->framebuffer = device.create_framebuffer(renderpass->renderpass,
                                                            color_attachments,
                                                            depth_attachment,
                                                            framebuffer_size,
                                                            allocator);
    }

    renderpass->pipeline_names = create_info.pipeline_names;
    renderpass->id = static_cast<uint32_t>(renderpass_metadatas.size());

    destroy_renderpass(create_info.name);
    // std::unordered_map::insert takes a pair; insert_or_assign has the two-argument shape the old rx call used
    renderpasses.insert_or_assign(create_info.name, renderpass);
    renderpass_metadatas.insert_or_assign(create_info.name, metadata);

    is_dirty = true;

    return renderpass;
}
} // namespace nova::renderer
| 21,011
|
C++
|
.h
| 387
| 42.782946
| 204
| 0.641779
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,222
|
per_frame_device_array.hpp
|
NovaMods_nova-renderer/include/nova_renderer/per_frame_device_array.hpp
|
#pragma once
#include <Tracy.hpp>
#include <vector>
#include <spdlog/sinks/stdout_color_sinks.h>
#include "nova_renderer/constants.hpp"
#include "nova_renderer/rhi/render_device.hpp"
namespace nova::renderer {
static auto pfd_logger = spdlog::stdout_color_mt("PerDeviceFrameArray");
/*!
* \brief Array of data which is unique for each frame of execution
*/
/*!
 * \brief Array of data which is unique for each frame of execution
 */
template <typename ElementType>
class PerFrameDeviceArray {
public:
    /*!
     * \brief Initializes a PerFrameDeviceArray, creating GPU resources with the provided render device
     *
     * \param num_elements The number of elements in the array
     * \param num_in_flight_frames The number of in-flight frames that we'll support
     * \param device The device to create the buffers on
     */
    explicit PerFrameDeviceArray(size_t num_elements,
                                 uint32_t num_in_flight_frames,
                                 rhi::RenderDevice& device);

    ~PerFrameDeviceArray() = default;

    // Unchecked element access into the CPU-side copy of the data
    ElementType& operator[](uint32_t idx);

    ElementType& at(uint32_t idx);

    // Copies the whole CPU-side array into the buffer for the given in-flight frame
    void upload_to_device(uint32_t frame_idx);

    // Slot bookkeeping: hand out and reclaim free indices into the array
    [[nodiscard]] uint32_t get_next_free_slot();

    void free_slot(uint32_t idx);

    [[nodiscard]] size_t size() const;

    [[nodiscard]] rhi::RhiBuffer* get_buffer_for_frame(uint32_t frame_idx) const;

private:
    // One GPU buffer per in-flight frame, each sized to hold the whole array
    std::vector<rhi::RhiBuffer*> per_frame_buffers;

    rhi::RenderDevice& device;

    // CPU-side staging copy of the array contents
    std::vector<ElementType> data;

    // Stack of free slots; get_next_free_slot pops from the back
    std::vector<uint32_t> free_indices;
};
/*!
 * \brief Creates the CPU-side array and one uniform buffer per in-flight frame
 *
 * \param num_elements Number of array elements (and thus free slots)
 * \param num_in_flight_frames How many per-frame buffers to create
 * \param device Device the buffers are created on
 */
template <typename ElementType>
PerFrameDeviceArray<ElementType>::PerFrameDeviceArray(const size_t num_elements,
                                                      const uint32_t num_in_flight_frames,
                                                      rhi::RenderDevice& device)
    : device{device},
      data{num_elements} {
    rhi::RhiBufferCreateInfo create_info;
    create_info.size = sizeof(ElementType) * data.size();
    create_info.buffer_usage = rhi::BufferUsage::UniformBuffer;

    per_frame_buffers.reserve(num_in_flight_frames);
    for(uint32_t i = 0; i < num_in_flight_frames; i++) {
        create_info.name = fmt::format("CameraBuffer{}", i);
        // NOTE(review): `internal_allocator` is not declared anywhere in this header — looks like an
        // rx-migration leftover. Confirm which allocator buffer creation should receive
        per_frame_buffers.emplace_back(device.create_buffer(create_info, internal_allocator));
    }

    // All slots are free at program startup. Push indices in descending order so that
    // get_next_free_slot (which pops from the back) hands out slot 0 first. The original
    // wrap-around countdown loop relied on unsigned overflow to terminate; this form is
    // equivalent, overflow-free, and reserves up front
    free_indices.reserve(num_elements);
    for(size_t i = 0; i < num_elements; i++) {
        free_indices.emplace_back(static_cast<uint32_t>(num_elements - 1 - i));
    }
}
/*!
 * \brief Unchecked access to the element at `idx` in the CPU-side array
 */
template <typename ElementType>
ElementType& PerFrameDeviceArray<ElementType>::operator[](const uint32_t idx) {
    auto& element = data[idx];
    return element;
}
/*!
 * \brief Access to the element at `idx`; same unchecked behavior as operator[]
 */
template <typename ElementType>
ElementType& PerFrameDeviceArray<ElementType>::at(uint32_t idx) {
    return (*this)[idx];
}
/*!
 * \brief Copies the whole CPU-side array into the buffer that backs the given in-flight frame
 */
template <typename ElementType>
void PerFrameDeviceArray<ElementType>::upload_to_device(const uint32_t frame_idx) {
    ZoneScoped;

    auto* const target_buffer = per_frame_buffers[frame_idx];
    const auto byte_count = sizeof(ElementType) * data.size();
    device.write_data_to_buffer(data.data(), byte_count, target_buffer);
}
template <typename ElementType>
uint32_t PerFrameDeviceArray<ElementType>::get_next_free_slot() {
    // Fix: std::vector has no member `last()` - the original `free_indices.last()`
    // (likely a leftover from rx::vector) fails to compile when this function is
    // instantiated. `back()` is the equivalent accessor.
    // NOTE(review): calling this with an empty free list is UB, same as before -
    // callers must not allocate more slots than size(). TODO confirm callers.
    const auto slot = free_indices.back();
    free_indices.pop_back();
    return slot;
}
template <typename ElementType>
void PerFrameDeviceArray<ElementType>::free_slot(const uint32_t idx) {
    // Hand the slot back so a later get_next_free_slot() can reuse it.
    // NOTE(review): double-freeing an index is not detected - TODO confirm callers never do
    free_indices.push_back(idx);
}
template <typename ElementType>
size_t PerFrameDeviceArray<ElementType>::size() const {
    // Element count of the CPU-side array, not the number of per-frame buffers
    const auto element_count = data.size();
    return element_count;
}
template <typename ElementType>
rhi::RhiBuffer* PerFrameDeviceArray<ElementType>::get_buffer_for_frame(const uint32_t frame_idx) const {
    // The GPU buffer that backs this array for the requested in-flight frame
    auto* const buffer = per_frame_buffers[frame_idx];
    return buffer;
}
} // namespace nova::renderer
| 3,978
|
C++
|
.h
| 90
| 35.044444
| 108
| 0.643598
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,223
|
renderpack_data_conversions.hpp
|
NovaMods_nova-renderer/include/nova_renderer/renderpack_data_conversions.hpp
|
#pragma once
#include <optional>
#include "rhi/pipeline_create_info.hpp"
// Forward declarations only - keeps this header light by not pulling in the full
// rendergraph and renderpack-data headers
namespace nova {
namespace renderer {
class Rendergraph;
namespace renderpack {
struct PipelineData;
}
} // namespace renderer
} // namespace nova
namespace nova::renderer::renderpack {
    /*!
     * \brief Converts renderpack-authored pipeline data into an RHI graphics pipeline state
     *
     * \param data The renderpack pipeline description to convert
     * \param rendergraph The rendergraph that the pipeline's renderpass lives in
     *
     * \return The converted pipeline state, or an empty optional if it could not be produced
     */
    std::optional<RhiGraphicsPipelineState> to_pipeline_state_create_info(const renderpack::PipelineData& data,
                                                                          const Rendergraph& rendergraph);
} // namespace nova::renderer::renderpack
| 530
|
C++
|
.h
| 15
| 25.933333
| 111
| 0.624754
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,224
|
renderpack_data.hpp
|
NovaMods_nova-renderer/include/nova_renderer/renderpack_data.hpp
|
#pragma once
#include <optional>
#include <string>
#include <unordered_map>
#include <glm/glm.hpp>
#include <nlohmann/json.hpp>
#include <cstdint>
#include <vulkan/vulkan.h>
#include "rhi/rhi_enums.hpp"
// Forward declaration - the full Renderpass definition lives in rendergraph.hpp
namespace nova::renderer {
class Renderpass;
} // namespace nova::renderer
namespace nova::renderer::renderpack {
/*!
 * \brief Controls the rasterizer's state
 *
 * A pipeline turns individual pieces of fixed-function state on or off by listing
 * these flags in PipelineData::states
 */
enum class RasterizerState {
/*!
 * \brief Enable blending for this material state
 */
Blending,
/*!
 * \brief Render backfaces and cull frontfaces
 */
InvertCulling,
/*!
 * \brief Don't cull backfaces or frontfaces
 */
DisableCulling,
/*!
 * \brief Don't write to the depth buffer
 */
DisableDepthWrite,
/*!
 * \brief Don't perform a depth test
 */
DisableDepthTest,
/*!
 * \brief Perform the stencil test
 */
EnableStencilTest,
/*!
 * \brief Write to the stencil buffer
 */
StencilWrite,
/*!
 * \brief Don't write to the color buffer
 */
DisableColorWrite,
/*!
 * \brief Enable alpha to coverage
 */
EnableAlphaToCoverage,
/*!
 * \brief Don't write alpha
 */
DisableAlphaWrite,
};
// Texture filters available to samplers - see SamplerCreateInfo::filter for notes
enum class TextureFilter { TexelAA, Bilinear, Point };
// Behavior when sampling outside the [0, 1] texture coordinate range
enum class WrapMode { Repeat, Clamp };
/*!
 * \brief Where the texture comes from
 */
enum class TextureLocation {
/*!
 * \brief The texture is written to by a shader
 */
Dynamic,
/*!
 * \brief The texture is loaded from the textures/ folder in the current renderpack
 */
InUserPackage,
/*!
 * \brief The texture is provided by Nova or by Minecraft
 */
InAppPackage
};
// How a pipeline handles MSAA (see PipelineData::msaa_support) - presumably
// MSAA-only, both MSAA and non-MSAA, or neither; TODO(review) confirm semantics
enum class MsaaSupport { MSAA, Both, None };
// Stencil buffer operation, used by StencilOpState (RP prefix keeps it distinct from RHI enums)
enum class RPStencilOp { Keep, Zero, Replace, Increment, IncrementAndWrap, Decrement, DecrementAndWrap, Invert };
// Comparison function for depth and stencil tests (see PipelineData::depth_func, StencilOpState::compare_op)
enum class RPCompareOp { Never, Less, LessEqual, Greater, GreaterEqual, Equal, NotEqual, Always };
// How vertices are assembled into primitives (see PipelineData::primitive_mode)
enum class RPPrimitiveTopology { Triangles, Lines };
// Blend factor for the color/alpha blend equations (see PipelineData's blend factor fields)
enum class RPBlendFactor {
One,
Zero,
SrcColor,
DstColor,
OneMinusSrcColor,
OneMinusDstColor,
SrcAlpha,
DstAlpha,
OneMinusSrcAlpha,
OneMinusDstAlpha
};
// Render queue a material belongs to (see PipelineData::render_queue)
enum class RenderQueue { Transparent, Opaque, Cutout };
// Controls the scissor test for a pipeline (see PipelineData::scissor_mode)
enum class ScissorTestMode {
Off,
StaticScissorRect,
DynamicScissorRect,
};
// Whether TextureFormat::width/height are relative to the screen size or absolute pixel counts
enum class TextureDimensionType { ScreenRelative, Absolute };
// What a renderpack-declared image is used for (see TextureCreateInfo::usage)
enum class ImageUsage {
RenderTarget,
SampledImage,
};
/*!
 * \brief Defines a sampler to use for a texture
 *
 * TODO(review): how a sampler is associated with a texture is not yet documented -
 * pin down the association once it is decided
 */
struct SamplerCreateInfo {
// Name the sampler is referred to by elsewhere in the renderpack
std::string name;
/*!
 * \brief What kind of texture filter to use
 *
 * texel_aa does something that I don't want to figure out right now. Bilinear is your regular bilinear filter,
 * and point is the point filter. Aniso isn't an option and I kinda hope it stays that way
 */
TextureFilter filter{};
/*!
 * \brief How the texture should wrap at the edges
 */
WrapMode wrap_mode{};
static SamplerCreateInfo from_json(const nlohmann::json& json);
};
/*!
 * \brief Stencil operations and comparison state for one face (front or back)
 *
 * Used by PipelineData::front_face / PipelineData::back_face. Field meanings follow
 * the usual graphics-API stencil semantics - assumed Vulkan-style; confirm in the
 * conversion code
 */
struct StencilOpState {
// Operation when the stencil test fails
RPStencilOp fail_op;
// Operation when the stencil (and depth) tests pass
RPStencilOp pass_op;
// Operation when the stencil test passes but the depth test fails
RPStencilOp depth_fail_op;
// Comparison function for the stencil test
RPCompareOp compare_op;
// Bitmask applied when reading the stencil buffer
uint32_t compare_mask;
// Bitmask applied when writing the stencil buffer
uint32_t write_mask;
static StencilOpState from_json(const nlohmann::json& json);
};
// One shader stage's compiled code
struct RenderpackShaderSource {
// The file the shader was loaded from
std::string filename;
// Compiled shader words - uint32_t elements suggest SPIR-V; TODO(review) confirm
std::vector<uint32_t> source;
};
/*!
 * \brief All the data that Nova uses to build a pipeline
 */
struct PipelineData {
/*!
 * \brief The name of this pipeline
 */
std::string name;
/*!
 * \brief The pipeline that this pipeline inherits from
 */
std::optional<std::string> parent_name;
/*!
 * \brief The name of the pass that this pipeline belongs to
 */
std::string pass;
/*!
 * \brief All of the symbols in the shader that are defined by this state
 */
std::vector<std::string> defines{};
/*!
 * \brief Defines the rasterizer state that's active for this pipeline
 */
std::vector<RasterizerState> states{};
/*!
 * \brief The stencil buffer operations to perform on the front faces
 */
std::optional<StencilOpState> front_face;
/*!
 * \brief The stencil buffer operations to perform on the back faces
 */
std::optional<StencilOpState> back_face;
/*!
 * \brief The material to use if this one's shaders can't be found
 */
std::optional<std::string> fallback;
/*!
 * \brief A bias to apply to the depth
 */
float depth_bias{};
/*!
 * \brief The depth bias, scaled by slope I guess?
 */
float slope_scaled_depth_bias{};
/*!
 * \brief The reference value to use for the stencil test
 */
uint32_t stencil_ref{};
/*!
 * \brief The mask to use when reading from the stencil buffer
 */
uint32_t stencil_read_mask{};
/*!
 * \brief The mask to use when writing to the stencil buffer
 */
uint32_t stencil_write_mask{};
/*!
 * \brief How to handle MSAA for this state
 */
MsaaSupport msaa_support{};
/*!
 * \brief How vertices are assembled into primitives (triangles or lines)
 */
RPPrimitiveTopology primitive_mode{};
/*!
 * \brief Where to get the blending factor for the source
 */
RPBlendFactor source_color_blend_factor{};
/*!
 * \brief Where to get the blending factor for the destination
 */
RPBlendFactor destination_color_blend_factor{};
/*!
 * \brief How to get the source alpha in a blend
 */
RPBlendFactor source_alpha_blend_factor{};
/*!
 * \brief How to get the destination alpha in a blend
 */
RPBlendFactor destination_alpha_blend_factor{};
/*!
 * \brief The function to use for the depth test
 */
RPCompareOp depth_func{};
/*!
 * \brief The render queue that this pass belongs to
 *
 * This may or may not be removed depending on what is actually needed by Nova
 */
RenderQueue render_queue{};
// Scissor test behavior; defaults to no scissor test
ScissorTestMode scissor_mode = ScissorTestMode::Off;
// The vertex stage is mandatory; all other stages are optional
RenderpackShaderSource vertex_shader{};
std::optional<RenderpackShaderSource> geometry_shader;
std::optional<RenderpackShaderSource> tessellation_control_shader;
std::optional<RenderpackShaderSource> tessellation_evaluation_shader;
std::optional<RenderpackShaderSource> fragment_shader;
static PipelineData from_json(const nlohmann::json& json);
};
struct TextureFormat {
/*!
 * \brief The format of the texture
 */
rhi::PixelFormat pixel_format{};
/*!
 * \brief How to interpret the dimensions of this texture
 */
TextureDimensionType dimension_type{};
/*!
 * \brief The width, in pixels, of the texture
 *
 * Interpreted per dimension_type - presumably a fraction of the screen size when
 * ScreenRelative; see get_size_in_pixels. TODO(review) confirm
 */
float width = 0;
/*!
 * \brief The height, in pixels, of the texture
 *
 * Interpreted per dimension_type, same as width
 */
float height = 0;
// Resolves width/height against the given screen size into concrete pixel dimensions
[[nodiscard]] glm::uvec2 get_size_in_pixels(const glm::uvec2& screen_size) const;
bool operator==(const TextureFormat& other) const;
bool operator!=(const TextureFormat& other) const;
static TextureFormat from_json(const nlohmann::json& json);
};
/*!
 * \brief A texture that a pass can use
 */
struct TextureCreateInfo {
/*!
 * \brief The name of the texture
 *
 * Nova implicitly defines a few textures for you to use:
 * - NovaColorVirtualTexture
 * - Virtual texture atlas that holds color textures
 * - Textures which have the exact name as requested by Minecraft are in this atlas
 * - Things without a color texture get a pure white texture
 * - Always has a format of R8G8B8A8
 * - Can only be used as a pass's input
 * - NovaNormalVirtualTexture
 * - Virtual texture atlas that holds normal textures
 * - Textures which have `_n` after the name requested by Minecraft are in this atlas
 * - If no normal texture exists for a given object, a texture with RGBA of (0, 0, 1, 1) is used
 * - Always has a format of R8G8B8A8
 * - Can only be used as a pass's input
 * - NovaDataVirtualTexture
 * - Virtual texture atlas that holds data textures
 * - Textures which have a `_s` after the name requested by Minecraft are in this atlas
 * - If no data texture exists for a given object, a texture with an RGBA of (0, 0, 0, 0) is used
 * - Always has a format of R8G8B8A8
 * - Can only be used as a pass's input
 * - NovaLightmap
 * - Minecraft lightmap, loaded from the current resourcepack
 * - Format of RGBA8
 * - Can only be used as an input
 * - NovaFinal
 * - The texture that gets presented to the screen
 * - Always has a format of RGB8
 * - Can only be used as a pass's output
 *
 * If you use one of the virtual textures, then all fields except the binding are ignored
 * If you use `NovaFinal`, then all fields are ignored since the backbuffer is always bound to output location 0
 */
std::string name;
// TODO: Renderpack developers shouldn't have to worry about this
// Whether the image is a render target or sampled in shaders
ImageUsage usage;
TextureFormat format{};
static TextureCreateInfo from_json(const nlohmann::json& json);
};
// Resources declared by the renderpack's resources file
struct RenderpackResourcesData {
// Dynamic textures the renderpack declares
std::vector<TextureCreateInfo> render_targets;
// Samplers the renderpack declares
std::vector<SamplerCreateInfo> samplers;
static RenderpackResourcesData from_json(const nlohmann::json& json);
};
/*!
 * \brief A description of a texture that a render pass outputs to
 */
struct TextureAttachmentInfo {
/*!
 * \brief The name of the texture
 */
std::string name{};
// The pixel format of the attachment
rhi::PixelFormat pixel_format;
/*!
 * \brief Whether to clear it
 *
 * If the texture is a depth buffer, it gets cleared to 1
 * If the texture is a stencil buffer, it gets cleared to 0xFFFFFFFF
 * If the texture is a color buffer, it gets cleared to (0, 0, 0, 0)
 */
bool clear = false;
bool operator==(const TextureAttachmentInfo& other) const;
static TextureAttachmentInfo from_json(const nlohmann::json& json);
};
/*!
 * \brief A pass over the scene
 *
 * A pass has a few things:
 * - What passes MUST be executed before this one
 * - What inputs this pass's shaders have
 * - What uniform buffers to use
 * - What vertex data to use
 * - Any textures that are needed
 * - What outputs this pass has
 * - Framebuffer attachments
 * - Write buffers
 *
 * The inputs and outputs of a pass must be resources declared in the renderpack's `resources.json` file (or the
 * default resources.json), or a resource that's internal to Nova. For example, Nova provides a UBO of uniforms that
 * change per frame, a UBO for per-model data like the model matrix, and the virtual texture atlases. The default
 * resources.json file sets up sixteen framebuffer color attachments for ping-pong buffers, a depth attachment,
 * some shadow maps, etc
 */
struct RenderPassCreateInfo {
/*!
 * \brief The name of this render pass
 */
std::string name;
/*!
 * \brief The textures that this pass will read from
 */
std::vector<std::string> texture_inputs{};
/*!
 * \brief The textures that this pass will write to
 */
std::vector<TextureAttachmentInfo> texture_outputs{};
/*!
 * \brief The depth texture this pass will write to
 */
std::optional<TextureAttachmentInfo> depth_texture;
/*!
 * \brief All the buffers that this renderpass reads from
 */
std::vector<std::string> input_buffers{};
/*!
 * \brief All the buffers that this renderpass writes to
 */
std::vector<std::string> output_buffers{};
/*!
 * \brief Names of all the pipelines that use this renderpass
 */
std::vector<std::string> pipeline_names;
RenderPassCreateInfo() = default;
static RenderPassCreateInfo from_json(const nlohmann::json& json);
};
/*!
 * \brief All the data to create one rendergraph, including which builtin passes the renderpack wants to use in its rendergraph
 */
struct RendergraphData {
/*!
 * \brief The renderpack-supplied passes
 */
std::vector<RenderPassCreateInfo> passes;
/*!
 * \brief Names of all the builtin renderpasses that the renderpack wants to use
 */
std::vector<std::string> builtin_passes;
static RendergraphData from_json(const nlohmann::json& json);
};
// One pass of one material: which pipeline draws it and what resources it binds
struct MaterialPass {
// Name of this pass within the material
std::string name;
// Name of the material this pass belongs to
std::string material_name;
// Name of the pipeline that draws this pass
std::string pipeline;
// Map from shader resource name to the renderpack resource bound to it
std::unordered_map<std::string, std::string> bindings;
/*!
 * \brief All the descriptor sets needed to bind everything used by this material to its pipeline
 *
 * All the material's resources get bound to its descriptor sets when the material is created. Updating
 * descriptor sets is allowed, although the result won't show up on screen for a couple frames because Nova
 * (will) copies its descriptor sets to each in-flight frame
 */
std::vector<vk::DescriptorSet> descriptor_sets;
static MaterialPass from_json(const nlohmann::json& json);
};
// One renderpack material: a named group of passes plus a geometry filter
struct MaterialData {
std::string name;
// The passes that draw this material
std::vector<MaterialPass> passes;
// Filter string selecting which geometry this material applies to - TODO(review) document the syntax
std::string geometry_filter;
static MaterialData from_json(const nlohmann::json& json);
};
/*!
 * \brief All the data that can be in a renderpack
 */
struct RenderpackData {
std::vector<PipelineData> pipelines;
/*!
 * \brief All the renderpasses that this renderpack needs, in submission order
 */
RendergraphData graph_data;
std::vector<MaterialData> materials;
// Render targets and samplers declared by the renderpack
RenderpackResourcesData resources;
// Name of the renderpack itself
std::string name;
};
// Parsing helpers: convert a renderpack JSON string into the corresponding enum value
[[nodiscard]] rhi::PixelFormat pixel_format_enum_from_string(const std::string& str);
[[nodiscard]] TextureDimensionType texture_dimension_type_enum_from_string(const std::string& str);
[[nodiscard]] TextureFilter texture_filter_enum_from_string(const std::string& str);
[[nodiscard]] WrapMode wrap_mode_enum_from_string(const std::string& str);
[[nodiscard]] RPStencilOp stencil_op_enum_from_string(const std::string& str);
[[nodiscard]] RPCompareOp compare_op_enum_from_string(const std::string& str);
[[nodiscard]] MsaaSupport msaa_support_enum_from_string(const std::string& str);
[[nodiscard]] RPPrimitiveTopology primitive_topology_enum_from_string(const std::string& str);
[[nodiscard]] RPBlendFactor blend_factor_enum_from_string(const std::string& str);
[[nodiscard]] RenderQueue render_queue_enum_from_string(const std::string& str);
[[nodiscard]] ScissorTestMode scissor_test_mode_from_string(const std::string& str);
[[nodiscard]] RasterizerState state_enum_from_string(const std::string& str);
// Same conversions, taking the JSON value directly
[[nodiscard]] rhi::PixelFormat pixel_format_enum_from_json(const nlohmann::json& j);
[[nodiscard]] TextureDimensionType texture_dimension_type_enum_from_json(const nlohmann::json& j);
[[nodiscard]] TextureFilter texture_filter_enum_from_json(const nlohmann::json& j);
[[nodiscard]] WrapMode wrap_mode_enum_from_json(const nlohmann::json& j);
[[nodiscard]] RPStencilOp stencil_op_enum_from_json(const nlohmann::json& j);
[[nodiscard]] RPCompareOp compare_op_enum_from_json(const nlohmann::json& j);
[[nodiscard]] MsaaSupport msaa_support_enum_from_json(const nlohmann::json& j);
[[nodiscard]] RPPrimitiveTopology primitive_topology_enum_from_json(const nlohmann::json& j);
[[nodiscard]] RPBlendFactor blend_factor_enum_from_json(const nlohmann::json& j);
[[nodiscard]] RenderQueue render_queue_enum_from_json(const nlohmann::json& j);
[[nodiscard]] ScissorTestMode scissor_test_mode_from_json(const nlohmann::json& j);
[[nodiscard]] RasterizerState state_enum_from_json(const nlohmann::json& j);
// Serialization/debug helpers: convert an enum back into its string form
[[nodiscard]] std::string to_string(rhi::PixelFormat val);
[[nodiscard]] std::string to_string(TextureDimensionType val);
[[nodiscard]] std::string to_string(TextureFilter val);
[[nodiscard]] std::string to_string(WrapMode val);
[[nodiscard]] std::string to_string(RPStencilOp val);
[[nodiscard]] std::string to_string(RPCompareOp val);
[[nodiscard]] std::string to_string(MsaaSupport val);
[[nodiscard]] std::string to_string(RPPrimitiveTopology val);
[[nodiscard]] std::string to_string(RPBlendFactor val);
[[nodiscard]] std::string to_string(RenderQueue val);
[[nodiscard]] std::string to_string(RasterizerState val);
// Width of one pixel of `format` - presumably bytes per pixel; TODO(review) confirm units
[[nodiscard]] uint32_t pixel_format_to_pixel_width(rhi::PixelFormat format);
} // namespace nova::renderer::renderpack
| 18,273
|
C++
|
.h
| 462
| 31.233766
| 131
| 0.6238
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,225
|
ui_renderer.hpp
|
NovaMods_nova-renderer/include/nova_renderer/ui_renderer.hpp
|
#pragma once
#include "nova_renderer/rhi/forward_decls.hpp"
#include "rendergraph.hpp"
namespace nova::renderer {
struct FrameContext;
/*!
 * \brief Base class for the renderpass that draws the host application's UI
 *
 * Nova drives the renderpass itself (record_renderpass_contents); subclasses only
 * implement render_ui to record their UI draw calls
 */
class UiRenderpass : public Renderpass {
public:
UiRenderpass();
UiRenderpass(UiRenderpass&& old) noexcept = default;
UiRenderpass& operator=(UiRenderpass&& old) noexcept = default;
// The builtin create info that describes the UI renderpass to the rendergraph
static const renderpack::RenderPassCreateInfo& get_create_info();
protected:
// Sealed: subclasses customize rendering through render_ui, not by overriding this
void record_renderpass_contents(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) override final;
/*!
 * \brief Renders the host application's UI
 *
 * Clients of Nova must provide their own implementation of `UiRenderpass`. Nova will then use that implementation to render that
 * application's UI
 */
virtual void render_ui(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) = 0;
};
// UI renderpass used when the host application provides none; its render_ui draws nothing
class NullUiRenderpass final : public UiRenderpass {
public:
~NullUiRenderpass() override = default;
protected:
void render_ui(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) override;
};
} // namespace nova::renderer
| 1,162
|
C++
|
.h
| 28
| 34.678571
| 137
| 0.696619
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,226
|
material.hpp
|
NovaMods_nova-renderer/include/nova_renderer/material.hpp
|
#pragma once
#include <cstdint>
namespace nova::renderer {
// Handle identifying a texture within Nova's material system -
// TODO(review): document where these IDs are allocated
using TextureId = uint32_t;
} // namespace nova::renderer
| 123
|
C++
|
.h
| 5
| 22.4
| 31
| 0.75
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,227
|
camera.hpp
|
NovaMods_nova-renderer/include/nova_renderer/camera.hpp
|
#pragma once
#include <string>
#include "resource_loader.hpp"
namespace nova::renderer {
class NovaRenderer;
// Index of a camera in NovaRenderer's camera array (see Camera::index)
using CameraIndex = uint32_t;
/*!
 * \brief Create info to initialize a camera
 */
struct CameraCreateInfo {
/*!
 * \brief Name of the new camera
 */
std::string name;
/*!
 * \brief Vertical field of view
 */
float field_of_view = 90.0f;
/*!
 * \brief Aspect ratio of the camera
 */
float aspect_ratio = 16.0f / 9.0f;
/*!
 * \brief Near plane of the camera. Corresponds to a value of 1 in the depth buffer
 */
float near_plane = 0.001f;
/*!
 * \brief Far plane of the camera. Corresponds to a value of 0 in the depth buffer
 */
float far_plane = 1000.0f;
};
/*!
 * \brief Data for a camera's UBO
 *
 * Previous-frame matrices are kept alongside the current ones (e.g. for
 * temporal effects - TODO(review) confirm intended use)
 */
struct CameraUboData {
/*!
 * \brief Current frame's view matrix
 */
glm::mat4 view;
/*!
 * \brief Current frame's projection matrix
 */
glm::mat4 projection;
/*!
 * \brief Previous frame's view matrix
 */
glm::mat4 previous_view;
/*!
 * \brief Previous frame's projection matrix
 */
glm::mat4 previous_projection;
};
class Camera {
friend class NovaRenderer;
public:
// Inactive cameras exist but are not rendered from - TODO(review) confirm
bool is_active = false;
float aspect_ratio;
/*!
 * \brief Vertical field of view
 *
 * A non-zero number means the camera renders things in worldspace, while zero means that this camera renders things
 * orthographically in screen space
 */
float field_of_view;
/*!
 * \brief Near clipping plane
 *
 * Objects at this worldspace distance from the camera will have a value of 1 in the depth buffer
 */
float near_plane;
/*!
 * \brief Far clipping plane
 *
 * Objects at this worldspace distance from the camera will have a value of 0 in the depth buffer
 */
float far_plane;
glm::vec3 position{};
glm::vec3 rotation{};
/*!
 * \brief Index of this camera in the camera array
 */
CameraIndex index{};
explicit Camera(const CameraCreateInfo& create_info);
Camera(const Camera& other) = default;
Camera& operator=(const Camera& other) = default;
Camera(Camera&& old) noexcept = default;
Camera& operator=(Camera&& old) noexcept = default;
~Camera() = default;
[[nodiscard]] const std::string& get_name() const;
private:
std::string name;
};
// Stable accessor into NovaRenderer's camera vector
using CameraAccessor = VectorAccessor<Camera>;
} // namespace nova::renderer
| 2,863
|
C++
|
.h
| 94
| 21.904255
| 124
| 0.564964
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,228
|
renderables.hpp
|
NovaMods_nova-renderer/include/nova_renderer/renderables.hpp
|
#pragma once
#include <atomic>
#include <glm/glm.hpp>
#include <string>
#include <vector>
namespace nova::renderer {
// The single vertex format used for all geometry; see MeshData below for rationale
struct FullVertex {
glm::vec3 position; // 12 bytes
glm::vec3 normal; // 12 bytes
glm::vec3 tangent; // 12 bytes
uint32_t main_uv; // 4 bytes
uint32_t secondary_uv; // 4 bytes
uint32_t virtual_texture_id; // 4 bytes
glm::vec4 additional_stuff; // 16 bytes (a vec4 is 16, not 12 - total is 64, which is what lets the assert below pass)
};
static_assert(sizeof(FullVertex) % 16 == 0, "full_vertex struct is not aligned to 16 bytes!");
/*!
 * \brief All the data needed to make a single mesh
 *
 * Meshes all have the same data. Chunks need all the mesh data, and they're most of the world. Entities, GUI,
 * particles, etc will probably have FAR fewer vertices than chunks, meaning that there's not a huge savings by
 * making them use special vertex formats
 */
struct MeshData {
// Number of vertex attributes in the vertex data - TODO(review) confirm per-vertex vs total
size_t num_vertex_attributes{};
// Number of indices in the index data
uint32_t num_indices{};
/*!
 * \brief Pointer to the vertex data of this mesh
 */
const void* vertex_data_ptr{};
/*!
 * \brief Number of bytes of vertex data
 */
size_t vertex_data_size{};
/*!
 * \brief Pointer to the index data of this mesh
 */
const void* index_data_ptr{};
/*!
 * \brief Number of bytes of index data
 */
size_t index_data_size{};
};
// Handle identifying a mesh uploaded through NovaRenderer
using MeshId = uint64_t;
// Mutable per-renderable state that can be changed after creation
struct StaticMeshRenderableUpdateData {
glm::vec3 position{};
glm::vec3 rotation{};
glm::vec3 scale{1};
// Invisible renderables are kept but not drawn
bool visible = true;
};
// Everything needed to create a static mesh renderable
struct StaticMeshRenderableCreateInfo : StaticMeshRenderableUpdateData {
// Static renderables are not expected to move - TODO(review) confirm how this is used
bool is_static = true;
// The mesh this renderable draws
MeshId mesh{};
};
// Handle identifying a renderable created through NovaRenderer
using RenderableId = uint64_t;

enum class RenderableType {
    StaticMesh,
    ProceduralMesh,
};

// Monotonic source of unique renderable IDs.
//
// `inline` (C++17) instead of `static`: a `static` variable in a header gives every
// translation unit that includes it a separate counter, so two TUs could hand out
// the same RenderableId. An inline variable has exactly one instance program-wide.
inline std::atomic<RenderableId> next_renderable_id;
// Bookkeeping about a renderable: its ID and the renderpasses it is drawn in
struct RenderableMetadata {
RenderableId id = 0;
std::vector<std::string> passes{};
};
// Per-renderable data consumed when recording draws
struct RenderCommand {
RenderableId id{};
bool is_visible = true;
// Worldspace transform; identity by default
glm::mat4 model_matrix{1};
};
struct StaticMeshRenderCommand : RenderCommand {};
// Builds a render command from create info, baking position/rotation/scale into the model matrix
StaticMeshRenderCommand make_render_command(const StaticMeshRenderableCreateInfo& data, RenderableId id);
} // namespace nova::renderer
| 2,466
|
C++
|
.h
| 72
| 27
| 115
| 0.61097
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,230
|
nova_renderer.hpp
|
NovaMods_nova-renderer/include/nova_renderer/nova_renderer.hpp
|
#pragma once
#include <rx/core/log.h>
#include <unordered_map>
#include <optional>
#include <rx/core/ptr.h>
#include "nova_renderer/camera.hpp"
#include "nova_renderer/constants.hpp"
#include "nova_renderer/filesystem/virtual_filesystem.hpp"
#include "nova_renderer/nova_settings.hpp"
#include "nova_renderer/per_frame_device_array.hpp"
#include "nova_renderer/procedural_mesh.hpp"
#include "nova_renderer/renderables.hpp"
#include "nova_renderer/renderdoc_app.h"
#include "nova_renderer/rendergraph.hpp"
#include "nova_renderer/resource_loader.hpp"
#include "nova_renderer/rhi/forward_decls.hpp"
#include "nova_renderer/rhi/render_device.hpp"
#include "nova_renderer/util/container_accessor.hpp"
#include "../../src/renderer/material_data_buffer.hpp"
// Forward declaration of the rex allocator type used below
namespace rx {
namespace memory {
struct bump_point_allocator;
}
} // namespace rx
// Global init/teardown of the rex runtime - TODO(review): document when the host must call these
void init_rex();
void rex_fini();
// Forward declarations so this header doesn't pull in SPIRV-Cross
namespace spirv_cross {
class CompilerGLSL;
struct Resource;
} // namespace spirv_cross
namespace nova::renderer {
using LogHandles = std::vector<rx::log::queue_event::handle>;
/*!
 * \brief Registers a log message writing function
 *
 * This function removes any previously registered logging handler, replacing it with the provided function
 *
 * If you don't call this function, Nova will send all log messages to `stdout`
 *
 * You may manually unregister your handlers by calling `LogHandles::clear()`, but you don't need to. This function is intentionally not
 * marked `[[nodiscard]]` because doing things with the handles is completely optional
 */
template <typename LogHandlerFunc>
LogHandles& set_logging_handler(LogHandlerFunc&& log_handler);
class UiRenderpass;
namespace rhi {
class Swapchain;
}
#pragma region Runtime optimized data
// GPU-resident mesh: the uploaded vertex/index buffers plus the counts needed to draw it
struct Mesh {
rhi::RhiBuffer* vertex_buffer = nullptr;
rhi::RhiBuffer* index_buffer = nullptr;
uint32_t num_indices = 0;
size_t num_vertex_attributes{};
};
#pragma endregion
// Accessor into NovaRenderer's procedural mesh map
using ProceduralMeshAccessor = MapAccessor<MeshId, ProceduralMesh>;
/*!
 * \brief Main class for Nova. Owns all of Nova's resources and provides a way to access them
 * This class exists as a singleton so it's always available
 */
class NovaRenderer {
public:
/*!
 * \brief Initializes the Nova Renderer
 */
explicit NovaRenderer(const NovaSettings& settings);
NovaRenderer(NovaRenderer&& other) noexcept = delete;
NovaRenderer& operator=(NovaRenderer&& other) noexcept = delete;
NovaRenderer(const NovaRenderer& other) = delete;
NovaRenderer& operator=(const NovaRenderer& other) = delete;
~NovaRenderer();
/*!
 * \brief Loads the renderpack with the given name
 *
 * This method will first try to load from the `renderpacks/` folder. If Nova can't find the renderpack there, it will try to load
 * it from the `shaderpacks/` directory (mimicking Optifine shaders). If the renderpack isn't found there, it'll try to load it from
 * the `resourcepacks/` directory (mimicking Bedrock shaders)
 *
 * Loading a renderpack will cause a stall in the GPU. Nova will have to wait for all in-flight frames to finish, then replace the
 * current renderpack with the new one, then start rendering. Replacing the renderpack might also require reloading all chunks, if
 * the new renderpack has different geometry filters then the current one
 *
 * \param renderpack_name The name of the renderpack to load
 */
void load_renderpack(const std::string& renderpack_name);
/*!
 * \brief Gives Nova a function to use to render UI
 *
 * This function will be executed inside the builtin UI renderpass. This renderpass takes the output of the 3D renderer, adds the UI
 * on top of it, and writes that all to the backbuffer
 *
 * The first parameter to the function is the command list it must record UI rendering into, and the second parameter is the
 * rendering context for the current frame
 *
 * Before calling the UI render function, Nova records commands to begin a renderpass with one RGBA8 color attachment and one D24S8
 * depth/stencil attachment. After calling this function, Nova records commands to end that same renderpass. This allows the host
 * application to only care about rendering the UI, instead of worrying about any pass scheduling concerns
 *
 * \return A pointer to the newly created renderpass
 */
template <typename RenderpassType, typename... Args>
RenderpassType* create_ui_renderpass(Args&&... args);
[[nodiscard]] const std::vector<MaterialPass>& get_material_passes_for_pipeline(const std::string& pipeline);
[[nodiscard]] std::optional<RenderpassMetadata> get_renderpass_metadata(const std::string& renderpass_name) const;
/*!
 * \brief Executes a single frame
 */
void execute_frame();
[[nodiscard]] NovaSettingsAccessManager& get_settings();
[[nodiscard]] rx::memory::allocator& get_global_allocator() const;
#pragma region Meshes
/*!
 * \brief Tells Nova how many meshes you expect to have in your scene
 *
 * Allows the Nova Renderer to preallocate space for your meshes
 *
 * \param num_meshes The number of meshes you expect to have
 */
void set_num_meshes(uint32_t num_meshes);
/*!
 * \brief Creates a new mesh and uploads its data to the GPU, returning the ID of the newly created mesh
 *
 * \param mesh_data The mesh's initial data
 */
[[nodiscard]] MeshId create_mesh(const MeshData& mesh_data);
/*!
 * \brief Creates a procedural mesh, returning both its mesh id and
 */
[[nodiscard]] ProceduralMeshAccessor create_procedural_mesh(uint64_t vertex_size, uint64_t index_size);
[[nodiscard]] std::optional<Mesh> get_mesh(MeshId mesh);
/*!
 * \brief Destroys the mesh with the provided ID, freeing up whatever VRAM it was using
 *
 * In debug builds, this method checks that no renderables are using the mesh
 *
 * \param mesh_to_destroy The handle of the mesh you want to destroy
 */
void destroy_mesh(MeshId mesh_to_destroy);
#pragma endregion
#pragma region Resources
[[nodiscard]] rhi::RhiSampler* get_point_sampler() const;
#pragma endregion
#pragma region Materials
/*!
 * \brief Creates a new material of the specified type
 *
 * \return A pointer to the new material, or nullptr if the material can't be created for whatever reason
 */
template <typename MaterialType>
[[nodiscard]] std::pair<uint32_t, MaterialType*> create_material();
/*!
 * \brief Gets the pipeline with the provided name
 *
 * \param pipeline_name Name of the pipeline to find
 *
 * \return The pipeline if it exists, or nullptr if it does not
 */
[[nodiscard]] Pipeline* find_pipeline(const std::string& pipeline_name);
#pragma endregion
[[nodiscard]] RenderableId add_renderable_for_material(const FullMaterialPassName& material_name,
const StaticMeshRenderableCreateInfo& create_info);
/*!
 * \brief Updates a renderable's information
 *
 * \param renderable The renderable to update
 * \param update_data The new data for the renderable
 */
void update_renderable(RenderableId renderable, const StaticMeshRenderableUpdateData& update_data);
[[nodiscard]] CameraAccessor create_camera(const CameraCreateInfo& create_info);
[[nodiscard]] rhi::RenderDevice& get_device() const;
[[nodiscard]] NovaWindow& get_window() const;
[[nodiscard]] DeviceResources& get_resource_manager() const;
private:
NovaSettingsAccessManager settings;
// The RHI device everything is created on
std::unique_ptr<rhi::RenderDevice> device;
std::unique_ptr<NovaWindow> window;
rhi::Swapchain* swapchain;
// RenderDoc API entry points - presumably null when RenderDoc isn't attached; TODO(review) confirm
RENDERDOC_API_1_3_0* render_doc;
rhi::RhiSampler* point_sampler;
// Mesh used to draw fullscreen effects (see create_builtin_meshes)
MeshId fullscreen_triangle_id;
std::unique_ptr<DeviceResources> device_resources;
rhi::RhiDescriptorPool* global_descriptor_pool;
// Persistently-mapped pointer into staging memory - TODO(review) document the owning buffer
void* staging_buffer_memory_ptr;
#pragma region Initialization
void create_global_allocators();
static void initialize_virtual_filesystem();
void create_global_sync_objects();
void create_global_samplers();
void create_resource_storage();
void create_builtin_render_targets();
void create_builtin_uniform_buffers();
void create_builtin_meshes();
void create_renderpass_manager();
// MUST be called when the swapchain size changes
void create_builtin_renderpasses();
void create_builtin_pipelines();
#pragma endregion
#pragma region Renderpack
struct PipelineReturn {
Pipeline pipeline;
PipelineMetadata metadata;
};
bool renderpacks_loaded = false;
// Guards renderpack load/unload against concurrent access
std::mutex renderpacks_loading_mutex;
std::optional<renderpack::RenderpackData> loaded_renderpack;
std::unique_ptr<Rendergraph> rendergraph;
#pragma endregion
#pragma region Rendergraph
std::unordered_map<std::string, rhi::RhiImage*> builtin_images;
std::unordered_map<std::string, renderer::Renderpass*> builtin_renderpasses;
// Create infos for textures created from the loaded renderpack, kept for teardown/queries
std::unordered_map<std::string, renderpack::TextureCreateInfo> dynamic_texture_infos;
void create_dynamic_textures(const std::vector<renderpack::TextureCreateInfo>& texture_create_infos);
void create_render_passes(const std::vector<renderpack::RenderPassCreateInfo>& pass_create_infos,
const std::vector<renderpack::PipelineData>& pipelines) const;
void destroy_dynamic_resources();
void destroy_renderpasses();
#pragma endregion
#pragma region Rendering pipelines
/*!
 * \brief Map from pipeline name to all the material passes that use that pipeline
 */
std::unordered_map<std::string, std::vector<MaterialPass>> passes_by_pipeline;
std::unordered_map<FullMaterialPassName, MaterialPassMetadata> material_metadatas;
void create_pipelines_and_materials(const std::vector<renderpack::PipelineData>& pipeline_create_infos,
const std::vector<renderpack::MaterialData>& materials);
void create_materials_for_pipeline(const renderer::Pipeline& pipeline,
const std::vector<renderpack::MaterialData>& materials,
const std::string& pipeline_name);
void destroy_pipelines();
void destroy_materials();
#pragma endregion
#pragma region Meshes
// Next MeshId to hand out from create_mesh
MeshId next_mesh_id = 0;
std::unordered_map<MeshId, Mesh> meshes;
std::unordered_map<MeshId, ProceduralMesh> proc_meshes;
#pragma endregion
#pragma region Rendering
// Total frames rendered since startup
uint64_t frame_count = 0;
// Index of the current in-flight frame
uint8_t cur_frame_idx = 0;
std::vector<std::string> builtin_buffer_names;
uint32_t cur_model_matrix_index = 0;
// One fence per in-flight frame, waited on before reusing that frame's resources
std::vector<rhi::RhiFence*> frame_fences;
std::unordered_map<FullMaterialPassName, MaterialPassKey> material_pass_keys;
std::unordered_map<std::string, Pipeline> pipelines;
std::unique_ptr<MaterialDataBuffer> material_buffer;
std::vector<BufferResourceAccessor> material_device_buffers;
// Locates one renderable within the pipeline/material/batch hierarchy
struct RenderableKey {
std::string pipeline_name{};
uint32_t material_pass_idx{};
RenderableType type{};
uint32_t batch_idx{};
uint32_t renderable_idx{};
};
std::unordered_map<RenderableId, RenderableKey> renderable_keys;
std::vector<Camera> cameras;
// CPU-side camera UBO data mirrored into one GPU buffer per in-flight frame
std::unique_ptr<PerFrameDeviceArray<CameraUboData>> camera_data;
void update_camera_matrix_buffer(uint32_t frame_idx);
std::vector<rhi::RhiImage*> get_all_images();
#pragma endregion
};
template <typename RenderpassType, typename... Args>
RenderpassType* NovaRenderer::create_ui_renderpass(Args&&... args) {
    // The rendergraph owns all renderpasses; we just forward construction
    // arguments to it, along with the renderer's resource manager
    auto& graph = *rendergraph;
    return graph.create_renderpass<RenderpassType>(*device_resources, std::forward<Args>(args)...);
}
template <typename MaterialType>
std::pair<uint32_t, MaterialType*> NovaRenderer::create_material() {
    // Reserve a slot in the material buffer and hand back both the slot index
    // and a pointer to its storage. The index is part of the contract: shaders
    // look materials up by index, so a bare pointer would not be enough
    const auto material_idx = material_buffer->get_next_free_index<MaterialType>();
    auto* material_ptr = &material_buffer->at<MaterialType>(material_idx);
    return std::make_pair(material_idx, material_ptr);
}
} // namespace nova::renderer
| 13,154
|
C++
|
.h
| 273
| 39.70696
| 140
| 0.675031
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,231
|
procedural_mesh.hpp
|
NovaMods_nova-renderer/include/nova_renderer/procedural_mesh.hpp
|
#pragma once
#include <array>
#include <rx/core/concepts/no_copy.h>
#include <string>
#include <cstdint>
#include "nova_renderer/constants.hpp"
#include "nova_renderer/rhi/forward_decls.hpp"
namespace nova::renderer {
/*!
 * \brief ProceduralMesh is a mesh which the user will modify every frame
 *
 * ProceduralMesh should _not_ be used if you're not going to update the mesh frequently. It stores four copies of the mesh data - once
 * in host memory, and three times in device memory (one for each in-flight frame)
 */
class ProceduralMesh : rx::concepts::no_copy {
public:
    //! The per-frame device buffers for this mesh
    struct Buffers {
        rhi::RhiBuffer* vertex_buffer;
        rhi::RhiBuffer* index_buffer;
    };

    ProceduralMesh() = default;

    /*!
     * \brief Creates a new procedural mesh which has the specified amount of space
     *
     * \param vertex_buffer_size The number of bytes the vertex buffer needs
     * \param index_buffer_size The number of bytes that the index buffer needs
     * \param num_in_flight_frames Number of in-flight frames that this proc mesh supports
     * \param device The device to create the buffers on
     * \param name Name of this procedural mesh
     */
    ProceduralMesh(uint64_t vertex_buffer_size,
                   uint64_t index_buffer_size,
                   uint32_t num_in_flight_frames,
                   rhi::RenderDevice* device,
                   const std::string& name = "ProceduralMesh");

    // Move-only (inherits rx::concepts::no_copy); moving transfers buffer ownership
    ProceduralMesh(ProceduralMesh&& old) noexcept;
    ProceduralMesh& operator=(ProceduralMesh&& old) noexcept;

    ~ProceduralMesh() = default;

    /*!
     * \brief Sets the data to upload to the vertex buffer
     *
     * \param data A pointer to the start of the data
     * \param size The number of bytes to upload
     */
    void set_vertex_data(const void* data, uint64_t size);

    /*!
     * \brief Sets the data to upload to the index buffer
     *
     * \param data A pointer to the start of the data
     * \param size The number of bytes to upload
     */
    void set_index_data(const void* data, uint64_t size);

    /*!
     * \brief Records commands to copy the staging buffers to the device buffers for the specified frame
     *
     * Intent is that you call this method at the beginning or the frame that will use the buffers you're uploading to. It's a method on
     * this class mostly because writing accessors is hard
     *
     * \param cmds The command list to record commands into
     * \param frame_idx The index of the frame to write the data to
     */
    void record_commands_to_upload_data(rhi::RhiRenderCommandList* cmds, uint8_t frame_idx) const;

    /*!
     * \brief Returns the vertex and index buffer for the provided frame
     */
    [[nodiscard]] Buffers get_buffers_for_frame(uint8_t frame_idx) const;

private:
    rhi::RenderDevice* device = nullptr;

    std::string name;

    // One vertex/index buffer per in-flight frame
    // NOTE(review): this header uses std::vector but does not include <vector> — presumably pulled in transitively; confirm
    std::vector<rhi::RhiBuffer*> vertex_buffers;
    std::vector<rhi::RhiBuffer*> index_buffers;

    // NOTE(review): presumably the staging/host-visible buffers the set_*_data calls write into — confirm against the .cpp
    rhi::RhiBuffer* cached_vertex_buffer;
    rhi::RhiBuffer* cached_index_buffer;

    // Byte counts staged for the next record_commands_to_upload_data call
    uint64_t num_vertex_bytes_to_upload = 0;
    uint64_t num_index_bytes_to_upload = 0;

    rx::memory::allocator* allocator;

#ifdef NOVA_DEBUG
    // Debug-only capacities; presumably used to detect staging overruns — confirm
    uint64_t vertex_buffer_size;
    uint64_t index_buffer_size;
#endif
};
} // namespace nova::renderer
| 3,586
|
C++
|
.h
| 82
| 34.841463
| 140
| 0.636546
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,232
|
x11_but_good.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/x11_but_good.hpp
|
#pragma once

#include <X11/Xlib.h>

// X11 macros that are bad
// Xlib defines these common identifiers as object-like macros, which collides
// with enum values and type names elsewhere in the codebase. Strip them
// immediately after including the header
#ifdef Always
#undef Always
#endif

#ifdef None
#undef None
#endif

#ifdef Bool
#undef Bool
#endif
| 163
|
C++
|
.h
| 12
| 12.25
| 26
| 0.795918
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,233
|
result.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/result.hpp
|
#pragma once
#include <new>
#include <optional>
#include <string>
#include <utility>
namespace ntl {
struct NovaError;

/*!
 * \brief One error in a chain of errors, with an optional pointer to the error that caused it
 */
struct NovaError {
    // Human-readable description of what went wrong
    std::string message = "";

    // The error that caused this one, if any, forming a linked chain of causes
    // NOTE(review): ownership of `cause` is not visible in this header — confirm who frees it
    NovaError* cause = nullptr;

    explicit NovaError(std::string message);

    NovaError(std::string message, NovaError* cause);

    [[nodiscard]] std::string to_string() const;
};
inline NovaError operator""_err(const char* str, const size_t size) { return NovaError(std::string(str, size)); }
/*!
 * \brief Holds either a success value or an error, in a tagged union
 *
 * Fixes over the previous version:
 * - the rvalue constructor now actually moves instead of copying
 * - copy/move construction and assignment use placement-new: plain assignment
 *   to an inactive union member is undefined behavior for non-trivial types
 * - copy/move construction and assignment now keep `has_value` in sync, and
 *   assignment destroys the currently-active member before writing the new one
 */
template <typename ValueType, typename ErrorType = NovaError>
struct [[nodiscard]] Result {
    union {
        ValueType value;
        ErrorType error;
    };

    bool has_value = false;

    explicit Result(ValueType&& value) : value(std::move(value)), has_value(true) {}

    explicit Result(const ValueType& value) : value(value), has_value(true) {}

    explicit Result(ErrorType error) : error(std::move(error)) {}

    explicit Result(const Result<ValueType, ErrorType>& other) : has_value(other.has_value) {
        // Placement-new activates the correct union member; assignment would be UB
        if(other.has_value) {
            new(&value) ValueType(other.value);
        } else {
            new(&error) ErrorType(other.error);
        }
    }

    Result& operator=(const Result<ValueType, ErrorType>& other) {
        if(this == &other) {
            return *this;
        }
        destroy_active_member();
        if(other.has_value) {
            new(&value) ValueType(other.value);
        } else {
            new(&error) ErrorType(other.error);
        }
        has_value = other.has_value;
        return *this;
    }

    explicit Result(Result<ValueType, ErrorType>&& old) noexcept : has_value(old.has_value) {
        if(old.has_value) {
            new(&value) ValueType(std::move(old.value));
        } else {
            new(&error) ErrorType(std::move(old.error));
        }
    }

    Result& operator=(Result<ValueType, ErrorType>&& old) noexcept {
        if(this == &old) {
            return *this;
        }
        destroy_active_member();
        if(old.has_value) {
            new(&value) ValueType(std::move(old.value));
        } else {
            new(&error) ErrorType(std::move(old.error));
        }
        has_value = old.has_value;
        return *this;
    }

    ~Result() { destroy_active_member(); }

    const ValueType* operator->() const { return &value; }

    const ValueType& operator*() const { return value; }

    /*!
     * \brief Transforms the success value with func, passing an error through unchanged
     */
    template <typename FuncType>
    auto map(FuncType&& func) -> Result<decltype(func(value))> {
        using RetVal = decltype(func(value));
        if(has_value) {
            return Result<RetVal>(func(value));
        } else {
            return Result<RetVal>(std::move(error));
        }
    }

    /*!
     * \brief Like map, but func itself returns a Result, which is returned un-nested
     */
    template <typename FuncType>
    auto flat_map(FuncType&& func) -> Result<decltype(func(value).value)> {
        using RetVal = decltype(func(value).value);
        if(has_value) {
            return func(value);
        } else {
            return Result<RetVal>(std::move(error));
        }
    }

    //! Calls func with the success value, if there is one
    template <typename FuncType>
    void if_present(FuncType&& func) {
        if(has_value) {
            func(value);
        }
    }

    //! Calls error_func with the error, if there is one
    template <typename FuncType>
    void on_error(FuncType&& error_func) const {
        if(!has_value) {
            error_func(error);
        }
    }

    // ReSharper disable once CppNonExplicitConversionOperator
    operator bool() const { return has_value; }

    ValueType operator*() { return value; }

private:
    // Runs the destructor of whichever union member is currently active
    void destroy_active_member() {
        if(has_value) {
            value.~ValueType();
        } else {
            error.~ErrorType();
        }
    }
};
template <typename ValueType>
Result(ValueType value)->Result<ValueType>;
#define MAKE_ERROR(s, ...) ::ntl::NovaError(::fmt::format(s, __VA_ARGS__))
} // namespace ntl
| 3,764
|
C++
|
.h
| 106
| 24.160377
| 117
| 0.517241
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,234
|
utils.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/utils.hpp
|
#pragma once
// MUST be before <algorithm> to keep gcc happy
#include <algorithm>
#include <memory_resource>
#include <string>
#include <vector>
#include <optional>
namespace rx {
struct string;
}
namespace nova::renderer {
/*!
 * \brief Calls the function once for every element in the provided container
 *
 * \param container The container to perform an action for each element in
 * \param thing_to_do The action to perform for each element in the collection
 */
template <typename Cont, typename Func>
void foreach(Cont&& container, Func thing_to_do) {
    // Forwarding reference instead of by-value: the old signature copied the
    // entire container on every call
    std::for_each(std::cbegin(container), std::cend(container), thing_to_do);
}
std::pmr::vector<std::string> split(const std::string& s, char delim);
std::string join(const std::pmr::vector<std::string>& strings, const std::string& joiner);
std::string print_color(unsigned int color);
std::string print_array(int* data, int size);
bool ends_with(const std::string& string, const std::string& ending);
#define FORMAT(s, ...) fmt::format(fmt(s), __VA_ARGS__)
#define PROFILE_VOID_EXPR(expr, category, event_name) \
[&] { \
ZoneScoped; expr; \
}()
#define PROFILE_RET_EXPR(expr, category, event_name) \
[&] { \
ZoneScoped; return expr; \
}()
} // namespace nova::renderer
| 2,023
|
C++
|
.h
| 36
| 51.916667
| 159
| 0.444276
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,235
|
stream.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/stream.hpp
|
#pragma once
#include <functional>
//! Streams and stream accessories!
//!
//! A stream provides a functional interface to operate on everything in a collection. Streams allow you to
namespace ntl {
/*! \brief Provides a functional interface for data processing */
template <typename DataType>
class Stream {
public:
    /*!
     * \brief Produces a stream of OutputType by applying map_func to each element of this stream
     *
     * NOTE(review): only declared here, no definition is visible in this header — confirm an
     * implementation exists before relying on it
     */
    template <typename OutputType>
    Stream<OutputType>& map(std::function<OutputType(const DataType&)> map_func);
};
} // namespace ntl
| 486
|
C++
|
.h
| 14
| 31
| 107
| 0.725532
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,236
|
windows.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/windows.hpp
|
#pragma once

#include "nova_renderer/util/platform.hpp"

// Trim Windows.h before including it: skip rarely-used APIs and stop it from
// defining the min/max macros that break std::min/std::max
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif

#ifndef NOMINMAX
#define NOMINMAX
#endif

#ifdef NOVA_WINDOWS
#include <Windows.h>
// Windows.h defines ERROR, which collides with identifiers in this codebase
#undef ERROR
#else
#error "Trying to include windows on non-windows build."
#endif
| 286
|
C++
|
.h
| 14
| 19.285714
| 56
| 0.814815
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,237
|
container_accessor.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/container_accessor.hpp
|
#pragma once
#include <unordered_map>
namespace nova::renderer {
/*!
 * \brief Allows one to access a value at a key in a map, even after the map has been rehashed
 *
 * How this isn't in the standard library, idk
 */
template <typename KeyType, typename ValueType>
class MapAccessor {
public:
    MapAccessor() = default;

    MapAccessor(std::unordered_map<KeyType, ValueType>* map, const KeyType& key) : map(map), key(key) {}

    MapAccessor(const MapAccessor& other) = default;
    MapAccessor& operator=(const MapAccessor& other) = default;

    MapAccessor(MapAccessor&& old) noexcept = default;
    MapAccessor& operator=(MapAccessor&& old) noexcept = default;

    ~MapAccessor() = default;

    // Fixed: find() returns an iterator, not a ValueType*, so the previous
    // `return map->find(key);` could never compile when instantiated. We
    // dereference the iterator to the mapped value instead.
    // Precondition: key is present in the map
    [[nodiscard]] const ValueType* operator->() const { return &map->find(key)->second; }

    [[nodiscard]] ValueType* operator->() { return &map->find(key)->second; }

    [[nodiscard]] const KeyType& get_key() const { return key; }

private:
    std::unordered_map<KeyType, ValueType>* map = nullptr;
    KeyType key = {};
};
/*!
 * \brief A stable handle to the element at a fixed index of a vector; stays
 * valid across reallocations because it re-derives the address on every access
 */
template <typename ValueType>
class VectorAccessor {
public:
    VectorAccessor() = default;

    VectorAccessor(std::vector<ValueType>* vec, const size_t idx) : target_vector(vec), element_idx(idx) {}

    VectorAccessor(const VectorAccessor& other) = default;
    VectorAccessor& operator=(const VectorAccessor& other) = default;

    VectorAccessor(VectorAccessor&& old) noexcept = default;
    VectorAccessor& operator=(VectorAccessor&& old) noexcept = default;

    ~VectorAccessor() = default;

    // Address is computed from the vector's current storage on each call
    [[nodiscard]] const ValueType* operator->() const { return target_vector->data() + element_idx; }

    [[nodiscard]] ValueType* operator->() { return target_vector->data() + element_idx; }

    [[nodiscard]] const ValueType& operator*() const { return target_vector->data()[element_idx]; }

    [[nodiscard]] const size_t& get_idx() const { return element_idx; }

private:
    std::vector<ValueType>* target_vector = nullptr;
    size_t element_idx = 0;
};
} // namespace nova::renderer
| 2,069
|
C++
|
.h
| 44
| 39.568182
| 112
| 0.644888
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,238
|
platform.hpp
|
NovaMods_nova-renderer/include/nova_renderer/util/platform.hpp
|
#pragma once

// Normalizes compiler-provided platform macros into NOVA_WINDOWS / NOVA_LINUX,
// so the rest of the codebase only needs to check one macro per platform
#ifdef _WIN32
#define NOVA_WINDOWS 1
#endif

#if defined(linux) || defined(__linux) || defined(__linux__)
#define NOVA_LINUX 1
#endif
| 148
|
C++
|
.h
| 7
| 19.857143
| 60
| 0.733813
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,241
|
command_list.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/command_list.hpp
|
#pragma once
#include <cstdint> // needed for uint****
#include "nova_renderer/rhi/forward_decls.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
namespace nova {
namespace renderer {
struct RhiGraphicsPipelineState;
class Camera;
} // namespace renderer
} // namespace nova
namespace nova::renderer::rhi {
/*!
 * \brief Width of the indices in an index buffer
 */
enum class IndexType {
    Uint16,
    Uint32,
};
/*!
 * \brief An API-agnostic command list
 *
 * Command lists are allocated from the render engine. Once allocated, ownership is passed to the callee. You can
 * then record whatever commands you want and submit the command list back to the render engine for execution on
 * the GPU. Once submitted, you may not record any more commands into the command list
 *
 * There is one command list pool per swapchain image per thread. All the pools for one swapchain image are
 * reset at the beginning of a frame that renders to that swapchain image. This means that any command list
 * allocated in one frame will not be valid in the next frame. DO NOT hold on to command lists
 *
 * A command list may only be recorded to from one thread at a time
 *
 * Command lists are fully bound to ChaiScript
 */
class RhiRenderCommandList {
public:
    //! \brief Whether this list is submitted directly (Primary) or executed from another list (Secondary)
    enum class Level {
        Primary,
        Secondary,
    };

    RhiRenderCommandList() = default;

    // Movable but not copyable: a command list represents unique recording state
    RhiRenderCommandList(RhiRenderCommandList&& old) noexcept = default;
    RhiRenderCommandList& operator=(RhiRenderCommandList&& old) noexcept = default;

    RhiRenderCommandList(const RhiRenderCommandList& other) = delete;
    RhiRenderCommandList& operator=(const RhiRenderCommandList& other) = delete;

    /*!
     * \brief Sets the debug name of this command list, so that API debugging tools can give you a nice name
     */
    virtual void set_debug_name(const std::string& name) = 0;

    /*!
     * \brief Bind the buffers of all the resources that Nova needs to render an object
     */
    virtual void bind_material_resources(RhiBuffer* camera_buffer,
                                         RhiBuffer* material_buffer,
                                         RhiSampler* point_sampler,
                                         RhiSampler* bilinear_sampler,
                                         RhiSampler* trilinear_sampler,
                                         const std::vector<RhiImage*>& images,
                                         rx::memory::allocator& allocator) = 0;

    /*!
     * \brief Uses the provided resource binder to bind resources to the command list
     */
    virtual void bind_resources(RhiResourceBinder& binder) = 0;

    /*!
     * \brief Inserts a barrier so that all access to a resource before the barrier is resolved before any access
     * to the resource after the barrier
     *
     * \param stages_before_barrier The pipeline stages that should be completed before the barriers take effect
     * \param stages_after_barrier The pipeline stages that must wait for the barrier
     * \param barriers All the resource barriers to use
     */
    virtual void resource_barriers(PipelineStage stages_before_barrier,
                                   PipelineStage stages_after_barrier,
                                   const std::vector<RhiResourceBarrier>& barriers) = 0;

    /*!
     * \brief Records a command to copy one region of a buffer to another buffer
     *
     * \param destination_buffer The buffer to write data to
     * \param destination_offset The offset in the destination buffer to start writing to. Measured in bytes
     * \param source_buffer The buffer to read data from
     * \param source_offset The offset in the source buffer to start reading from. Measures in bytes
     * \param num_bytes The number of bytes to copy
     *
     * \pre destination_buffer != nullptr
     * \pre destination_buffer is a buffer
     * \pre destination_offset is less than the size of destination_buffer
     * \pre source_buffer != nullptr
     * \pre source_buffer is a buffer
     * \pre source_offset is less than the size of source_buffer
     * \pre destination_offset plus num_bytes is less than the size of destination_buffer
     * \pre destination_offset plus num_bytes is less than the size of source_buffer
     */
    virtual void copy_buffer(RhiBuffer* destination_buffer,
                             mem::Bytes destination_offset,
                             RhiBuffer* source_buffer,
                             mem::Bytes source_offset,
                             mem::Bytes num_bytes) = 0;

    /*!
     * \brief Uploads data to an image in the most API-optimal way
     *
     * \param image The image to upload the data to. Must be in the CopyDestination state
     * \param width The width of the image in pixels
     * \param height The height of the image in pixels
     * \param bytes_per_pixel The number of bytes that each pixel uses
     * \param staging_buffer The buffer to use to upload the data to the image. This buffer must be host writable, and must be in the
     * CopySource state
     * \param data A pointer to the data to upload to the image
     *
     * \note The image must be in the Common layout prior to uploading data to it
     */
    virtual void upload_data_to_image(
        RhiImage* image, size_t width, size_t height, size_t bytes_per_pixel, RhiBuffer* staging_buffer, const void* data) = 0;

    /*!
     * \brief Executed a number of command lists
     *
     * These command lists should be secondary command lists. Nova doesn't validate this because yolo but you need
     * to be nice - the API-specific validation layers _will_ yell at you
     */
    virtual void execute_command_lists(const std::vector<RhiRenderCommandList*>& lists) = 0;

    /*!
     * \brief Sets the camera that subsequent drawcalls will use to render
     *
     * May be called at any time
     *
     * \param camera The camera to render with
     */
    virtual void set_camera(const Camera& camera) = 0;

    /*!
     * \brief Begins a renderpass
     *
     * \param renderpass The renderpass to begin
     * \param framebuffer The framebuffer to render to
     */
    virtual void begin_renderpass(RhiRenderpass* renderpass, RhiFramebuffer* framebuffer) = 0;

    //! \brief Ends the renderpass started by begin_renderpass
    virtual void end_renderpass() = 0;

    // NOTE(review): presumably selects which material in the material buffer subsequent
    // draws read from — the binding mechanism is not visible here, confirm in implementations
    virtual void set_material_index(uint32_t index) = 0;

    //! \brief Sets the pipeline state object used by subsequent drawcalls
    virtual void set_pipeline(const RhiPipeline& pipeline) = 0;

    //! \brief Binds the given descriptor sets for use with the given pipeline interface
    virtual void bind_descriptor_sets(const std::vector<RhiDescriptorSet*>& descriptor_sets,
                                      const RhiPipelineInterface* pipeline_interface) = 0;

    /*!
     * \brief Binds the provided vertex buffers to the command list
     *
     * The buffers are always bound sequentially starting from binding 0. The first buffer in the vector is bound to binding 0, the
     * second is bound to binding 1, etc
     *
     * \param buffers The buffers to bind
     */
    virtual void bind_vertex_buffers(const std::vector<RhiBuffer*>& buffers) = 0;

    /*!
     * \brief Binds the provided index buffer to the command list
     *
     * The index buffer must use 32-bit indices. This will likely change in the future but for now it's a thing
     */
    virtual void bind_index_buffer(const RhiBuffer* buffer, IndexType index_size) = 0;

    /*!
     * \brief Records rendering instances of an indexed mesh
     *
     * \param num_indices The number of indices to read from the current index buffer
     * \param offset The offset from the beginning of the index buffer to begin reading vertex indices
     * \param num_instances The number of instances to render
     */
    virtual void draw_indexed_mesh(uint32_t num_indices, uint32_t offset = 0, uint32_t num_instances = 1) = 0;

    //! \brief Restricts rendering to the given rectangle, in pixels
    virtual void set_scissor_rect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) = 0;

    // Virtual destructor: implementations are deleted through this base
    virtual ~RhiRenderCommandList() = default;
};
} // namespace nova::renderer::rhi
| 8,518
|
C++
|
.h
| 163
| 40.895706
| 137
| 0.628767
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,242
|
swapchain.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/swapchain.hpp
|
#pragma once
#include <glm/glm.hpp>
#include <vector>
#include <rx/core/memory/allocator.h>
namespace nova::renderer::rhi {
struct RhiFence;
struct RhiFramebuffer;
struct RhiImage;
struct RhiSemaphore;
/*!
 * \brief API-agnostic swapchain: owns the per-frame images, framebuffers, and fences
 */
class Swapchain {
public:
    Swapchain(uint32_t num_images, const glm::uvec2& size);

    virtual ~Swapchain() = default;

    /*!
     * \brief Acquires the next image in the swapchain
     *
     * \return The index of the swapchain image we just acquired
     */
    virtual uint8_t acquire_next_swapchain_image(rx::memory::allocator& allocator) = 0;

    /*!
     * \brief Presents the specified swapchain image
     */
    virtual void present(uint32_t image_idx) = 0;

    //! \brief The framebuffer that renders to the given frame's swapchain image
    [[nodiscard]] RhiFramebuffer* get_framebuffer(uint32_t frame_idx) const;

    //! \brief The swapchain image for the given frame
    [[nodiscard]] RhiImage* get_image(uint32_t frame_idx) const;

    //! \brief The fence for the given frame
    [[nodiscard]] RhiFence* get_fence(uint32_t frame_idx) const;

    //! \brief Size of the swapchain images, in pixels
    [[nodiscard]] glm::uvec2 get_size() const;

protected:
    const uint32_t num_images;

    const glm::uvec2 size;

    // Arrays of the per-frame swapchain resources. Each swapchain implementation is responsible for filling these arrays with
    // API-specific objects
    std::vector<RhiFramebuffer*> framebuffers;
    std::vector<RhiImage*> swapchain_images;
    std::vector<RhiFence*> fences;
};
} // namespace nova::renderer::rhi
| 1,454
|
C++
|
.h
| 37
| 31.945946
| 130
| 0.65812
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,243
|
rhi_types.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/rhi_types.hpp
|
#pragma once
#include <glm/glm.hpp>
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/forward_decls.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "nova_renderer/util/bytes.hpp"
namespace nova::renderer::rhi {
#pragma region Structs
//! \brief Everything needed to create a GPU buffer
struct RhiBufferCreateInfo {
    // Debug name for the buffer
    std::string name;

    mem::Bytes size = 0;

    BufferUsage buffer_usage{};
};

// Opaque handle to a device memory allocation; backends subclass with API-specific state
struct RhiDeviceMemory {};

/*!
 * \brief A resource
 *
 * Resources may be dynamic or static. Dynamic resources are updated after they are created, possibly by a shader,
 * while static resources are loaded once and that's that
 */
struct RhiResource {
    ResourceType type = {};

    bool is_dynamic = true;
};

//! \brief Filtering, wrapping, and LOD parameters for creating a sampler
struct RhiSamplerCreateInfo {
    TextureFilter min_filter = TextureFilter::Point;
    TextureFilter mag_filter = TextureFilter::Point;

    TextureCoordWrapMode x_wrap_mode = TextureCoordWrapMode::ClampToEdge;
    TextureCoordWrapMode y_wrap_mode = TextureCoordWrapMode::ClampToEdge;
    TextureCoordWrapMode z_wrap_mode = TextureCoordWrapMode::ClampToEdge;

    float mip_bias = 0;

    bool enable_anisotropy = false;
    float max_anisotropy = 1;

    float min_lod = 0;
    float max_lod = 0;
};

// Opaque handle to a sampler
struct RhiSampler {};

struct RhiTextureCreateInfo {
    TextureUsage usage;
};

//! \brief An image resource
struct RhiImage : RhiResource {
    bool is_depth_tex = false;
};

//! \brief A buffer resource
struct RhiBuffer : RhiResource {
    mem::Bytes size = 0;
};

//! \brief All the device objects a material needs bound before rendering
struct RhiMaterialResources {
    RhiBuffer* material_data_buffer;
    RhiSampler* point_sampler;
    RhiSampler* bilinear_sampler;
    RhiSampler* trilinear_sampler;
    std::vector<RhiImage*> images;
};

// Opaque handle to a pipeline state object; carries its debug name
struct RhiPipeline {
    std::string name;
};

struct RhiFramebuffer {
    // Size of the framebuffer's attachments, in pixels
    glm::uvec2 size;

    uint32_t num_attachments;
};

struct RhiRenderpass {};

//! \brief Describes one resource binding in a descriptor set layout
struct RhiResourceBindingDescription {
    /*!
     * \brief Descriptor set that this binding belongs to
     */
    uint32_t set;

    /*!
     * \brief Binding of this resource binding
     */
    uint32_t binding;

    /*!
     * \brief Number of bindings. Useful if you have an array of descriptors
     *
     * If this is a unbounded array, this count is the upper limit on the size of the array
     */
    uint32_t count;

    /*!
     * \brief If true, this binding is an unbounded array
     *
     * Unbounded descriptors must be the final binding in their descriptor set
     */
    bool is_unbounded;

    /*!
     * \brief The type of object that will be bound
     */
    DescriptorType type;

    /*!
     * \brief The shader stages that need access to this binding
     */
    ShaderStage stages;

    bool operator==(const RhiResourceBindingDescription& other);
    bool operator!=(const RhiResourceBindingDescription& other);
};

//! \brief Byte range of a push constant block
struct RhiPushConstantRange {
    uint32_t offset;
    uint32_t size;
};

//! \brief One field of a vertex: its semantic name and data format
struct RhiVertexField {
    std::string name;

    VertexFieldFormat format;
};

/*!
 * \brief The interface for a pipeline. Includes both inputs (descriptors) and outputs (framebuffers)
 */
struct RhiPipelineInterface {
    std::unordered_map<std::string, RhiResourceBindingDescription> bindings;

    [[nodiscard]] uint32_t get_num_descriptors_of_type(DescriptorType type) const;
};

// Opaque synchronization and descriptor handles; backends subclass with API-specific state
struct RhiSemaphore {};
struct RhiPresentSemaphore {};
struct RhiFence {};
struct RhiDescriptorPool {};
struct RhiDescriptorSet {};

// TODO: Resource state tracking in the command list so we don't need all this bullshit
struct RhiResourceBarrier {
    RhiResource* resource_to_barrier;

    /*!
     * \brief The resource access that must finish before this barrier executed
     */
    ResourceAccess access_before_barrier;

    /*!
     * \brief The resource access that must wait for this barrier to finish executing
     */
    ResourceAccess access_after_barrier;

    /*!
     * \brief How you're going to access this resource just before this barrier
     *
     * Will a shader read from it before the barrier? Will the fragment depth be copied to a depth buffer before
     * this barrier? Will the resource be used as a indirect draw command buffer right before this barrier?
     */
    ResourceState old_state;

    /*!
     * \brief How you're going to access this resource after this barrier
     *
     * Will a shader read from it after the barrier? Will the fragment depth be copied to a depth buffer after
     * this barrier? Will the resource be used as a indirect draw command buffer right after this barrier?
     */
    ResourceState new_state;

    // Queues for an ownership transfer; equal when no transfer is needed
    QueueType source_queue;
    QueueType destination_queue;

    struct ImageMemoryBarrier {
        ImageAspect aspect;
    };

    struct BufferMemoryBarrier {
        mem::Bytes offset;
        mem::Bytes size;
    };

    // Which member is active depends on whether resource_to_barrier is an image or a buffer
    union {
        ImageMemoryBarrier image_memory_barrier;
        BufferMemoryBarrier buffer_memory_barrier;
    };

    RhiResourceBarrier();
};
#pragma endregion
ShaderStage operator|=(ShaderStage lhs, ShaderStage rhs);
} // namespace nova::renderer::rhi
| 5,522
|
C++
|
.h
| 156
| 27.416667
| 118
| 0.643018
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,244
|
forward_decls.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/forward_decls.hpp
|
#pragma once
//! Forward declarations for all the types in the RHI
namespace nova::renderer {
class RhiResourceBinder;
namespace rhi {
struct RhiDescriptorSet;
struct RhiRenderpass;
struct RhiFramebuffer;
struct RhiPipelineInterface;
struct RhiFence;
struct RhiSemaphore;
struct RhiResource;
struct RhiBuffer;
struct RhiImage;
struct RhiDeviceMemory;
struct RhiSampler;
struct RhiPresentSemaphore;
struct RhiDescriptorPool;
class Swapchain;
class RhiRenderCommandList;
class RenderDevice;
} // namespace rhi
} // namespace nova::renderer
| 682
|
C++
|
.h
| 23
| 22.391304
| 53
| 0.679389
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,245
|
resource_binder.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/resource_binder.hpp
|
#pragma once
#include <string>
#include <vector>
#include "nova_renderer/rhi/forward_decls.hpp"
namespace nova::renderer {
/*!
 * \brief Abstraction for making resources available to shaders
 *
 * Resource binders are meant to be long-lived. You should create them for your materials as early as possible and simply use them over
 * and over and over
 */
class RhiResourceBinder {
public:
    virtual ~RhiResourceBinder() = default;

    //! \brief Binds a single image to the named shader binding
    virtual void bind_image(const std::string& binding_name, rhi::RhiImage* image) = 0;

    //! \brief Binds a single buffer to the named shader binding
    virtual void bind_buffer(const std::string& binding_name, rhi::RhiBuffer* buffer) = 0;

    //! \brief Binds a single sampler to the named shader binding
    virtual void bind_sampler(const std::string& binding_name, rhi::RhiSampler* sampler) = 0;

    //! \brief Binds an array of images to the named shader binding
    virtual void bind_image_array(const std::string& binding_name, const std::vector<rhi::RhiImage*>& images) = 0;

    //! \brief Binds an array of buffers to the named shader binding
    virtual void bind_buffer_array(const std::string& binding_name, const std::vector<rhi::RhiBuffer*>& buffers) = 0;

    //! \brief Binds an array of samplers to the named shader binding
    virtual void bind_sampler_array(const std::string& binding_name, const std::vector<rhi::RhiSampler*>& samplers) = 0;
};
} // namespace nova::renderer
| 1,166
|
C++
|
.h
| 22
| 47.181818
| 139
| 0.703084
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,246
|
pipeline_create_info.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/pipeline_create_info.hpp
|
#pragma once
#include <string>
#include "nova_renderer/rhi/rhi_types.hpp"
/*!
* \file pipeline_create_info.hpp
*
* \brief All the data needed for the RHI to create a pipeline state object
*/
namespace nova::renderer {
/*!
* \brief SPIR-V shader source
*/
struct ShaderSource {
/*!
* \brief Filename of the file that this shader source originated from
*/
std::string filename{};
/*!
* \brief SPIR-V shader code
*/
std::vector<uint32_t> source{};
};
enum class PrimitiveTopology {
PointList,
LineList,
TriangleList,
};
/*!
* \brief How to fill in the rasterized primitives
*/
enum class FillMode {
/*!
* \brief Completely fill the primitive
*/
Solid,
/*!
* \brief Only draw lines between the vertices of each primitive
*/
Wireframe,
/*!
* \brief Only draw points at the vertices of each primitive
*/
Points
};
/*!
* \brief Which kind of primitives to cull
*/
enum class PrimitiveCullingMode {
/*!
* \brief Cull back faces - faces with a clockwise winding order
*/
BackFace,
/*!
* \brief Cull front faces - faces with a counterclockwise winding order
*/
FrontFace,
/*!
* \brief Don't cull any faces
*/
None,
};
/*!
* \brief State of the pixel rasterizer
*/
struct RasterizerState {
/*!
* \brief Enables or disabled depth clamping
*
* If depth clamping is enabled, then pixels with a depth value outside of the active viewport will have their depth value clamped
* to the viewport. This can help prevent holes in primitives that are particularly close to or far from the active view
*/
bool enable_depth_clamping = false;
/*!
* \brief How to fill in rasterized primitives
*/
FillMode fill_mode{};
/*!
* \brief Which faces, if any, should be culled based on their winding order
*/
PrimitiveCullingMode cull_mode{};
/*!
* \brief Constant amount to bias the output depth
*/
float depth_bias = 0;
/*!
* \brief Slope scaled amount to bias the output depth
*
* This value is multiplied by the output pixel's depth slope before being added to the output pixel's output depth
*/
float slope_scaled_depth_bias = 0;
/*!
* \brief Maximum amount of allowed depth bias
*/
float maximum_depth_bias = 0;
};
struct MultisamplingState {
// TODO: Figure out multisampling. The Vulkan book's section is sadly incomplete
};
enum class StencilOp { Keep, Zero, Replace, Increment, IncrementAndWrap, Decrement, DecrementAndWrap, Invert };
enum class CompareOp { Never, Less, LessEqual, Greater, GreaterEqual, Equal, NotEqual, Always };
/*!
* \brief Describes the stencil buffer operations to perform
*
* I don't like this name but idk a better one
*/
struct StencilOpState {
/*!
* \brief What to do when the stencil test fails
*/
StencilOp fail_op{};
/*!
* \brief What to do when the stencil op passes
*/
StencilOp pass_op{};
/*!
* \brief What to do when the depth test fails
*/
StencilOp depth_fail_op{};
/*!
* \brief How to compare a stencil value to the value in the stencil buffer
*/
CompareOp compare_op{};
/*!
* \brief A bitmask of which stencil bits to compare
*/
uint32_t compare_mask{};
/*!
* \brief A bitmask of what stencil bits to write
*/
uint32_t write_mask{};
/*!
* \brief Reference value to use in the stencil test
*/
uint32_t reference_value{};
};
/*!
* \brief What stencil buffer operations the pipeline state should use
*/
struct StencilState {
/*!
* \brief The stencil operations to perform for front faces
*/
StencilOpState front_face_op{};
/*!
* \brief The stencil operations to perform for back faces
*/
StencilOpState back_face_op{};
};
/*!
* \brief Depth bounds test mode
*/
enum class DepthBoundsTestMode {
/*!
* \brief Test against a static depth bounds, as specified in the depth bounds test info
*/
Static,
/*!
* \brief Test against a dynamic depth bounds, as specified when recording a command list
*/
Dynamic,
};
/*!
* \brief State to use for a static depth bounds test
*/
struct StaticDepthBoundsTestState {
/*!
* \brief Minimum depth bound value
*/
float min_bound = 0;
/*!
* \brief Maximum depth bound value
*/
float max_bound = 0;
};
/*!
* \brief State to use for a dynamic depth bounds test
*/
struct DynamicDepthBoundsTestState {
// Intentionally empty so I can use a union
};
/*!
* \brief The state to use for the depth bounds test
*/
struct DepthBoundsTestState {
/*!
* \brief Depth bounds test mode
*/
DepthBoundsTestMode mode{};
/*!
* \brief State for the depth bounds test
*/
union {
StaticDepthBoundsTestState static_state{};
DynamicDepthBoundsTestState dynamic_state;
};
};
struct DepthState {
/*!
* \brief Whether to write the pixel shader's output depth value to the depth buffer
*/
bool enable_depth_write = true;
/*!
* \brief Which operation to use for depth comparisons
*/
CompareOp compare_op = CompareOp::Greater;
/*!
* \brief State to use for the depth bounds test
*
* If this optional is empty, the depth bounds test is disabled. Otherwise, the depth bounds test is enabled
*/
std::optional<DepthBoundsTestState> bounds_test_state{};
};
/*!
* \brief A factor to use in blending
*/
enum class BlendFactor {
Zero,
One,
SrcColor,
OneMinusSrcColor,
DstColor,
OneMinusDstColor,
SrcAlpha,
OneMinusSrcAlpha,
DstAlpha,
OneMinusDstAlpha,
ConstantColor,
OneMinusConstantColor,
ConstantAlpha,
OneMinusConstantAlpha,
ClampedSrcAlpha,
};
/*!
* \brief Operation to use to combine the blend factors
*/
enum class BlendOp {
Add,
Subtract,
ReverseSubtract,
Min,
Max,
};
/*!
* \brief Blending state for a single attachment
*/
struct RenderTargetBlendState {
/*!
* \brief Whether to enable blending for this attachment
*
* If false, all the other values in this struct are ignored
*/
bool enable = false;
/*!
* \brief Blending factor for the pixel shader's output color
*/
BlendFactor src_color_factor = BlendFactor::SrcAlpha;
/*!
* \brier Blending factor for the color in the output render target
*/
BlendFactor dst_color_factor = BlendFactor::OneMinusSrcAlpha;
/*!
* \brief The operation to use when blending color
*/
BlendOp color_op = BlendOp::Add;
/*!
* \brief Blending factor for the pixel shader's output alpha
*/
BlendFactor src_alpha_factor = BlendFactor::SrcAlpha;
/*!
* \brief Blending factor for the alpha in the output render target
*/
BlendFactor dst_alpha_factor = BlendFactor::OneMinusSrcAlpha;
/*!
* \brief The operation to use when blending alpha
*/
BlendOp alpha_op = BlendOp::Add;
};
/*!
* \brief How to blend colors
*/
struct BlendState {
/*!
* \brief How to blend each render target that this pipeline state renders to
*/
std::vector<RenderTargetBlendState> render_target_states{};
/*!
* \brief Constant values to use for any render targets where one of the blend factors involves either a constant color or constant
* alpha
*/
glm::vec4 blend_constants{};
};
/*!
* \brief All the information needed to create a pipeline state
*/
struct RhiGraphicsPipelineState {
/*!
* \brief Name of this pipeline state
*/
std::string name{};
/*!
* \brief Vertex shader to use
*/
ShaderSource vertex_shader{};
/*!
* \brief Geometry shader to use
*/
std::optional<ShaderSource> geometry_shader{};
/*!
* \brier Pixel shader to use
*/
std::optional<ShaderSource> pixel_shader{};
/*!
* \brief Description of the fields in the vertex data
*/
std::vector<rhi::RhiVertexField> vertex_fields{};
/*!
* \brief Size of the viewport that this pipeline state renders to, measured in pixels
*/
glm::vec2 viewport_size{};
/*!
* \brief Enables the scissor test, allowing e.g. UI elements to only render to a specific portion of the screen
*/
bool enable_scissor_test = false;
/*!
* \brief Topology of the vertex data
*/
PrimitiveTopology topology = PrimitiveTopology::TriangleList;
/*!
* \brief The state of the rasterizer when this pipeline state is active
*/
RasterizerState rasterizer_state{};
std::optional<MultisamplingState> multisampling_state{};
/*!
* \brief What depth operations to perform
*
* If this optional has a value, the depth test is enabled. If false, the depth test is disabled
*/
std::optional<DepthState> depth_state = DepthState{};
/*!
* \brief What stencil operations to perform
*
* If this optional has a value, the stencil test will be enabled. Otherwise, the stencil test will be disabled
*/
std::optional<StencilState> stencil_state{};
/*!
* \brief How to blend colors
*
* If this optional has a value, blending will be enabled. Otherwise, blending will be disabled
*/
std::optional<BlendState> blend_state{};
bool enable_color_write = true;
bool enable_alpha_write = true;
/*!
* \brief All the color attachments that this pipeline writes to
*/
std::vector<renderpack::TextureAttachmentInfo> color_attachments{};
/*!
* \brief The depth texture that this pipeline writes to, if it writes to a depth texture
*/
std::optional<renderpack::TextureAttachmentInfo> depth_texture{};
};
} // namespace nova::renderer
| 11,348
|
C++
|
.h
| 362
| 22.850829
| 139
| 0.57941
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,247
|
rhi_enums.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/rhi_enums.hpp
|
#pragma once
#include "nova_renderer/util/utils.hpp"
namespace nova::renderer::rhi {
enum class MemoryUsage {
DeviceOnly,
LowFrequencyUpload,
StagingBuffer,
};
enum class ObjectType {
Buffer,
Texture,
RenderTexture,
SwapchainSurface,
Any,
};
enum class PixelFormat {
Rgba8,
Rgba16F,
Rgba32F,
Depth32,
Depth24Stencil8,
};
enum class TextureUsage {
RenderTarget,
SampledRenderTarget,
SampledTexture,
};
enum class VertexFieldFormat {
Uint,
Float2,
Float3,
Float4,
// MUST always be last
Invalid,
};
enum class DescriptorType { CombinedImageSampler, UniformBuffer, StorageBuffer, Texture, Sampler };
enum class ResourceAccess {
IndirectCommandRead,
IndexRead,
VertexAttributeRead,
UniformRead,
InputAttachmentRead,
ShaderRead,
ShaderWrite,
ColorAttachmentRead,
ColorAttachmentWrite,
DepthStencilAttachmentRead,
DepthStencilAttachmentWrite,
CopyRead,
CopyWrite,
HostRead,
HostWrite,
MemoryRead,
MemoryWrite,
ShadingRateImageRead,
AccelerationStructureRead,
AccelerationStructureWrite,
FragmentDensityMapRead,
};
enum class ResourceState {
Undefined,
Common,
CopySource,
CopyDestination,
UniformBuffer,
VertexBuffer,
IndexBuffer,
ShaderRead,
ShaderWrite,
RenderTarget,
DepthWrite,
DepthRead,
PresentSource,
};
enum class ImageAspect {
Color = 0x00000001,
Depth = 0x00000002,
Stencil = 0x00000004,
};
enum class PipelineStage {
TopOfPipe = 0x00000001,
DrawIndirect = 0x00000002,
VertexInput = 0x00000004,
VertexShader = 0x00000008,
TessellationControlShader = 0x00000010,
TessellationEvaluationShader = 0x00000020,
GeometryShader = 0x00000040,
FragmentShader = 0x00000080,
EarlyFragmentTests = 0x00000100,
LateFragmentTests = 0x00000200,
ColorAttachmentOutput = 0x00000400,
ComputeShader = 0x00000800,
Transfer = 0x00001000,
BottomOfPipe = 0x00002000,
Host = 0x00004000,
AllGraphics = 0x00008000,
AllCommands = 0x00010000,
ShadingRateImage = 0x00400000,
RayTracingShader = 0x00200000,
AccelerationStructureBuild = 0x02000000,
TaskShader = 0x00080000,
MeshShader = 0x00100000,
FragmentDensityProcess = 0x00800000,
};
enum class ShaderLanguage {
Hlsl,
Glsl,
Spirv,
};
enum class ShaderStage {
Vertex = 0x0001,
TessellationControl = 0x0002,
TessellationEvaluation = 0x0004,
Geometry = 0x0008,
Pixel = 0x0010,
Compute = 0x0020,
Raygen = 0x0100,
AnyHit = 0x0200,
ClosestHit = 0x0400,
Miss = 0x0800,
Intersection = 0x1000,
Task = 0x0040,
Mesh = 0x0080,
};
enum class QueueType {
Graphics,
Transfer,
AsyncCompute,
};
enum class BufferUsage {
UniformBuffer,
IndexBuffer,
VertexBuffer,
StagingBuffer,
};
enum class ResourceType {
Buffer,
Image,
};
enum class TextureFilter {
Point,
Bilinear,
Trilinear,
};
enum class TextureCoordWrapMode {
Repeat,
MirroredRepeat,
ClampToEdge,
ClampToBorder,
MirrorClampToEdge,
};
bool is_depth_format(PixelFormat format);
uint32_t get_byte_size(VertexFieldFormat format);
std::string descriptor_type_to_string(DescriptorType type);
} // namespace nova::renderer::rhi
| 3,982
|
C++
|
.h
| 155
| 17.619355
| 103
| 0.613966
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,248
|
render_device.hpp
|
NovaMods_nova-renderer/include/nova_renderer/rhi/render_device.hpp
|
#pragma once
#include "nova_renderer/nova_settings.hpp"
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/command_list.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
#include "nova_renderer/util/result.hpp"
#include "nova_renderer/window.hpp"
#include "resource_binder.hpp"
namespace nova::renderer {
struct FrameContext;
struct RhiGraphicsPipelineState;
struct DeviceMemoryResource;
} // namespace nova::renderer
namespace nova::renderer::rhi {
/*!
* \brief All the GPU architectures that Nova cares about, at whatever granularity is most useful
*/
enum class DeviceArchitecture {
unknown,
/*!
* \brief The GPU was made by AMD
*/
amd,
/*!
* \brief The GPU was made by Nvidia
*/
nvidia,
/*!
* \brief The GPU was made by Intel
*/
intel,
};
/*!
* \brief Information about hte capabilities and limits of the device we're running on
*/
struct DeviceInfo {
DeviceArchitecture architecture = DeviceArchitecture::unknown;
mem::Bytes max_texture_size = 0;
bool is_uma = false;
bool supports_raytracing = false;
bool supports_mesh_shaders = false;
};
#define NUM_THREADS 1
/*!
* \brief Interface to a logical device which can render to an operating system window
*/
class RenderDevice {
public:
DeviceInfo info;
NovaSettingsAccessManager& settings;
RenderDevice(RenderDevice&& other) = delete;
RenderDevice& operator=(RenderDevice&& other) noexcept = delete;
RenderDevice(const RenderDevice& other) = delete;
RenderDevice& operator=(const RenderDevice& other) = delete;
/*!
* \brief Needed to make destructor of subclasses called
*/
virtual ~RenderDevice() = default;
virtual void set_num_renderpasses(uint32_t num_renderpasses) = 0;
/*!
* \brief Creates a renderpass from the provided data
*
* Renderpasses are created 100% upfront, meaning that the caller can't change anything about a renderpass
* after it's been created
*
* \param data The data to create a renderpass from
* \param framebuffer_size The size in pixels of the framebuffer that the renderpass will write to
* \param allocator The allocator to allocate the renderpass from
*
* \return The newly created renderpass
*/
[[nodiscard]] virtual ntl::Result<RhiRenderpass*> create_renderpass(const renderpack::RenderPassCreateInfo& data,
const glm::uvec2& framebuffer_size) = 0;
[[nodiscard]] virtual RhiFramebuffer* create_framebuffer(const RhiRenderpass* renderpass,
const std::vector<RhiImage*>& color_attachments,
const std::optional<RhiImage*> depth_attachment,
const glm::uvec2& framebuffer_size) = 0;
/*!
* \brief Creates a new surface pipeline
*
* Surface pipelines render objects using Nova's material system. The backend does a little work to set them up so they're 100%
* compatible with the material system. They currently can't access any resources outside of the material system, and _have_ to use
* the standard pipeline layout
*/
[[nodiscard]] virtual std::unique_ptr<RhiPipeline> create_surface_pipeline(const RhiGraphicsPipelineState& pipeline_state) = 0;
/*!
* \brief Creates a global pipeline
*
* Global pipelines are pipelines that aren't tied to any specific objects in the world. Global pipelines typically read render
* targets to do something like post processing
*/
[[nodiscard]] virtual std::unique_ptr<RhiPipeline> create_global_pipeline(const RhiGraphicsPipelineState& pipeline_state) = 0;
[[nodiscard]] virtual std::unique_ptr<RhiResourceBinder> create_resource_binder_for_pipeline(const RhiPipeline& pipeline) = 0;
/*!
* \brief Creates a buffer with undefined contents
*/
[[nodiscard]] virtual RhiBuffer* create_buffer(const RhiBufferCreateInfo& info) = 0;
/*!
* \brief Writes data to a buffer
*
* This method always writes the data from byte 0 to byte num_bytes. It does not let you use an offset for either reading from
* the data or writing to the buffer
*
* The CPU must be able to write directly to the buffer for this method to work. If the buffer is device local, this method will
* fail in a horrible way
*
* \param data The data to upload
* \param num_bytes The number of bytes to write
* \param buffer The buffer to write to
*/
virtual void write_data_to_buffer(const void* data, mem::Bytes num_bytes, const RhiBuffer* buffer) = 0;
/*!
* \brief Creates a new Sampler object
*/
[[nodiscard]] virtual RhiSampler* create_sampler(const RhiSamplerCreateInfo& create_info) = 0;
/*!
* \brief Creates an empty image
*
* The image will start out in the Undefined layout. You must transition it to whatever layout you want to use
*/
[[nodiscard]] virtual RhiImage* create_image(const renderpack::TextureCreateInfo& info) = 0;
[[nodiscard]] virtual RhiSemaphore* create_semaphore() = 0;
[[nodiscard]] virtual std::vector<RhiSemaphore*> create_semaphores(uint32_t num_semaphores) = 0;
[[nodiscard]] virtual RhiFence* create_fence(bool signaled) = 0;
[[nodiscard]] virtual std::vector<RhiFence*> create_fences(uint32_t num_fences, bool signaled) = 0;
/*!
* \blocks the fence until all fences are signaled
*
* Fences are waited on for an infinite time
*
* \param fences All the fences to wait for
*/
virtual void wait_for_fences(std::vector<RhiFence*> fences) = 0;
virtual void reset_fences(const std::vector<RhiFence*>& fences) = 0;
/*!
* \brief Clean up any GPU objects a Renderpass may own
*
* While Renderpasses are per-renderpack objects, and their CPU memory will be cleaned up when a new renderpack is loaded, we still
* need to clean up their GPU objects
*/
virtual void destroy_renderpass(RhiRenderpass* pass) = 0;
/*!
* \brief Clean up any GPU objects a Framebuffer may own
*
* While Framebuffers are per-renderpack objects, and their CPU memory will be cleaned up when a new renderpack is loaded, we still
* need to clean up their GPU objects
*/
virtual void destroy_framebuffer(RhiFramebuffer* framebuffer) = 0;
/*!
* \brief Clean up any GPU objects an Image may own
*
* While Images are per-renderpack objects, and their CPU memory will be cleaned up when a new renderpack is loaded, we still need
* to clean up their GPU objects
*/
virtual void destroy_texture(RhiImage* resource) = 0;
/*!
* \brief Clean up any GPU objects a Semaphores may own
*
* While Semaphores are per-renderpack objects, and their CPU memory will be cleaned up when a new renderpack is loaded, we still
* need to clean up their GPU objects
*/
virtual void destroy_semaphores(std::vector<RhiSemaphore*>& semaphores) = 0;
/*!
* \brief Clean up any GPU objects a Fence may own
*
* While Fence are per-renderpack objects, and their CPU memory will be cleaned up when a new renderpack is loaded, we still need to
* clean up their GPU objects
*/
virtual void destroy_fences(const std::vector<RhiFence*>& fences) = 0;
[[nodiscard]] Swapchain* get_swapchain() const;
/*!
* \brief Allocates a new command list that can be used from the provided thread and has the desired type
*
* Ownership of the command list is given to the caller. You can record your commands into it, then submit it
* to a queue. Submitting it gives ownership back to the render engine, and recording commands into a
* submitted command list is not supported
*
* There is one command list pool per swapchain image per thread. All the pools for one swapchain image are
* reset at the beginning of a frame that renders to that swapchain image. This means that any command list
* allocated in one frame will not be valid in the next frame. DO NOT hold on to command lists
*
* Command lists allocated by this method are returned ready to record commands into - the caller doesn't need
* to begin the command list
*/
virtual RhiRenderCommandList* create_command_list(uint32_t thread_idx,
QueueType needed_queue_type,
RhiRenderCommandList::Level level) = 0;
virtual void submit_command_list(RhiRenderCommandList* cmds,
QueueType queue,
RhiFence* fence_to_signal = nullptr,
const std::vector<RhiSemaphore*>& wait_semaphores = {},
const std::vector<RhiSemaphore*>& signal_semaphores = {}) = 0;
/*!
* \brief Performs any work that's needed to end the provided frame
*/
virtual void end_frame(FrameContext& ctx) = 0;
protected:
NovaWindow& window;
glm::uvec2 swapchain_size = {};
Swapchain* swapchain = nullptr;
/*!
* \brief Initializes the engine
* \param settings The settings passed to nova
* \param window The OS window that we'll be rendering to
*
* Intentionally does nothing. This constructor serves mostly to ensure that concrete render engines have a
* constructor that takes in some settings
*
* \attention Called by the various render engine implementations
*/
RenderDevice(NovaSettingsAccessManager& settings,
NovaWindow& window);
};
/*!
* \brief Creates a new API-agnostic render device
*
* Right now we only support creating Vulkan render devices, but in the future we might support devices for different APIs, or different
* types of hardware
*/
std::unique_ptr<RenderDevice> create_render_device(NovaSettingsAccessManager& settings, NovaWindow& window);
} // namespace nova::renderer::rhi
| 11,051
|
C++
|
.h
| 221
| 39.036199
| 140
| 0.627098
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,249
|
shader_includer.hpp
|
NovaMods_nova-renderer/include/nova_renderer/loading/shader_includer.hpp
|
#pragma once
#include "nova_renderer/util/platform.hpp"
#ifdef NOVA_WINDOWS
#include <comdef.h>
#endif
#include <dxc/dxcapi.h>
#include <rx/core/concurrency/mutex.h>
#include <unordered_map>
#include <string>
namespace nova {
namespace filesystem {
class FolderAccessorBase;
}
} // namespace nova
namespace rx {
namespace memory {
struct allocator;
}
} // namespace rx
namespace nova::renderer {
/*!
* \brief Include handler to let Nova shaders include other files
*/
class NovaDxcIncludeHandler final : public IDxcIncludeHandler {
public:
explicit NovaDxcIncludeHandler(rx::memory::allocator& allocator,
IDxcLibrary& library,
filesystem::FolderAccessorBase* folder_accessor);
virtual ~NovaDxcIncludeHandler() = default;
HRESULT QueryInterface(const REFIID class_id, void** output_object) override;
// In the Linux support library, these are implemented in IUnknown
// However, I ran into an issue where the linker couldn't find definitions for these methods. I added the definitions to the
// WinAdapter.h header in DXC, which seems to work for now
#if NOVA_WINDOWS
ULONG AddRef() override;
ULONG Release() override;
#endif
HRESULT LoadSource(LPCWSTR wide_filename, IDxcBlob** included_source) override;
private:
rx::memory::allocator& allocator;
IDxcLibrary& library;
filesystem::FolderAccessorBase* folder_accessor;
std::unordered_map<std::string, std::string> builtin_files;
#if NOVA_WINDOWS
std::mutex mtx;
ULONG num_refs = 0;
#endif
};
} // namespace nova::renderer
| 1,748
|
C++
|
.h
| 49
| 29.040816
| 132
| 0.679739
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,250
|
renderpack_loading.hpp
|
NovaMods_nova-renderer/include/nova_renderer/loading/renderpack_loading.hpp
|
#pragma once
#include <vector>
#include "nova_renderer/filesystem/folder_accessor.hpp"
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
namespace nova::renderer::renderpack {
/*!
* \brief Loads all the data for a single renderpack
*
* This function reads the renderpack data from disk (either a folder od a zip file) and performs basic validation
* to ensure both that the data is well-formatted JSON, but also to ensure that the data has all the fields that
* Nova requires, e.g. a material must have at least one pass, a texture must have a width and a height, etc. All
* generated warnings and errors are printed to the Nova logger
*
* If the renderpack can't be loaded, an empty optional is returned
*
* Note: This function is NOT thread-safe. It should only be called for a single thread at a time
*
* \param renderpack_name The name of the renderpack to loads
* \return The renderpack, if it can be loaded, or an empty optional if it cannot
*/
RenderpackData load_renderpack_data(const std::string& renderpack_name);
std::vector<uint32_t> load_shader_file(const std::string& filename,
filesystem::FolderAccessorBase* folder_access,
rhi::ShaderStage stage,
const std::vector<std::string>& defines = {});
std::vector<uint32_t> compile_shader(const std::string& source,
rhi::ShaderStage stage,
rhi::ShaderLanguage source_language,
filesystem::FolderAccessorBase* folder_accessor = nullptr);
} // namespace nova::renderer::renderpack
| 1,812
|
C++
|
.h
| 31
| 46.580645
| 118
| 0.63964
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,252
|
virtual_filesystem.hpp
|
NovaMods_nova-renderer/include/nova_renderer/filesystem/virtual_filesystem.hpp
|
#pragma once
#include "folder_accessor.hpp"
namespace nova::filesystem {
/*!
* Nova's virtual filesystem
*
* The virtual filesystem may have one or more filesystem roots. When you request access to a file at a specific path, the virtual
* filesystem looks for it in all the filesystem roots, in their priority order
*
* Resource roots may be either the path to a zip file or the path to a filesystem directory
*/
class VirtualFilesystem {
public:
[[nodiscard]] static VirtualFilesystem* get_instance();
/*!
* \brief Adds the provided path to the resource roots that the virtual filesystem will care about
*/
void add_resource_root(const std::string& root);
/*!
* \brief Adds the provided folder accessor as an accessor for one of our root directories
*
* This method lets you add a custom folder accessor as a resource root. This allows for e.g. the Minecraft adapter to register a
* shaderpack accessor which transpiles the shaders from GLSL 120 to SPIR-V
*/
void add_resource_root(FolderAccessorBase* root_accessor);
[[nodiscard]] FolderAccessorBase* get_folder_accessor(const std::string& path) const;
private:
static VirtualFilesystem* instance;
std::vector<FolderAccessorBase*> resource_roots;
};
} // namespace nova::filesystem
| 1,422
|
C++
|
.h
| 31
| 38.806452
| 137
| 0.689531
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,253
|
folder_accessor.hpp
|
NovaMods_nova-renderer/include/nova_renderer/filesystem/folder_accessor.hpp
|
#pragma once
#include <rx/core/concurrency/mutex.h>
#include <rx/core/filesystem/directory.h>
#include <unordered_map>
#include <optional>
#include <string>
#include <cstdint>
namespace nova::filesystem {
/*!
* \brief A collection of resources on the filesystem
*
* "resourcepack" isn't the exact right name here. This isn't strictly a resourcepack in the Minecraft sense - it
* can be, sure, but it can also be a pure renderpack. Ths main point is to abstract away loading resources from a
* folder or a zip file - the calling code shouldn't care how the data is stored on the filesystem
*/
class FolderAccessorBase {
public:
[[nodiscard]] static FolderAccessorBase* create(const std::string& path);
/*!
* \brief Initializes this resourcepack to load resources from the folder/zip file with the provided name
* \param folder The name of the folder or zip file to load resources from, relative to Nova's working directory
*/
explicit FolderAccessorBase(std::string folder);
FolderAccessorBase(FolderAccessorBase&& other) noexcept = default;
FolderAccessorBase& operator=(FolderAccessorBase&& other) noexcept = default;
FolderAccessorBase(const FolderAccessorBase& other) = delete;
FolderAccessorBase& operator=(const FolderAccessorBase& other) = delete;
virtual ~FolderAccessorBase() = default;
/*!
* \brief Checks if the given resource exists
*
* This function locks resource_existence_mutex, so any methods which are called by this -
* does_resource_exist_internal and does_resource_exist_in_map - MUST not try to lock resource_existence_mutex
*
* \param resource_path The path to the resource you want to know the existence of, relative to this
* resourcepack's root
* \return True if the resource exists, false if it does not
*/
[[nodiscard]] bool does_resource_exist(const std::string& resource_path);
[[nodiscard]] virtual std::vector<uint8_t> read_file(const std::string& path) = 0;
/*!
* \brief Loads the resource with the given path
* \param resource_path The path to the resource to load, relative to this resourcepack's root
* \return All the bytes in the loaded resource
*/
[[nodiscard]] std::string read_text_file(const std::string& resource_path);
/*!
* \brief Retrieves the paths of all the items in the specified folder
* \param folder The folder to get all items from
* \return A list of all the paths in the provided folder
*/
[[nodiscard]] virtual std::vector<std::string> get_all_items_in_folder(const std::string& folder) = 0;
[[nodiscard]] const std::string& get_root() const;
[[nodiscard]] virtual FolderAccessorBase* create_subfolder_accessor(const std::string& path) const = 0;
protected:
std::string root_folder;
/*!
* \brief I expect certain resources, like textures, to be requested a lot as Nova streams them in and out of
* VRAM. This map caches if a resource exists or not - if a path is absent from the map, it's never been
* requested and we don't know if it exists. However, if a path has been checked before, we can now save an IO
* call!
*/
std::unordered_map<std::string, bool> resource_existence;
std::mutex* resource_existence_mutex;
[[nodiscard]] std::optional<bool> does_resource_exist_in_map(const std::string& resource_string) const;
/*!
* \brief Like the non-internal one, but does not add the folder's root to resource_path
*
* \param resource_path The path to the resource, with `our_root` already appended
*/
[[nodiscard]] virtual bool does_resource_exist_on_filesystem(const std::string& resource_path) = 0;
};
/*!
* \brief Checks if the given path has the other path as its root
* \param path The path to check if it has the root
* \param root The potential root path of the file
* \return True if `path` has `root` as its root, false otherwise
*/
[[nodiscard]] bool has_root(const std::string& path, const std::string& root);
} // namespace nova::filesystem
| 4,363
|
C++
|
.h
| 80
| 46.6125
| 120
| 0.673388
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,254
|
result.hpp
|
NovaMods_nova-renderer/libs/ntl/include/ntl/result.hpp
|
#pragma once
#include <functional>
#include <memory>
#include <string>
// ReSharper thinks that the include isn't used, but it's used in a macro so it needs to be here
// ReSharper disable once CppUnusedIncludeDirective
#include <fmt/format.h>
#include <nova_renderer/util/utils.hpp>
namespace ntl {
struct NovaError {
std::string message = "";
std::unique_ptr<NovaError> cause;
explicit NovaError(const std::string& message);
NovaError(const std::string& message, NovaError cause);
[[nodiscard]] std::string to_string() const;
};
inline NovaError operator""_err(const char* str, const std::size_t size) { return NovaError(std::string(str, size)); }
template <typename ValueType, typename ErrorType = NovaError>
struct Result {
union {
ValueType value;
ErrorType error;
};
bool is_moved = false;
bool has_value = false;
explicit Result(ValueType&& value) : value(std::move(value)), has_value(true) {}
explicit Result(const ValueType& value) : value(value), has_value(true) {}
explicit Result(ErrorType error) : error(std::move(error)) {}
explicit Result(const Result<ValueType>& other) = delete;
Result<ValueType>& operator=(const Result<ValueType>& other) = delete;
explicit Result(Result<ValueType>&& old) noexcept {
if(old.has_value) {
value = std::move(old.value);
old.value = {};
has_value = true;
} else {
error = std::move(old.error);
old.error = {};
}
old.is_moved = true;
};
Result& operator=(Result<ValueType>&& old) noexcept {
if(old.has_value) {
value = std::move(old.value);
has_value = true;
} else {
error = std::move(old.error);
}
old.is_moved = true;
return *this;
};
~Result() {
if(!is_moved) {
if(has_value) {
value.~ValueType();
} else {
error.~ErrorType();
}
}
}
template <typename FuncType>
auto map(FuncType&& func) -> Result<decltype(func(value))> {
using RetVal = decltype(func(value));
if(has_value) {
return Result<RetVal>(func(value));
} else {
return Result<RetVal>(std::move(error));
}
}
template <typename FuncType>
auto flat_map(FuncType&& func) -> Result<decltype(func(value).value)> {
using RetVal = decltype(func(value).value);
if(has_value) {
return func(value);
} else {
return Result<RetVal>(std::move(error));
}
}
template <typename FuncType>
void if_present(FuncType&& func) {
if(has_value) {
func(value);
}
}
template <typename FuncType>
void on_error(FuntType&& error_func) const {
if(!has_value) {
error_func(error);
}
}
operator bool() const { return has_value; }
ValueType operator*() { return value; }
};
template <typename ValueType>
Result(ValueType value)->Result<ValueType>;
#define MAKE_ERROR(s, ...) ::ntl::NovaError(fmt::format(fmt(s), __VA_ARGS__).c_str())
} // namespace ntl
| 3,587
|
C++
|
.h
| 97
| 26.113402
| 122
| 0.540157
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,255
|
uniform_structs.hpp
|
NovaMods_nova-renderer/src/render_objects/uniform_structs.hpp
|
#pragma once
#include <glm/glm.hpp>
namespace nova::renderer {
/*!
 * \brief Uniform data that is constant over a single frame
 *
 * The member set mirrors the uniforms exposed to Minecraft shader packs:
 * gbuffer/shadow matrices, celestial positions, held-item state, timing
 * and environment values.
 *
 * NOTE(review): if this struct is uploaded directly into a GPU uniform
 * buffer, the vec3/ivec2 members do not satisfy std140's 16-byte alignment
 * rules — confirm the shader-side block layout matches this C++ layout.
 */
struct PerFrameUniforms {
    // Current, inverse, and previous-frame matrices for the gbuffer pass
    glm::mat4 gbufferModelView;
    glm::mat4 gbufferModelViewInverse;
    glm::mat4 gbufferPreviousModelView;
    glm::mat4 gbufferProjection;
    glm::mat4 gbufferProjectionInverse;
    glm::mat4 gbufferPreviousProjection;
    // Matrices for the shadow pass
    glm::mat4 shadowProjection;
    glm::mat4 shadowProjectionInverse;
    glm::mat4 shadowModelView;
    glm::mat4 shadowModelViewInverse;
    glm::vec4 entityColor;
    glm::vec3 fogColor;
    glm::vec3 skyColor;
    // World-space directions/positions of light sources and the camera
    glm::vec3 sunPosition;
    glm::vec3 moonPosition;
    glm::vec3 shadowLightPosition;
    glm::vec3 upPosition;
    glm::vec3 cameraPosition;
    glm::vec3 previousCameraPosition;
    glm::ivec2 eyeBrightness;
    glm::ivec2 eyeBrightnessSmooth;
    glm::ivec2 terrainTextureSize;
    glm::ivec2 atlasSize;
    // Items held in the main hand (Id/LightValue) and off hand (Id2/LightValue2)
    int heldItemId;
    int heldBlockLightValue;
    int heldItemId2;
    int heldBlockLightValue2;
    int fogMode;
    int worldTime;
    int moonPhase;
    int terrainIconSize;
    int isEyeInWater;
    int hideGUI;
    int entityId;
    int blockEntityId;
    // Timing and weather scalars
    float frameTimeCounter;
    float sunAngle;
    float shadowAngle;
    float rainStrength;
    // Viewport and clip-plane parameters
    float aspectRatio;
    float viewWidth;
    float viewHeight;
    float clip_distance_near;
    float clip_distance_far;
    float wetness;
    float eyeAltitude;
    float centerDepthSmooth;
};
} // namespace nova::renderer
| 1,622
|
C++
|
.h
| 53
| 22.320755
| 44
| 0.646458
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,256
|
vma_usage.hpp
|
NovaMods_nova-renderer/src/util/vma_usage.hpp
|
#pragma once
#undef NOMINMAX
#undef WIN32_LEAN_AND_MEAN
#undef VK_USE_PLATFORM_WIN32_KHR
#include <vk_mem_alloc.h>
| 117
|
C++
|
.h
| 5
| 22
| 32
| 0.8
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,257
|
memory_utils.hpp
|
NovaMods_nova-renderer/src/util/memory_utils.hpp
|
#pragma once
#include "nova_renderer/util/bytes.hpp"
/*!
* \brief Some useful utilities
*/
namespace nova::mem {
    /*!
     * \brief Rounds `value` up to the next multiple of `alignment`
     *
     * An alignment of zero means "no alignment" and returns the value
     * unchanged; a value already on an alignment boundary is likewise
     * returned unchanged.
     *
     * Fix: the previous expression added `value % alignment` (the distance
     * already travelled past the boundary) instead of
     * `alignment - value % alignment` (the distance to the next boundary),
     * so e.g. align(5b, 4b) yielded 6b instead of 8b. Also corrects the
     * namespace-close comment (`nova::mem`, not `nova::memory`).
     */
    constexpr Bytes align(const Bytes value, const Bytes alignment) noexcept {
        if(alignment == Bytes(0) || value % alignment == Bytes(0)) {
            return value;
        }
        return value + (alignment - value % alignment);
    }
} // namespace nova::mem
| 376
|
C++
|
.h
| 11
| 30.545455
| 115
| 0.676796
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,258
|
windows_utils.hpp
|
NovaMods_nova-renderer/src/util/windows_utils.hpp
|
/*!
* \brief Utility functions for handling Windows
*
* \author ddubois
* \date 10-Oct-18.
*/
#ifndef NOVA_RENDERER_WINDOWS_UTILS_HPP
#define NOVA_RENDERER_WINDOWS_UTILS_HPP
#include <string>
#include <string>
/*!
* \brief Converts a string to a wide string because Windows
*
* \param s The string to convert
* \return The converted string
*/
std::wstring s2ws(const std::string& s);
/*!
* \brief Retrieves the most recent Windows error and returns it to the user
* \return The error string of the most recent Windows error
*/
std::string get_last_windows_error();
#endif // NOVA_RENDERER_WINDOWS_UTILS_HPP
| 625
|
C++
|
.h
| 23
| 25.391304
| 76
| 0.747069
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,259
|
renderdoc.hpp
|
NovaMods_nova-renderer/src/debugging/renderdoc.hpp
|
/*!
* \author ddubois
* \date 24-Jan-19.
*/
#ifndef NOVA_RENDERER_RENDERDOC_HPP
#define NOVA_RENDERER_RENDERDOC_HPP
#include "nova_renderer/renderdoc_app.h"
#include "nova_renderer/util/result.hpp"
namespace nova::renderer {
ntl::Result<RENDERDOC_API_1_3_0*> load_renderdoc(const std::string& renderdoc_dll_path);
} // namespace nova::renderer
#endif // NOVA_RENDERER_RENDERDOC_HPP
| 393
|
C++
|
.h
| 12
| 30.833333
| 92
| 0.758621
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,260
|
vulkan_command_list.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_command_list.hpp
|
#pragma once
#include <vulkan/vulkan.h>
#include "nova_renderer/rhi/command_list.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
#include "vk_structs.hpp"
namespace nova::renderer::rhi {
class VulkanRenderDevice;
/*!
 * \brief Vulkan implementation of `command_list`
 *
 * Thin recording wrapper: each override records the corresponding command
 * into the `cmds` command buffer. The device and allocator references are
 * borrowed for the lifetime of the list.
 */
class VulkanRenderCommandList final : public RhiRenderCommandList {
public:
    // Underlying Vulkan command buffer that all methods record into
    vk::CommandBuffer cmds;
    VulkanRenderCommandList(vk::CommandBuffer cmds, VulkanRenderDevice& render_device, rx::memory::allocator& allocator);
    ~VulkanRenderCommandList() override = default;
    // Attaches a human-readable name to the command buffer for debug tooling
    void set_debug_name(const std::string& name) override;
    void bind_material_resources(RhiBuffer* camera_buffer,
                                 RhiBuffer* material_buffer,
                                 RhiSampler* point_sampler,
                                 RhiSampler* bilinear_sampler,
                                 RhiSampler* trilinear_sampler,
                                 const std::vector<RhiImage*>& textures,
                                 rx::memory::allocator& allocator) override;
    void bind_resources(RhiResourceBinder& binder) override;
    void resource_barriers(PipelineStage stages_before_barrier,
                           PipelineStage stages_after_barrier,
                           const std::vector<RhiResourceBarrier>& barriers) override;
    void copy_buffer(RhiBuffer* destination_buffer,
                     mem::Bytes destination_offset,
                     RhiBuffer* source_buffer,
                     mem::Bytes source_offset,
                     mem::Bytes num_bytes) override;
    void execute_command_lists(const std::vector<RhiRenderCommandList*>& lists) override;
    void set_camera(const Camera& camera) override;
    void begin_renderpass(RhiRenderpass* renderpass, RhiFramebuffer* framebuffer) override;
    void end_renderpass() override;
    void set_material_index(uint32_t index) override;
    void set_pipeline(const RhiPipeline& state) override;
    void bind_descriptor_sets(const std::vector<RhiDescriptorSet*>& descriptor_sets,
                              const RhiPipelineInterface* pipeline_interface) override;
    void bind_vertex_buffers(const std::vector<RhiBuffer*>& buffers) override;
    void bind_index_buffer(const RhiBuffer* buffer, IndexType index_type) override;
    void draw_indexed_mesh(uint32_t num_indices, uint32_t offset, uint32_t num_instances) override;
    void set_scissor_rect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) override;
    void upload_data_to_image(
        RhiImage* image, size_t width, size_t height, size_t bytes_per_pixel, RhiBuffer* staging_buffer, const void* data) override;
public:
    /*!
     * \brief Called by VulkanRenderDevice when this command list has finished execution on the GPU
     *
     * This method should free any transient resources that the command lists uses
     */
    void cleanup_resources();
private:
    // Borrowed, not owned: the device this list was created from, and the
    // allocator used for transient allocations
    VulkanRenderDevice& device;
    rx::memory::allocator& allocator;
    uint32_t camera_index = 0;
    // Render pass currently being recorded, if any
    VulkanRenderpass* current_render_pass = nullptr;
    vk::PipelineLayout current_layout = VK_NULL_HANDLE;
    // Descriptor sets bound during recording; presumably released in
    // cleanup_resources — confirm in the .cpp
    std::vector<vk::DescriptorSet> descriptor_sets;
};
} // namespace nova::renderer::rhi
| 3,509
|
C++
|
.h
| 63
| 42.68254
| 136
| 0.641206
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,261
|
vulkan.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan.hpp
|
#pragma once
#include "nova_renderer/util/platform.hpp"
// vulkan.h is a C header, so it does C things, and my C++ linter is like "wati no be more C++" but I ain't about to
// rewrite vulkan.h
#pragma warning(push, 0)
#include <vulkan/vulkan.hpp>
// I really don't know how Khronos/anyone else gets vulkan.h to work. Doing this manually feels dirty, and not in a
// good way, but it works
#ifdef NOVA_LINUX
#define VK_USE_PLATFORM_XLIB_KHR
#include <X11/Xlib.h>
#include <vulkan/vulkan_xlib.h>
#elif defined(NOVA_WINDOWS)
#define VK_USE_PLATFORM_WIN32_KHR
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#include <vulkan/vulkan_win32.h>
#endif
#pragma warning(pop)
// Thank you, Windows, for being an idiot
#ifdef ERROR
#undef ERROR
#endif
#ifdef far
#undef far
#endif
#ifdef near
#undef near
#endif
// Thank you, X11, for proving that the Linux ecosystem has many of the same problems as Windows
#ifdef Always
#undef Always
#endif
#ifdef None
#undef None
#endif
#ifdef Bool
#undef Bool
#endif
#ifdef Status
#undef Status
#endif
#ifdef True
#undef True
#endif
#ifdef False
#undef False
#endif
| 1,117
|
C++
|
.h
| 49
| 21.673469
| 116
| 0.778928
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,262
|
vk_structs.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vk_structs.hpp
|
/*!
* \brief Vulkan definition of the structs forward-declared in render_device.hpp
*/
#pragma once
#include <vk_mem_alloc.h>
#include <vulkan/vulkan.hpp>
#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
namespace nova::renderer::rhi {
struct VulkanDeviceMemory : RhiDeviceMemory {
vk::DeviceMemory memory;
};
struct VulkanSampler : RhiSampler {
vk::Sampler sampler;
};
struct VulkanImage : RhiImage {
vk::Image image = VK_NULL_HANDLE;
vk::ImageView image_view = VK_NULL_HANDLE;
VmaAllocation allocation{};
};
struct VulkanBuffer : RhiBuffer {
vk::Buffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation{};
VmaAllocationInfo allocation_info{};
};
struct VulkanMaterialResources : RhiMaterialResources {
vk::DescriptorSet set;
};
struct VulkanPipelineLayoutInfo {
std::unordered_map<std::string, RhiResourceBindingDescription> bindings;
std::vector<vk::DescriptorSetLayout> descriptor_set_layouts;
vk::PipelineLayout layout;
std::vector<uint32_t> variable_descriptor_set_counts;
};
/*!
* \brief Represents a Vulkan pipeline
*
* Vulkan pipelines are actually compiled lazily, because they depend on the layouts of the render targets they
* write to. This struct just contains the input layout of the pipeline and the PSO create info, which we combine
* with a renderpass to compile the pipeline
*/
struct VulkanPipeline : RhiPipeline {
RhiGraphicsPipelineState state;
VulkanPipelineLayoutInfo layout;
};
struct VulkanRenderpass : RhiRenderpass {
vk::RenderPass pass = VK_NULL_HANDLE;
vk::Rect2D render_area{};
/*!
* \brief Cache of pipelines that get used in this renderpass
*
* We keep a cache of PSOs that are used by this renderpass, using the frontend name of the pipeline state as a key. If we've
* already used a pipeline state with this renderpass we just get the caches PSO, otherwise we have to create it
*/
std::unordered_map<std::string, vk::Pipeline> cached_pipelines;
};
struct VulkanFramebuffer : RhiFramebuffer {
vk::Framebuffer framebuffer = VK_NULL_HANDLE;
};
struct VulkanPipelineInterface : RhiPipelineInterface {
/*!
* \brief Renderpass for the pipeline's output layouts because why _wouldn't_ that be married to the
* renderpass itself?
*/
vk::RenderPass pass = VK_NULL_HANDLE;
};
struct VulkanDescriptorPool : RhiDescriptorPool {
vk::DescriptorPool descriptor_pool{};
};
struct VulkanDescriptorSet : RhiDescriptorSet {
vk::DescriptorSet descriptor_set;
};
struct VulkanSemaphore : RhiSemaphore {
vk::Semaphore semaphore;
};
struct VulkanFence : RhiFence {
vk::Fence fence;
};
struct VulkanGpuInfo {
vk::PhysicalDevice phys_device{};
std::vector<vk::QueueFamilyProperties> queue_family_props;
std::vector<vk::ExtensionProperties> available_extensions;
vk::SurfaceCapabilitiesKHR surface_capabilities{};
std::vector<vk::SurfaceFormatKHR> surface_formats;
vk::PhysicalDeviceProperties props{};
vk::PhysicalDeviceFeatures supported_features{};
vk::PhysicalDeviceMemoryProperties memory_properties{};
};
} // namespace nova::renderer::rhi
| 3,526
|
C++
|
.h
| 89
| 32.741573
| 133
| 0.68717
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,263
|
vulkan_swapchain.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_swapchain.hpp
|
#pragma once
#include <glm/glm.hpp>
#include <cstdint>
#include <vulkan/vulkan.h>
#include "nova_renderer/rhi/swapchain.hpp"
namespace nova::renderer::rhi {
struct RhiFence;
struct RhiFramebuffer;
struct RhiImage;
class VulkanRenderDevice;
/*!
* \brief Deals with the swapchain, yo
*
* Methods to get he next swapchain image and whatnot are found here
*
* You can even get the framebuffer constructed from the current swapchain. Wow!
*/
class VulkanSwapchain final : public Swapchain {
public:
VulkanSwapchain(uint32_t num_swapchain_images,
VulkanRenderDevice* render_device,
glm::uvec2 window_dimensions,
const std::vector<vk::PresentModeKHR>& present_modes);
#pragma region Swapchain implementation
uint8_t acquire_next_swapchain_image(rx::memory::allocator& allocator) override;
void present(uint32_t image_idx) override;
#pragma endregion
[[nodiscard]] vk::ImageLayout get_layout(uint32_t frame_idx);
[[nodiscard]] vk::Extent2D get_swapchain_extent() const;
[[nodiscard]] vk::Format get_swapchain_format() const;
// I've had a lot of bugs with RAII so here's an explicit cleanup method
void deinit();
[[nodiscard]] uint32_t get_num_images() const;
private:
VulkanRenderDevice* render_device;
vk::SwapchainKHR swapchain{};
vk::Extent2D swapchain_extent;
vk::PresentModeKHR present_mode;
vk::Format swapchain_format;
std::vector<vk::ImageView> swapchain_image_views;
std::vector<vk::ImageLayout> swapchain_image_layouts;
uint32_t num_swapchain_images;
#pragma region Initialization
static vk::SurfaceFormatKHR choose_surface_format(const std::vector<vk::SurfaceFormatKHR>& formats);
static vk::PresentModeKHR choose_present_mode(const std::vector<vk::PresentModeKHR>& modes);
static vk::Extent2D choose_surface_extent(const vk::SurfaceCapabilitiesKHR& caps, const glm::ivec2& window_dimensions);
/*!
* \brief Creates the vk::Swapchain and saves some metadata about it
*
* \param requested_num_swapchain_images The number of swapchain images you want in the swapchain. Vulkan may or may not create more
* than you request
* \param present_modes The present modes you're wiling to use
* \param window_dimensions The dimensions of the window you'll be presenting to
*
* \post The swapchain is created
* \post swapchain_format is set to the swapchain's actual format
* \post present_mode is set to the swapchain's actual present mode
* \post swapchain_extent is set to the swapchain's actual extent
*/
void create_swapchain(uint32_t requested_num_swapchain_images,
const std::vector<vk::PresentModeKHR>& present_modes,
const glm::uvec2& window_dimensions);
/*!
* \brief Gets the images from the swapchain, so we can create framebuffers and whatnot from them
*
* \pre The swapchain exists
*/
std::vector<vk::Image> get_swapchain_images();
/*!
* \brief Creates an image view, framebuffer, and fence for a specific swapchain image
*
* \param image The swapchain image to create resources for
* \param renderpass The renderpass returned by create_dummy_renderpass
* \param swapchain_size The size of the swapchain
*
* \note This method will add to swapchain_image_views, swapchain_images, framebuffers, and fences. Its intended use is to be called
* in a loop over all swapchain images, and never called again. It mostly exists to make the constructor cleaner
*/
void create_resources_for_frame(vk::Image image, vk::RenderPass renderpass, const glm::uvec2& swapchain_size);
/*!
* \brief Transitions all the provided images into COLOR_ATTACHMENT layout
*/
void transition_swapchain_images_into_color_attachment_layout(const std::vector<vk::Image>& images) const;
/*!
* \brief Creates a dummy renderpass that only writes to one image - the swapchain. I need it so I can create framebuffers for the
* swapchain images
*/
[[nodiscard]] vk::RenderPass create_dummy_renderpass() const;
#pragma endregion
};
} // namespace nova::renderer::rhi
| 4,560
|
C++
|
.h
| 91
| 41.087912
| 140
| 0.668765
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,264
|
vulkan_resource_binder.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_resource_binder.hpp
|
#pragma once
#include <rx/core/deferred_function.h>
#include <vulkan/vulkan.hpp>
#include "nova_renderer/rhi/resource_binder.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"
namespace nova {
namespace renderer {
struct RhiGraphicsPipelineState;
}
} // namespace nova
namespace nova::renderer::rhi {
class VulkanRenderDevice;
/*!
* \brief Binding resources in Vulkan!
*
* This is basically a thing wrapper around descriptor sets. The constructor creates the descriptor sets, the destructor destroys them
*/
class VulkanResourceBinder final : public RhiResourceBinder {
public:
#pragma region Lifecycle
VulkanResourceBinder(VulkanRenderDevice& device,
std::unordered_map<std::string, RhiResourceBindingDescription> bindings,
std::vector<vk::DescriptorSet> sets,
vk::PipelineLayout layout,
rx::memory::allocator& allocator);
VulkanResourceBinder(const VulkanResourceBinder& other) = delete;
VulkanResourceBinder& operator=(const VulkanResourceBinder& other) = delete;
VulkanResourceBinder(VulkanResourceBinder&& old) noexcept = default;
VulkanResourceBinder& operator=(VulkanResourceBinder&& old) noexcept = default;
~VulkanResourceBinder() override = default;
#pragma endregion
#pragma region RhiResourceBinder
void bind_image(const std::string& binding_name, RhiImage* image) override;
void bind_buffer(const std::string& binding_name, RhiBuffer* buffer) override;
void bind_sampler(const std::string& binding_name, RhiSampler* sampler) override;
void bind_image_array(const std::string& binding_name, const std::vector<RhiImage*>& images) override;
void bind_buffer_array(const std::string& binding_name, const std::vector<RhiBuffer*>& buffers) override;
void bind_sampler_array(const std::string& binding_name, const std::vector<RhiSampler*>& samplers) override;
#pragma endregion
[[nodiscard]] vk::PipelineLayout get_layout() const;
[[nodiscard]] const std::vector<vk::DescriptorSet>& get_sets();
private:
bool dirty = false;
VulkanRenderDevice* render_device;
rx::memory::allocator* allocator;
/*!
* \brief Layout for pipelines that can access this binder's resources
*/
vk::PipelineLayout layout;
/*!
* \brief Descriptor sets for this binder
*/
std::vector<vk::DescriptorSet> sets;
std::unordered_map<std::string, RhiResourceBindingDescription> bindings;
std::unordered_map<std::string, std::vector<RhiImage*>> bound_images;
std::unordered_map<std::string, std::vector<RhiBuffer*>> bound_buffers;
std::unordered_map<std::string, std::vector<RhiSampler*>> bound_samplers;
void update_all_descriptors();
};
} // namespace nova::renderer::rhi
| 2,987
|
C++
|
.h
| 60
| 41.383333
| 138
| 0.68722
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,265
|
vulkan_render_device.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_render_device.hpp
|
#pragma once
#include <vk_mem_alloc.h>
#include <vulkan/vulkan.hpp>
#include "vk_structs.hpp"
#include "vulkan_swapchain.hpp"
namespace nova {
namespace renderer {
class NovaWindow;
class NovaSettingsAccessManager;
}
}
namespace nova::renderer::rhi {
struct VulkanDeviceInfo {
uint64_t max_uniform_buffer_size = 0;
};
struct VulkanInputAssemblerLayout {
std::vector<vk::VertexInputAttributeDescription> attributes;
std::vector<vk::VertexInputBindingDescription> bindings;
};
/*!
* \brief Task that should be executed when a fence has been signaled
*/
struct FencedTask {
vk::Fence fence;
std::function<void()> work_to_perform;
void operator()() const;
};
/*!
* \brief Vulkan implementation of a render engine
*/
class VulkanRenderDevice final {
public:
vk::AllocationCallbacks vk_internal_allocator;
// Global Vulkan objects
vk::Instance instance;
vk::Device device;
vk::SurfaceKHR surface{};
uint32_t graphics_family_index;
uint32_t compute_family_index;
uint32_t transfer_family_index;
vk::Queue graphics_queue;
vk::Queue compute_queue;
vk::Queue copy_queue;
// Info about the hardware
VulkanGpuInfo gpu;
uint32_t cur_frame_idx;
/*!
* \brief All the push constants in the standard pipeline layout
*/
std::vector<vk::PushConstantRange> standard_push_constants;
std::unordered_map<std::string, RhiResourceBindingDescription> standard_layout_bindings;
/*!
* \brief Layout for the standard descriptor set
*/
vk::DescriptorSetLayout standard_set_layout;
/*!
* \brief The pipeline layout that all pipelines use
*/
vk::PipelineLayout standard_pipeline_layout;
vk::DescriptorPool standard_descriptor_set_pool;
/*!
* \brief The descriptor set that binds to the standard pipeline layout
*/
std::vector<vk::DescriptorSet> standard_descriptor_sets;
// Debugging things
PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT = nullptr;
PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT = nullptr;
PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT = nullptr;
VulkanRenderDevice(NovaSettingsAccessManager& settings, NovaWindow& window);
VulkanRenderDevice(VulkanRenderDevice&& old) noexcept = delete;
VulkanRenderDevice& operator=(VulkanRenderDevice&& old) noexcept = delete;
VulkanRenderDevice(const VulkanRenderDevice& other) = delete;
VulkanRenderDevice& operator=(const VulkanRenderDevice& other) = delete;
~VulkanRenderDevice() = default;
#pragma region Render engine interface
void set_num_renderpasses(uint32_t num_renderpasses) override;
ntl::Result<RhiRenderpass*> create_renderpass(const renderpack::RenderPassCreateInfo& data,
const glm::uvec2& framebuffer_size) override;
RhiFramebuffer* create_framebuffer(const RhiRenderpass* renderpass,
const std::vector<RhiImage*>& color_attachments,
const std::optional<RhiImage*> depth_attachment,
const glm::uvec2& framebuffer_size) override;
std::unique_ptr<RhiPipeline> create_surface_pipeline(const RhiGraphicsPipelineState& pipeline_state) override;
std::unique_ptr<RhiPipeline> create_global_pipeline(const RhiGraphicsPipelineState& pipeline_state) override;
std::unique_ptr<RhiResourceBinder> create_resource_binder_for_pipeline(const RhiPipeline& pipeline) override;
RhiBuffer* create_buffer(const RhiBufferCreateInfo& info) override;
void write_data_to_buffer(const void* data, mem::Bytes num_bytes, const RhiBuffer* buffer) override;
RhiSampler* create_sampler(const RhiSamplerCreateInfo& create_info) override;
RhiImage* create_image(const renderpack::TextureCreateInfo& info) override;
RhiSemaphore* create_semaphore() override;
std::vector<RhiSemaphore*> create_semaphores(uint32_t num_semaphores) override;
RhiFence* create_fence(bool signaled) override;
std::vector<RhiFence*> create_fences(uint32_t num_fences, bool signaled) override;
void wait_for_fences(std::vector<RhiFence*> fences) override;
void reset_fences(const std::vector<RhiFence*>& fences) override;
void destroy_renderpass(RhiRenderpass* pass) override;
void destroy_framebuffer(RhiFramebuffer* framebuffer) override;
void destroy_texture(RhiImage* resource) override;
void destroy_semaphores(std::vector<RhiSemaphore*>& semaphores) override;
void destroy_fences(const std::vector<RhiFence*>& fences) override;
RhiRenderCommandList* create_command_list(uint32_t thread_idx,
QueueType needed_queue_type,
RhiRenderCommandList::Level level) override;
void submit_command_list(RhiRenderCommandList* cmds,
QueueType queue,
RhiFence* fence_to_signal = nullptr,
const std::vector<RhiSemaphore*>& wait_semaphores = {},
const std::vector<RhiSemaphore*>& signal_semaphores = {}) override;
void end_frame(FrameContext& ctx) override;
#pragma endregion
public:
[[nodiscard]] uint32_t get_queue_family_index(QueueType type) const;
VulkanPipelineLayoutInfo create_pipeline_layout(const RhiGraphicsPipelineState& state);
/*!
* \brief Creates a new PSO
*
* \param state Pipeline state to bake into the PSO
* \param renderpass The render pas that this pipeline will be used with
* \param allocator Allocator to use for any needed memory
*
* \return The new PSO
*/
[[nodiscard]] ntl::Result<vk::Pipeline> compile_pipeline_state(const VulkanPipeline& state, const VulkanRenderpass& renderpass);
[[nodiscard]] std::optional<vk::DescriptorPool> create_descriptor_pool(
const std::unordered_map<DescriptorType, uint32_t>& descriptor_capacity);
/*!
* \brief Gets the next available descriptor set for the standard pipeline layout
*
* If there are no free descriptor sets for the standard pipeline layout, this method creates a new one
*/
[[nodiscard]] vk::DescriptorSet get_next_standard_descriptor_set();
/*!
* \brief Lets the render device know that all the provided descriptor sets are no longer in use by the GPU and can be used for
* whatever
*/
void return_standard_descriptor_sets(const std::vector<vk::DescriptorSet>& sets);
std::vector<vk::DescriptorSet> create_descriptors(const std::vector<vk::DescriptorSetLayout>& descriptor_set_layouts,
const std::vector<uint32_t>& variable_descriptor_max_counts) const;
[[nodiscard]] vk::Fence get_next_submission_fence();
protected:
void create_surface();
private:
VulkanDeviceInfo vk_info;
VmaAllocator vma;
/*!
* The index in the vector is the thread index, the key in the map is the queue family index
*/
std::vector<std::unordered_map<uint32_t, vk::CommandPool>> command_pools_by_thread_idx;
std::vector<FencedTask> fenced_tasks;
std::vector<vk::Fence> submission_fences;
#pragma region Initialization
std::vector<const char*> enabled_layer_names;
void create_instance();
void enable_debug_output();
/*!
* \brief Copies device information, such as hardware limits and memory architecture, to the API-agnostic DeviceInfo struct
*
* This allows things outside of a render engine to make decisions based on GPU information
*/
void save_device_info();
void initialize_vma();
void create_device_and_queues();
bool does_device_support_extensions(vk::PhysicalDevice device, const std::vector<char*>& required_device_extensions);
void create_swapchain();
void create_per_thread_command_pools();
void create_standard_pipeline_layout();
[[nodiscard]] std::unordered_map<uint32_t, vk::CommandPool> make_new_command_pools() const;
#pragma endregion
#pragma region Helpers
enum class MemorySearchMode { Exact, Fuzzy };
/*!
* \brief Finds the index of the memory type with the desired flags
*
* \param[in] search_flags Flags to search for
* \param[in] search_mode What search mode to use. If search_mode is MemorySearchMode::Exact, this method will only return the index
* of a memory type whose flags exactly match search_flags. If search_mode is MemorySearchMode::Fuzzy, this method will return the
* index of the first memory type whose flags include search_flags
*
* \return The index of the memory type with the desired flags, or VK_MAX_MEMORY_TYPES if no memory types match the given flags
*/
[[nodiscard]] uint32_t find_memory_type_with_flags(uint32_t search_flags,
MemorySearchMode search_mode = MemorySearchMode::Fuzzy) const;
[[nodiscard]] std::optional<vk::ShaderModule> create_shader_module(const std::vector<uint32_t>& spirv) const;
/*!
* \brief Gets the image view associated with the given image
*
* Nova simplifies things a lot and only has one image view for each image. This is maintained within the
* Vulkan backend, since neither DX12 nor OpenGL have a direct equivalent. I may or may not emulate image views
* for those APIs if the demand is there, but idk
*
* The method checks an internal hash map. If there's already an image view for the given image then great,
* otherwise one is created on-demand
*/
[[nodiscard]] static vk::ImageView image_view_for_image(const RhiImage* image);
[[nodiscard]] static vk::CommandBufferLevel to_vk_command_buffer_level(RhiRenderCommandList::Level level);
[[nodiscard]] static VulkanInputAssemblerLayout get_input_assembler_setup(const std::vector<RhiVertexField>& vertex_fields);
#pragma endregion
#pragma region Debugging
vk::DebugUtilsMessengerEXT debug_callback{};
static VKAPI_ATTR vk::Bool32 VKAPI_CALL debug_report_callback(vk::DebugUtilsMessageSeverityFlagBitsEXT message_severity,
vk::DebugUtilsMessageTypeFlagsEXT message_types,
const vk::DebugUtilsMessengerCallbackDataEXT* callback_data,
void* render_device);
#pragma endregion
};
} // namespace nova::renderer::rhi
| 11,401
|
C++
|
.h
| 206
| 43.728155
| 140
| 0.656287
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,266
|
vulkan_utils.hpp
|
NovaMods_nova-renderer/src/rhi/vulkan/vulkan_utils.hpp
|
#pragma once
#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/rhi/command_list.hpp"
// idk maybe this header is included in places that already include Vulkan? Either way I want this include here and not anywhere else
// ReSharper disable once CppUnusedIncludeDirective
#include "vulkan.hpp"
namespace nova {
namespace renderer {
enum class BlendOp;
enum class BlendFactor;
enum class StencilOp;
enum class CompareOp;
} // namespace renderer
} // namespace nova
namespace nova::renderer::rhi {
class VulkanRenderDevice;
vk::ImageLayout to_vk_image_layout(ResourceState layout);
vk::AccessFlags to_vk_access_flags(ResourceAccess access);
vk::PrimitiveTopology to_primitive_topology(renderpack::RPPrimitiveTopology topology);
vk::BlendFactor to_blend_factor(BlendFactor factor);
vk::BlendOp to_blend_op(const BlendOp blend_op);
vk::CompareOp to_compare_op(CompareOp compare_op);
vk::StencilOp to_stencil_op(StencilOp stencil_op);
vk::Format to_vk_format(PixelFormat format);
vk::Filter to_vk_filter(TextureFilter filter);
vk::SamplerAddressMode to_vk_address_mode(TextureCoordWrapMode wrap_mode);
vk::DescriptorType to_vk_descriptor_type(DescriptorType type);
vk::ShaderStageFlags to_vk_shader_stage_flags(ShaderStage flags);
std::string to_string(vk::Result result);
std::string to_string(vk::ObjectType obj_type);
[[nodiscard]] vk::Format to_vk_vertex_format(VertexFieldFormat field);
[[nodiscard]] std::vector<vk::DescriptorSetLayout> create_descriptor_set_layouts(
const std::unordered_map<std::string, RhiResourceBindingDescription>& all_bindings, VulkanRenderDevice& render_device);
bool operator&(const ShaderStage& lhs, const ShaderStage& rhs);
} // namespace nova::renderer::rhi
// Only validate errors in debug mode
// Release mode needs to be fast A F
#ifdef NOVA_DEBUG
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define NOVA_CHECK_RESULT(expr) \
{ \
const vk::Result result = (expr); \
if(result != VK_SUCCESS) { \
logger->error("{}:{}=>{}={}", __FILE__, __LINE__, #expr, to_string(result)); \
} \
}
#else
#define NOVA_CHECK_RESULT(expr) expr
#endif
| 2,893
|
C++
|
.h
| 49
| 54.265306
| 142
| 0.570315
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,267
|
vulkan_render_backend.hpp
|
NovaMods_nova-renderer/src/render/backend/vulkan_render_backend.hpp
|
#pragma once

#include <array>
#include <cstdint>
#include <vector>

#include "../../rhi/vulkan/vulkan.hpp"

namespace nova::renderer {
    /*!
     * \brief Abstraction over a rendering device that implements Vulkan
     *
     * Handles things like GPU/CPU synchronization, memory management, and submitting commands to the actual device
     *
     * Note that, although the device may be working on commands for multiple frames worth of work, this abstraction
     * can only be used to record commands for a single frame at a time
     */
    class VulkanBackend {
    public:
        /*!
         * \brief Initialize the Vulkan instance and device, created the command queues and memory allocators, registers debug helpers
         */
        explicit VulkanBackend(HWND window_handle);

        /*!
         * \brief Waits for the GPU to finish all in-flight frames, then destroys all resources and generally cleans up
         */
        ~VulkanBackend();

        /*!
         * \brief Advances the internal frame index, waits for the previous frame of that index to complete, frees
         * resources that are scheduled for destruction, all that jazz
         */
        void begin_frame();

        /*!
         * \brief Retrieves a command buffer that may be used for the current frame
         */
        [[nodiscard]] vk::CommandBuffer get_command_buffer();

        /*!
         * \brief Batches the provided command list for execution when the current frame ends
         */
        void submit_command_buffer(vk::CommandBuffer buffer);

        /*!
         * \brief Submits all the batched command lists, then flips the swapchain
         */
        void end_frame();

        [[nodiscard]] vk::Instance get_instance() const;

        [[nodiscard]] vk::Device get_device() const;

    private:
        /*!
         * \brief Maximum number of frames we can submit to the GPU before waiting for any
         */
        const static uint32_t num_gpu_frames{3};

        std::vector<const char*> enabled_layer_names;

        vk::Instance instance;

        VkSurfaceKHR surface;

        vk::Device device;

        vk::Queue graphics_queue;

        /*!
         * \brief Semaphores used to synchronize the GPU frames
         *
         * NOTE(review): these are vk::Semaphore but the member is named `frame_fences` — confirm whether fences were
         * intended (CPU/GPU sync) or the name should change to match the semaphore type
         */
        std::array<vk::Semaphore, num_gpu_frames> frame_fences;

        /*!
         * \brief Command buffers to submit at the end of the current frame
         */
        std::vector<vk::CommandBuffer> batched_command_buffers;

        // Index of the current GPU frame, in [0, num_gpu_frames)
        uint32_t frame_idx{0};

#pragma region Init
        void create_surface(HWND window_handle);
#pragma endregion

#pragma region Debug
        static VKAPI_ATTR VkBool32 VKAPI_CALL debug_report_callback(VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
                                                                    VkDebugUtilsMessageTypeFlagsEXT message_types,
                                                                    const VkDebugUtilsMessengerCallbackDataEXT* callback_data,
                                                                    void* render_device);
#pragma endregion
    };
} // namespace nova::renderer
| 3,113
|
C++
|
.h
| 72
| 32.861111
| 134
| 0.62053
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,268
|
json_utils.hpp
|
NovaMods_nova-renderer/src/loading/json_utils.hpp
|
#pragma once

#include <nlohmann/json.hpp>

#include <cstdint>
#include <optional>
#include <vector>

namespace nova::renderer {
    /*!
     * \brief Retrieves an individual value from the provided JSON structure
     * \tparam ValType The type of the value to retrieve
     * \param json_obj The JSON object where your value might be found
     * \param key The name of the value
     * \return An optional that contains the value, if it can be found, or an empty optional if the value cannot be found
     *
     * \note Only enabled if we're not getting a string. Use `get_json_string` to get a string
     */
    template <typename ValType>
    std::optional<ValType> get_json_opt(const nlohmann::json& json_obj, const char* key);

    /*!
     * \brief Retrieves an individual value from the provided JSON structure
     * \tparam ValType The type of the value to retrieve
     * \param json_obj The JSON object where your value might be found
     * \param key The name of the value
     * \param deserializer A function that deserializes the JSON value
     * \return An optional that contains the value, if it can be found, or an empty optional if the value cannot be found
     */
    template <typename ValType, typename FuncType>
    std::optional<ValType> get_json_opt(const nlohmann::json& json_obj, const char* key, FuncType&& deserializer);

    /*!
     * \brief Retrieves an individual value from the provided JSON structure
     * \tparam ValType The type of the value to retrieve
     * \param json_obj The JSON object where your value might be found
     * \param key The name of the value
     * \param default_value The value to use if the requested key isn't present in the JSON
     * \return The value from the JSON if the key exists in the JSON, or `default_value` if it does not
     */
    template <typename ValType>
    ValType get_json_value(const nlohmann::json& json_obj, const char* key, ValType default_value);

    /*!
     * \brief Retrieves an individual value from the provided JSON structure
     * \tparam ValType The type of the value to retrieve
     * \param json_obj The JSON object where your value might be found
     * \param key The name of the value
     * \param default_value The value to use if the requested key isn't present in the JSON
     * \param deserializer A function that deserializes the JSON value
     * \return The value from the JSON if the key exists in the JSON, or `default_value` if it does not
     */
    template <typename ValType, typename FuncType>
    ValType get_json_value(const nlohmann::json& json_obj, const char* key, ValType default_value, FuncType&& deserializer);

    /*!
     * \brief Retrieves an array of values from the provided JSON object
     * \tparam ValType The type fo the values in the array
     * \param json_obj The JSON object where the values might be found
     * \param key The name fo the array that has your values
     * \return An array of values, if the value can be found, or an empty vector if the values cannot be found
     */
    template <typename ValType>
    std::vector<ValType> get_json_array(const nlohmann::json& json_obj, const char* key);

    /*!
     * \brief Retrieves an array of values from the provided JSON object
     * \tparam ValType The type fo the values in the array
     * \param json_obj The JSON object where the values might be found
     * \param key The name fo the array that has your values
     * \param deserializer A function that can deserialize each value from JSON
     * \return An array of values, if the value can be found, or an empty vector if the values cannot be found
     */
    template <typename ValType, typename FuncType>
    std::vector<ValType> get_json_array(const nlohmann::json& json_obj, const char* key, FuncType&& deserializer);

    template <typename ValType>
    std::optional<ValType> get_json_opt(const nlohmann::json& json_obj, const char* key) {
        // find() avoids the UB that const operator[] has for missing keys, and needs only one lookup
        if(const auto itr = json_obj.find(key); itr != json_obj.end()) {
            return itr->template get<ValType>();
        }

        return std::nullopt;
    }

    template <typename ValType, typename FuncType>
    std::optional<ValType> get_json_opt(const nlohmann::json& json_obj, const char* key, FuncType&& deserializer) {
        if(const auto itr = json_obj.find(key); itr != json_obj.end()) {
            return deserializer(*itr);
        }

        return std::nullopt;
    }

    template <typename ValType>
    ValType get_json_value(const nlohmann::json& json_obj, const char* key, ValType default_value) {
        if(const auto itr = json_obj.find(key); itr != json_obj.end()) {
            return itr->template get<ValType>();
        }

        return default_value;
    }

    template <typename ValType, typename FuncType>
    ValType get_json_value(const nlohmann::json& json_obj, const char* key, ValType default_value, FuncType&& deserializer) {
        if(const auto itr = json_obj.find(key); itr != json_obj.end()) {
            return deserializer(*itr);
        }

        return default_value;
    }

    template <typename ValType>
    std::vector<ValType> get_json_array(const nlohmann::json& json_obj, const char* key) {
        const auto itr = json_obj.find(key);
        if(itr != json_obj.end() && itr->is_array() && !itr->empty()) {
            std::vector<ValType> vec;
            vec.reserve(itr->size());

            for(const auto& element : *itr) {
                vec.push_back(element.template get<ValType>());
            }

            return vec;
        }

        return {};
    }

    template <typename ValType, typename FuncType>
    std::vector<ValType> get_json_array(const nlohmann::json& json_obj, const char* key, FuncType&& deserializer) {
        const auto itr = json_obj.find(key);
        if(itr != json_obj.end() && itr->is_array() && !itr->empty()) {
            std::vector<ValType> vec;
            vec.reserve(itr->size());

            for(const auto& element : *itr) {
                vec.push_back(deserializer(element));
            }

            return vec;
        }

        return {};
    }
} // namespace nova::renderer
| 6,045
|
C++
|
.h
| 125
| 41.072
| 125
| 0.663727
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,269
|
render_graph_builder.hpp
|
NovaMods_nova-renderer/src/loading/renderpack/render_graph_builder.hpp
|
#pragma once

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

#include "nova_renderer/renderpack_data.hpp"
#include "nova_renderer/util/result.hpp"

namespace nova::renderer::renderpack {
    /*!
     * \brief The span of render passes over which a single resource is read and written
     *
     * Pass indices refer to positions in the ordered pass list. `~0U` / `0` sentinels mean "never written" / "never read"
     */
    struct Range {
        uint32_t first_write_pass = ~0U;
        uint32_t last_write_pass = 0;
        uint32_t first_read_pass = ~0U;
        uint32_t last_read_pass = 0;

        [[nodiscard]] bool has_writer() const;

        [[nodiscard]] bool has_reader() const;

        [[nodiscard]] bool is_used() const;

        [[nodiscard]] bool can_alias() const;

        [[nodiscard]] unsigned last_used_pass() const;

        [[nodiscard]] unsigned first_used_pass() const;

        // True when this range and `other` never overlap, i.e. the two resources may share memory
        [[nodiscard]] bool is_disjoint_with(const Range& other) const;
    };

    /*!
     * \brief Orders the provided render passes to satisfy both their implicit and explicit dependencies
     *
     * \param passes A map from pass name to pass of all the passes to order
     * \return The names of the passes in submission order
     */
    ntl::Result<std::vector<RenderPassCreateInfo>> order_passes(const std::vector<RenderPassCreateInfo>& passes);

    /*!
     * \brief Puts textures in usage order and determines which have overlapping usage ranges
     *
     * Knowing which textures have an overlapping usage range is super important cause if their ranges overlap, they can't be aliased
     *
     * \param passes All the passes in the current frame graph
     * \param resource_used_range A map to hold the usage ranges of each texture
     * \param resources_in_order A vector to hold the textures in usage order
     */
    void determine_usage_order_of_textures(const std::vector<RenderPassCreateInfo>& passes,
                                           std::unordered_map<std::string, Range>& resource_used_range,
                                           std::vector<std::string>& resources_in_order);

    /*!
     * \brief Determines which textures can be aliased to which other textures
     *
     * \param textures All the dynamic textures that this frame graph needs
     * \param resource_used_range The range of passes where each texture is used
     * \param resources_in_order The dynamic textures in usage order
     *
     * \return A map from texture name to the name of the texture the first texture can be aliased with
     */
    std::unordered_map<std::string, std::string> determine_aliasing_of_textures(const std::unordered_map<std::string, TextureCreateInfo>& textures,
                                                                                const std::unordered_map<std::string, Range>& resource_used_range,
                                                                                const std::vector<std::string>& resources_in_order);
} // namespace nova::renderer::renderpack
| 2,725
|
C++
|
.h
| 49
| 45.285714
| 147
| 0.647898
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,270
|
renderpack_validator.hpp
|
NovaMods_nova-renderer/src/loading/renderpack/renderpack_validator.hpp
|
#pragma once

#include <nlohmann/json.hpp>

#include <string>
#include <vector>

namespace nova::renderer::renderpack {
    /*!
     * \brief Result of validating one piece of renderpack JSON: human-readable warnings and errors
     */
    struct ValidationReport {
        std::vector<std::string> warnings;
        std::vector<std::string> errors;

        // Appends the other report's warnings and errors to this one
        void merge_in(const ValidationReport& other);
    };

    // Writes the report's warnings and errors to the log
    void print(const ValidationReport& report);

    /*!
     * \brief Checks if the pipeline_json has all the properties that a full pipeline should have, printing a debug
     * message if it doesn't. Missing fields that can be filled in with a default value are filled in (pipeline_json is
     * modified) and a debug message is logged, but if a missing field is required then a `json_validation_error`
     * exception is thrown
     *
     * \param pipeline_json The JSON pipeline to validate and possibly fill in
     */
    ValidationReport validate_graphics_pipeline(nlohmann::json& pipeline_json);

    /*!
     * \brief Validates the dynamic resources that the given JSON file contains. Any warnings about missing fields with
     * default values are sent to the warning logger, while information about missing required fields is sent out as an
     * exception
     *
     * \param resources_json The JSON dynamic resources to validate
     */
    ValidationReport validate_renderpack_resources_data(nlohmann::json& resources_json);

    /*!
     * \brief Validates a single texture's JSON, printing warnings to the warning logger and throwing an exception with
     * any missing required fields
     *
     * \param texture_json The JSON of the texture to validate
     */
    ValidationReport validate_texture_data(const nlohmann::json& texture_json);

    /*!
     * \brief Validates a texture format, printing warnings to the warning logger and throwing an exception with any
     * missing required fields
     *
     * \param format_json The JSON to validate
     */
    ValidationReport validate_texture_format(const nlohmann::json& format_json, const std::string& texture_name);

    /*!
     * \brief Validates that the provided JSON has all the fields it needed. Warnings about optional fields are
     * printed to the warning logger, errors are thrown together in an exception
     *
     * \param sampler_json The JSON to validate
     */
    ValidationReport validate_sampler_data(const nlohmann::json& sampler_json);

    /*!
     * \brief Validates that the provided JSON for has all the fields it needs. Optional fields that are missing
     * generate a warning, required fields that are missing generate an exception
     *
     * \param material_json The JSON to validate
     *
     * \return A list of all errors encountered when validating this material
     */
    ValidationReport validate_material(const nlohmann::json& material_json);
} // namespace nova::renderer::renderpack
| 2,811
|
C++
|
.h
| 59
| 42.084746
| 119
| 0.723924
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,271
|
console_log_stream.hpp
|
NovaMods_nova-renderer/src/logging/console_log_stream.hpp
|
#pragma once

#include <rx/core/stream.h>

#include <string>

namespace nova {
    /*!
     * \brief rx::stream implementation that Nova's logging uses to write to the console
     *
     * NOTE(review): this header mixes rx types (rx::stream, rx_u64) with std::string — confirm that rx::stream's
     * virtual interface actually takes these exact signatures, otherwise `override` will fail to compile
     */
    class StdoutStream final : public rx::stream {
    public:
        StdoutStream();

        virtual ~StdoutStream() = default;

        // Receives `size` bytes starting at `data`; returns the number of bytes consumed (implementation in the .cpp)
        rx_u64 on_write(const uint8_t* data, rx_u64 size) override;

        // Flushes any buffered output; returns true on success
        bool on_flush() override;

        // Identifies this stream to rx's stream machinery
        const std::string& name() const& override;

    private:
        // Name returned by name(); never mutated after construction
        std::string my_name = "NovaConsoleLogStream";
    };
} // namespace nova
| 468
|
C++
|
.h
| 15
| 24.933333
| 67
| 0.650685
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,272
|
material_data_buffer.hpp
|
NovaMods_nova-renderer/src/renderer/material_data_buffer.hpp
|
#pragma once

#include <cstddef>
#include <cstdint>
#include <vector>

namespace nova::renderer {
    /*!
     * \brief Array that can hold data of multiple types of multiple sizes
     *
     * This array uses a linear allocator internally
     */
    class MaterialDataBuffer {
    public:
        explicit MaterialDataBuffer(size_t num_bytes);

        MaterialDataBuffer(const MaterialDataBuffer& other) = delete;
        MaterialDataBuffer& operator=(const MaterialDataBuffer& other) = delete;

        MaterialDataBuffer(MaterialDataBuffer&& old) noexcept = default;
        MaterialDataBuffer& operator=(MaterialDataBuffer&& old) noexcept = default;

        ~MaterialDataBuffer() = default;

        /*!
         * \brief Provides access to an element in this array
         *
         * This operator performs no checks that the requested element is of the requested type. I recommend that you only use indices you
         * get from `get_next_free_index` with the same type as what you're requesting
         */
        template <typename MaterialDataStruct>
        [[nodiscard]] MaterialDataStruct& at(uint32_t idx);

        /*!
         * \brief Provides access to an element in this array
         *
         * This operator performs no checks that the requested element is of the requested type. I recommend that you only use indices you
         * get from `get_next_free_index` with the same type as what you're requesting
         */
        template <typename MaterialDataStruct>
        [[nodiscard]] const MaterialDataStruct& at(uint32_t idx) const;

        /*!
         * \brief Gets the index of the next free element of the requested type
         */
        template <typename MaterialDataStruct>
        [[nodiscard]] uint32_t get_next_free_index();

        [[nodiscard]] uint8_t* data() const;

    private:
        std::vector<uint8_t> buffer;

        // Linear-allocator cursor: number of bytes handed out so far
        uint32_t num_allocated_bytes = 0;
    };

    template <typename MaterialDataStruct>
    MaterialDataStruct& MaterialDataBuffer::at(uint32_t idx) {
        // buffer.data() — calling the member function, not taking the member pointer
        return reinterpret_cast<MaterialDataStruct*>(buffer.data())[idx];
    }

    template <typename MaterialDataStruct>
    const MaterialDataStruct& MaterialDataBuffer::at(uint32_t idx) const {
        // const overload must not cast away constness
        return reinterpret_cast<const MaterialDataStruct*>(buffer.data())[idx];
    }

    template <typename MaterialDataStruct>
    uint32_t MaterialDataBuffer::get_next_free_index() {
        constexpr uint32_t struct_size = sizeof(MaterialDataStruct);

        // Here's a Al Gore rhythm for your soul
        // This class is a party. The idea is that it's an array of any type you want. You reinterpret the buffer pointer to the type you
        // want at runtime
        //
        // So like if you want to store five floats, one float3, and a float4x4 all in the same buffer... you can do that, and they each get
        // an index. They get an index as if the buffer was an array of their type? So when we find a place to put them in the buffer - aka
        // in this method - we have to align the number of already-allocated bytes to the size of the struct of the new material, rounding
        // up. This means that we end up with a lot of empty bytes here any there. Ideally we can find a way to force alignment on material
        // structs and avoid wasting _too_ much data, but who knows

        // Ceiling division: smallest index whose slot [idx * struct_size, (idx + 1) * struct_size) starts at or after
        // the current cursor. (The previous `cursor / struct_size + 1` skipped index 0 forever, wasting one slot per type.)
        const auto new_idx = (num_allocated_bytes + struct_size - 1) / struct_size;

        // Advance the cursor past the newly-claimed slot
        num_allocated_bytes = struct_size * (new_idx + 1);

        // TODO(review): no check that the claimed slot fits inside `buffer` — confirm callers can't overrun it
        return new_idx;
    }
} // namespace nova::renderer
| 3,552
|
C++
|
.h
| 69
| 43.536232
| 140
| 0.682081
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| true
| true
| true
| false
| false
| true
| false
| false
|
753,273
|
pipeline_reflection.hpp
|
NovaMods_nova-renderer/src/renderer/pipeline_reflection.hpp
|
#pragma once

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

#include "nova_renderer/rhi/pipeline_create_info.hpp"
#include "nova_renderer/rhi/rhi_enums.hpp"
#include "nova_renderer/rhi/rhi_types.hpp"

// Forward declarations so this header doesn't pull in all of SPIRV-Cross
namespace spirv_cross {
    struct Resource;
    class Compiler;
} // namespace spirv_cross

namespace nova::renderer {
    /*!
     * \brief Reflects over every shader stage in the pipeline state and collects all descriptor bindings, keyed by binding name
     */
    std::unordered_map<std::string, rhi::RhiResourceBindingDescription> get_all_descriptors(const RhiGraphicsPipelineState& pipeline_state);

    /*!
     * \brief Reflects over one SPIR-V module and merges its descriptor bindings into `bindings`
     */
    void get_shader_module_descriptors(const std::vector<uint32_t>& spirv,
                                       rhi::ShaderStage shader_stage,
                                       std::unordered_map<std::string, rhi::RhiResourceBindingDescription>& bindings);

    /*!
     * \brief Adds a single reflected SPIRV-Cross resource of the given descriptor type to `bindings`
     */
    void add_resource_to_bindings(std::unordered_map<std::string, rhi::RhiResourceBindingDescription>& bindings,
                                  rhi::ShaderStage shader_stage,
                                  const spirv_cross::Compiler& shader_compiler,
                                  const spirv_cross::Resource& resource,
                                  rhi::DescriptorType type);
} // namespace nova::renderer
| 1,147
|
C++
|
.h
| 21
| 42.238095
| 140
| 0.643176
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| true
| false
| false
| true
| false
| false
|
753,274
|
visibility_cache.hpp
|
NovaMods_nova-renderer/src/renderer/visibility_cache.hpp
|
#pragma once

#include <unordered_map>

#include "nova_renderer/camera.hpp"
#include "nova_renderer/renderables.hpp"

namespace nova::renderer {
    /*!
     * \brief Cache of cache of which objects are visible to which cameras
     *
     * Implements frustum culling. Will eventually use hardware occlusion queries
     *
     * This class caches visibility per camera. When camera parameters change for a camera at a given index, the cache for that camera is
     * invalidated. Future occlusion queries will have to re-calculate themselves
     */
    class VisibilityCache {
    public:
        // NOTE(review): the constructor takes an rx allocator, but both members below are std containers that never
        // see it — confirm whether the parameter is vestigial from an rx migration
        explicit VisibilityCache(rx::memory::allocator& allocator);

        VisibilityCache(const VisibilityCache& other) = delete;
        VisibilityCache& operator=(const VisibilityCache& other) = delete;

        VisibilityCache(VisibilityCache&& old) noexcept = default;
        VisibilityCache& operator=(VisibilityCache&& old) noexcept = default;

        ~VisibilityCache() = default;

        /*!
         * \brief Sets the visibility of a renderable in the visibility cache
         *
         * \param camera The camera to set visibility for
         * \param renderable The renderable to set visibility for
         * \param visibility Whether or not the renderable is visible
         */
        void set_renderable_visibility(const Camera& camera, RenderableId renderable, bool visibility);

        /*!
         * \brief Checks if a given renderable is visible to a given camera
         *
         * This method first checks if the visibility cache for this camera is up-to-date. If so, it fetches the visibility result for the
         * renderable and returns is directly. However, if the visibility cache is _not_ up-to-date, this method invalidates the cache and
         * recalculates visibility for this renderable
         *
         * If the cache is up-to-date but doesn't have this renderable in it, this method will calculate visibility and save it to the
         * cache, then return the result
         */
        [[nodiscard]] bool is_renderable_visible_to_camera(RenderableId renderable, const Camera& camera);

    private:
        /*!
         * \brief A map of the camera parameters that were most recently seen for a given camera
         *
         * If we see see a camera that has different parameters than what's in this cache, the visibility results for that camera are
         * invalidated
         */
        std::unordered_map<CameraIndex, Camera> cached_cameras;

        /*!
         * \brief Cache of which renderables are visible for a given camera
         */
        std::unordered_map<CameraIndex, std::unordered_map<RenderableId, bool>> visibility_cache;
    };
} // namespace nova::renderer
| 2,739
|
C++
|
.h
| 54
| 42.777778
| 138
| 0.688972
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,275
|
backbuffer_output_pass.hpp
|
NovaMods_nova-renderer/src/renderer/builtin/backbuffer_output_pass.hpp
|
#pragma once

#include <memory>
#include <vector>

#include "nova_renderer/rendergraph.hpp"

namespace nova::renderer {
    /*!
     * \brief Built-in renderpass that composites the UI and scene images into the backbuffer
     */
    class BackbufferOutputRenderpass final : public GlobalRenderpass {
    public:
        /*!
         * \brief Creates the backbuffer output pass
         *
         * \param ui_output Image holding the rendered UI
         * \param scene_output Image holding the rendered 3D scene
         * \param point_sampler Sampler used to read both input images
         * \param pipeline Pipeline that performs the composite; this pass takes ownership
         * \param mesh Fullscreen-triangle (or similar) mesh the composite is drawn with
         * \param device Render device used to create any pass-owned resources
         */
        explicit BackbufferOutputRenderpass(rhi::RhiImage* ui_output,
                                            rhi::RhiImage* scene_output,
                                            rhi::RhiSampler* point_sampler,
                                            std::unique_ptr<rhi::RhiPipeline> pipeline,
                                            MeshId mesh,
                                            rhi::RenderDevice& device);

        // Static description of this pass for the render graph
        static const renderpack::RenderPassCreateInfo& get_create_info();

    protected:
        // Records the barriers in `post_pass_barriers` after the renderpass has executed
        void record_post_renderpass_barriers(rhi::RhiRenderCommandList& cmds, FrameContext& ctx) const override;

    private:
        // Barriers recorded after the pass; built once at construction
        std::vector<rhi::RhiResourceBarrier> post_pass_barriers;
    };
} // namespace nova::renderer
| 921
|
C++
|
.h
| 18
| 34.833333
| 112
| 0.575083
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,276
|
zip_folder_accessor.hpp
|
NovaMods_nova-renderer/src/filesystem/zip_folder_accessor.hpp
|
#pragma once

#include <miniz.h>

#include "nova_renderer/filesystem/folder_accessor.hpp"

namespace nova::filesystem {
    // One node (file or directory) in the in-memory tree of the zip's contents
    struct FileTreeNode {
        rx::string name;
        // NOTE(review): `parent` pointers into sibling `children` vectors are invalidated if those vectors
        // reallocate after the tree is built — confirm build_file_tree reserves or builds bottom-up
        rx::vector<FileTreeNode> children;
        FileTreeNode* parent = nullptr;

        // Path from the zip root to this node, assembled by walking `parent` links
        [[nodiscard]] rx::string get_full_path() const;
    };

    /*!
     * \brief Allows access to a zip folder
     */
    class ZipFolderAccessor : public FolderAccessorBase {
    public:
        explicit ZipFolderAccessor(const rx::string& folder);

        // Takes over an already-opened miniz archive
        ZipFolderAccessor(const rx::string& folder, mz_zip_archive archive);

        // NOTE(review): defaulted moves memberwise-copy the raw mz_zip_archive, so both the moved-from and
        // moved-to object appear to own it — if the destructor closes the archive this can double-close; confirm
        ZipFolderAccessor(ZipFolderAccessor&& other) noexcept = default;
        ZipFolderAccessor& operator=(ZipFolderAccessor&& other) noexcept = default;

        ZipFolderAccessor(const ZipFolderAccessor& other) = delete;
        ZipFolderAccessor& operator=(const ZipFolderAccessor& other) = delete;

        ~ZipFolderAccessor() override;

        rx::vector<uint8_t> read_file(const rx::string& path) override final;

        rx::vector<rx::string> get_all_items_in_folder(const rx::string& folder) override final;

        [[nodiscard]] FolderAccessorBase* create_subfolder_accessor(const rx::string& path) const override;

    private:
        /*!
         * \brief Map from filename to its index in the zip folder. Miniz seems to like indexes
         */
        rx::map<rx::string, int32_t> resource_indexes;

        mz_zip_archive zip_archive = {};

        // Root of the cached directory tree, filled in by build_file_tree()
        FileTreeNode files;

        void build_file_tree();

        bool does_resource_exist_on_filesystem(const rx::string& resource_path) override final;
    };

    /*!
     * \brief Prints out the nodes in a depth-first fashion
     */
    void print_file_tree(const FileTreeNode& folder, uint32_t depth);
} // namespace nova::filesystem
| 1,797
|
C++
|
.h
| 40
| 37.625
| 107
| 0.683151
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,277
|
loading_utils.hpp
|
NovaMods_nova-renderer/src/filesystem/loading_utils.hpp
|
#pragma once

#include <string>

#include "nova_renderer/util/filesystem.hpp"

namespace nova::renderer {
    /*!
     * \brief Determines if a given path refers to a zip folder or a regular folder
     * \param path_to_folder The path from Nova's working directory to the folder you want to check
     * \return True if the folder in question is a zip folder, false otherwise
     */
    bool is_zip_folder(const fs::path& path_to_folder);
} // namespace nova::renderer
| 471
|
C++
|
.h
| 11
| 39
| 99
| 0.724289
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,278
|
regular_folder_accessor.hpp
|
NovaMods_nova-renderer/src/filesystem/regular_folder_accessor.hpp
|
#pragma once

#include "nova_renderer/filesystem/folder_accessor.hpp"

namespace nova::filesystem {
    /*!
     * \brief Allows access to resources in a regular folder
     */
    class RegularFolderAccessor final : public FolderAccessorBase {
    public:
        explicit RegularFolderAccessor(const rx::string& folder);

        ~RegularFolderAccessor() override = default;

        // Reads the entire file at `path` (relative to this accessor's folder) into a byte vector
        rx::vector<uint8_t> read_file(const rx::string& path) override;

        // Lists the names of all items directly inside `folder`
        rx::vector<rx::string> get_all_items_in_folder(const rx::string& folder) override;

        // True when `resource_path` exists on disk under this accessor's folder
        bool does_resource_exist_on_filesystem(const rx::string& resource_path) override;

    protected:
        // Returns a new accessor rooted at the given subfolder; caller owns the returned pointer
        FolderAccessorBase* create_subfolder_accessor(const rx::string& path) const override;
    };
} // namespace nova::filesystem
| 781
|
C++
|
.h
| 17
| 40
| 93
| 0.720317
|
NovaMods/nova-renderer
| 114
| 12
| 25
|
MPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,279
|
SteamItemDropIdler.cpp
|
kokole_SteamItemDropIdler/SteamItemDropIdler/SteamItemDropIdler.cpp
|
#include "stdafx.h"
CSteamAPILoader g_steamAPILoader;
int main( int argc, char* argv[] )
//int CALLBACK WinMain( HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow )
{
HANDLE hConsole = GetStdHandle( STD_OUTPUT_HANDLE );
SetConsoleTextAttribute( hConsole, FOREGROUND_GREEN );
printf( "--- Made by kokole ---\n" );
SetConsoleTextAttribute( hConsole, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE );
char steamAccountName[33];
char steamAccountPassword[33];
AppId_t appID;
SteamItemDef_t dropListDefinition;
if ( __argc == 5 ) {
strcpy_s( steamAccountName, __argv[1] );
strcpy_s( steamAccountPassword, __argv[2] );
sscanf( __argv[3], "%d", &appID );
sscanf( __argv[4], "%d", &dropListDefinition );
memset( __argv[1], 0, strlen( __argv[1] ) );
memset( __argv[2], 0, strlen( __argv[2] ) );
printf( "Enter your Steam account name: %s\n", steamAccountName );
printf( "Enter your Steam account password: \n" );
printf( "Enter the AppID: %d\n", appID );
printf( "Enter the drop list definition: %d\n", dropListDefinition );
}
else {
//goto funcEnd;
printf( "Enter your Steam account name: " );
scanf( "%32s", steamAccountName );
getchar(); // skip newline
printf( "Enter your Steam account password: " );
scanf( "%32s", steamAccountPassword );
getchar();
printf( "Enter the AppID: " );
scanf( "%d", &appID );
getchar();
printf( "Enter the drop list definition: " );
scanf( "%d", &dropListDefinition );
getchar();
}
char consoleTitle[256];
sprintf_s( consoleTitle, "Steam Item Drop Idler (%s)", steamAccountName );
SetConsoleTitleA( consoleTitle );
// load steam stuff
CreateInterfaceFn steam3Factory = g_steamAPILoader.GetSteam3Factory();
if ( !steam3Factory ) {
printf( "GetSteam3Factory failed\n" );
goto funcEnd;
}
IClientEngine* clientEngine = (IClientEngine*)steam3Factory( CLIENTENGINE_INTERFACE_VERSION, NULL );
if ( !clientEngine ) {
printf( "clientEngine is null\n" );
goto funcEnd;
}
ISteamClient017* steamClient = (ISteamClient017*)steam3Factory( STEAMCLIENT_INTERFACE_VERSION_017, NULL );
if ( !steamClient ) {
printf( "steamClient is null\n" );
goto funcEnd;
}
HSteamPipe hSteamPipe;
HSteamUser hSteamUser = clientEngine->CreateLocalUser( &hSteamPipe, k_EAccountTypeIndividual );
if ( !hSteamPipe || !hSteamUser ) {
printf( "CreateLocalUser failed (1)\n" );
goto funcEnd;
}
IClientBilling* clientBilling = clientEngine->GetIClientBilling( hSteamUser, hSteamPipe, CLIENTBILLING_INTERFACE_VERSION );
if ( !clientBilling ) {
printf( "clientBilling is null\n" );
goto funcEnd;
}
IClientFriends* clientFriends = clientEngine->GetIClientFriends( hSteamUser, hSteamPipe, CLIENTFRIENDS_INTERFACE_VERSION );
if ( !clientFriends ) {
printf( "clientFriends is null\n" );
goto funcEnd;
}
IClientUser* clientUser = clientEngine->GetIClientUser( hSteamUser, hSteamPipe, CLIENTUSER_INTERFACE_VERSION );
if ( !clientUser ) {
printf( "clientUser is null\n" );
goto funcEnd;
}
IClientUtils* clientUtils = clientEngine->GetIClientUtils( hSteamPipe, CLIENTUTILS_INTERFACE_VERSION );
if ( !clientUtils ) {
printf( "clientUtils is null\n" );
goto funcEnd;
}
ISteamGameCoordinator001* steamGameCoordinator = (ISteamGameCoordinator001*)steamClient->GetISteamGenericInterface( hSteamUser, hSteamPipe, STEAMGAMECOORDINATOR_INTERFACE_VERSION_001 );
if ( !steamGameCoordinator ) {
printf( "steamGameCoordinator is null\n" );
goto funcEnd;
}
ISteamInventory001* steamInventory = (ISteamInventory001*)steamClient->GetISteamInventory( hSteamUser, hSteamPipe, "STEAMINVENTORY_INTERFACE_V001" );
if ( !steamInventory ) {
printf( "steamInventory is null\n" );
goto funcEnd;
}
ISteamUser017* steamUser = (ISteamUser017*)steamClient->GetISteamUser( hSteamUser, hSteamPipe, STEAMUSER_INTERFACE_VERSION_017 );
if ( !steamUser ) {
printf( "steamUser is null\n" );
goto funcEnd;
}
clientUser->LogOnWithPassword( false, steamAccountName, steamAccountPassword );
bool bPlayingGame = false;
bool bPlayingOnServer = false; // for games that require us to be connected to a server
while ( true )
{
// process steam user callbacks
CallbackMsg_t callbackMsg;
while ( Steam_BGetCallback( hSteamPipe, &callbackMsg ) )
{
switch ( callbackMsg.m_iCallback )
{
case SteamServersConnected_t::k_iCallback:
clientFriends->SetPersonaState( k_EPersonaStateOnline );
if ( (*(bool( __thiscall** )(IClientUser*, AppId_t))(*(DWORD*)clientUser + 692))(clientUser, appID) ) { // BIsSubscribedApp
clientUtils->SetAppIDForCurrentPipe( appID, true );
bPlayingGame = true;
}
else {
printf( "You are not subscribed to this app. Trying to add a free license...\n" );
SteamAPICall_t hRequestFreeLicenseForApps = (*(SteamAPICall_t( __thiscall** )(IClientBilling*, AppId_t*, int))(*(DWORD*)clientBilling + 24))(clientBilling, &appID, 1); // RequestFreeLicenseForApps
bool bFailed;
while ( !clientUtils->IsAPICallCompleted( hRequestFreeLicenseForApps, &bFailed ) )
Sleep( 1000 );
RequestFreeLicenseResponse_t requestFreeLicenseResponse;
if ( !clientUtils->GetAPICallResult( hRequestFreeLicenseForApps, &requestFreeLicenseResponse, sizeof( RequestFreeLicenseResponse_t ), RequestFreeLicenseResponse_t::k_iCallback, &bFailed ) ) {
printf( "GetAPICallResult failed\n" );
goto funcEnd;
}
if ( requestFreeLicenseResponse.m_EResult == k_EResultOK && requestFreeLicenseResponse.m_nGrantedAppIds == 1 ) {
printf( "Added a free license\n" );
clientUtils->SetAppIDForCurrentPipe( appID, true );
bPlayingGame = true;
}
else {
printf( "Failed to add a free license. You do not own this game\n" );
goto funcEnd;
}
}
SetConsoleTextAttribute( hConsole, FOREGROUND_GREEN );
printf( "Item drop idling is now in progress\n" );
SetConsoleTextAttribute( hConsole, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE );
break;
case SteamServerConnectFailure_t::k_iCallback:
{
SteamServerConnectFailure_t* steamServerConnectFailure = (SteamServerConnectFailure_t*)callbackMsg.m_pubParam;
switch ( steamServerConnectFailure->m_eResult )
{
case k_EResultInvalidLoginAuthCode:
printf( "Invalid Steam Guard code\n" );
case k_EResultAccountLogonDenied:
{
char steamGuardCode[33];
printf( "Enter the Steam Guard code: " );
scanf( "%32s", steamGuardCode );
getchar();
// this is Set2ndFactorAuthCode, however I have to do this because IClientUser.h is outdated
(*(void( __thiscall** )(IClientUser*, const char*, bool))(*(DWORD*)clientUser + 676))(clientUser, steamGuardCode, false);
clientUser->LogOnWithPassword( false, steamAccountName, steamAccountPassword );
break;
}
case k_EResultTwoFactorCodeMismatch:
printf( "Invalid Steam Mobile Authenticator code\n" );
case k_EResultAccountLogonDeniedNeedTwoFactorCode:
{
char steamMobileAuthenticatorCode[33];
printf( "Enter the Steam Mobile Authenticator code: " );
scanf( "%32s", steamMobileAuthenticatorCode );
getchar();
(*(void( __thiscall** )(IClientUser*, const char*))(*(DWORD*)clientUser + 196))(clientUser, steamMobileAuthenticatorCode); // SetTwoFactorCode
clientUser->LogOnWithPassword( false, steamAccountName, steamAccountPassword );
break;
}
default:
SetConsoleTextAttribute( hConsole, FOREGROUND_RED );
printf( "Login failed (%d)\n", steamServerConnectFailure->m_eResult );
SetConsoleTextAttribute( hConsole, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE );
break;
}
bPlayingGame = false;
bPlayingOnServer = false;
break;
}
case SteamServersDisconnected_t::k_iCallback:
{
SteamServersDisconnected_t* steamServersDisconnected = (SteamServersDisconnected_t*)callbackMsg.m_pubParam;
printf( "Disconnected from steam servers (%d)\n", steamServersDisconnected->m_eResult );
bPlayingGame = false;
bPlayingOnServer = false;
break;
}
/*default:
printf( "User callback: %d\n", callbackMsg.m_iCallback );
break;*/
}
Steam_FreeLastCallback( hSteamPipe );
}
// do the actual item drop idling if we're "playing" the game
if ( bPlayingGame ) {
if ( appID == 440 ) {
static bool bHelloMsgSent = false;
static bool bGameServerInited = false;
// do game coordinator stuff
if ( !bHelloMsgSent ) {
// k_EMsgGCClientHello
unsigned char response[] = { 0xA6, 0x0F, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x08, 0x98, 0xE1, 0xC0, 0x01 };
steamGameCoordinator->SendMessage( 0x80000FA6, response, sizeof( response ) );
printf( "Sent hello msg to game coordinator\n" );
bHelloMsgSent = true;
}
uint32 msgSize;
while ( steamGameCoordinator->IsMessageAvailable( &msgSize ) ) {
uint32 msgType;
unsigned char* msg = new unsigned char[msgSize];
if ( steamGameCoordinator->RetrieveMessage( &msgType, msg, msgSize, &msgSize ) == k_EGCResultOK ) {
printf( "Retrieved message of type 0x%X from game coordinator\n", msgType );
if ( msgType == 0x80000FA4 ) { // k_EMsgGCClientWelcome
printf( "Got welcome msg from game coordinator\n" );
}
else if ( msgType == 0x8000001B ) { // k_ESOMsg_CacheSubscriptionCheck
// k_ESOMsg_CacheSubscriptionRefresh
unsigned char response[] = { 0x1C, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
*(CSteamID*)&response[9] = steamUser->GetSteamID();
steamGameCoordinator->SendMessage( 0x8000001C, response, sizeof( response ) );
printf( "Sent response to game coordinator\n" );
}
}
else {
printf( "Failed to retrieve message from game coordinator\n" );
}
delete[] msg;
}
// do game server stuff
static HSteamPipe hSteamGameServerPipe;
static HSteamUser hSteamGameServerUser;
static ISteamGameServer012* steamGameServer;
if ( !bGameServerInited ) {
// called by SteamGameServer_Init. needed for games that require us to be connected to a server
steamClient->SetLocalIPBinding( 0, 26901 );
hSteamGameServerUser = steamClient->CreateLocalUser( &hSteamGameServerPipe, k_EAccountTypeGameServer );
if ( !hSteamGameServerPipe || !hSteamGameServerUser ) {
printf( "CreateLocalUser failed (2)\n" );
goto funcEnd;
}
steamGameServer = (ISteamGameServer012*)steamClient->GetISteamGameServer( hSteamGameServerUser, hSteamGameServerPipe, STEAMGAMESERVER_INTERFACE_VERSION_012 );
if ( !steamGameServer ) {
printf( "steamGameServer is null\n" );
goto funcEnd;
}
steamGameServer->InitGameServer( 0, 27015, MASTERSERVERUPDATERPORT_USEGAMESOCKETSHARE, k_unServerFlagSecure, 440, "3158168" );
steamGameServer->SetProduct( "tf" );
steamGameServer->SetGameDescription( "Team Fortress" );
steamGameServer->SetModDir( "tf" );
steamGameServer->SetDedicatedServer( false );
steamGameServer->LogOnAnonymous();
steamGameServer->SetMaxPlayerCount( 1 );
steamGameServer->SetBotPlayerCount( 0 );
steamGameServer->SetPasswordProtected( true );
steamGameServer->SetRegion( "-1" );
steamGameServer->SetServerName( "Team Fortress 2" );
steamGameServer->SetMapName( "ctf_2fort" );
steamGameServer->SetGameData( "tf_mm_trusted:0,tf_mm_servermode:0,lobby:0,steamblocking:0" );
steamGameServer->SetKeyValue( "tf_gamemode_ctf", "1" );
steamGameServer->SetKeyValue( "sv_tags", "ctf" );
steamGameServer->SetGameTags( "ctf" );
//steamGameServer->EnableHeartbeats( true );
bGameServerInited = true;
}
if ( !bPlayingOnServer ) {
static HAuthTicket hAuthTicket = 0;
if ( hAuthTicket ) {
steamUser->CancelAuthTicket( hAuthTicket );
steamGameServer->EndAuthSession( steamUser->GetSteamID() );
hAuthTicket = 0;
}
unsigned char ticket[1024];
uint32 ticketSize;
hAuthTicket = steamUser->GetAuthSessionTicket( ticket, sizeof( ticket ), &ticketSize );
if ( hAuthTicket != k_HAuthTicketInvalid ) {
EBeginAuthSessionResult beginAuthSessionResult = steamGameServer->BeginAuthSession( ticket, ticketSize, steamUser->GetSteamID() );
if ( beginAuthSessionResult == k_EBeginAuthSessionResultOK )
bPlayingOnServer = true;
else
printf( "BeginAuthSession failed (%d)\n", beginAuthSessionResult );
}
else {
printf( "GetAuthSessionTicket failed\n" );
}
}
// process steam game server callbacks
while ( Steam_BGetCallback( hSteamGameServerPipe, &callbackMsg ) )
{
switch ( callbackMsg.m_iCallback )
{
case ValidateAuthTicketResponse_t::k_iCallback:
{
ValidateAuthTicketResponse_t* validateAuthTicketResponse = (ValidateAuthTicketResponse_t*)callbackMsg.m_pubParam;
if ( validateAuthTicketResponse->m_eAuthSessionResponse == k_EAuthSessionResponseOK ) {
printf( "BeginAuthSession callback ok\n" );
//steamGameServer->BUpdateUserData( validateAuthTicketResponse->m_SteamID, "Player", 0 );
}
else {
printf( "BeginAuthSession callback failed (%d)\n", validateAuthTicketResponse->m_eAuthSessionResponse );
bPlayingOnServer = false;
}
break;
}
/*default:
printf( "Game server callback: %d\n", callbackMsg.m_iCallback );
break;*/
}
Steam_FreeLastCallback( hSteamGameServerPipe );
}
}
else {
steamInventory->SendItemDropHeartbeat();
SteamInventoryResult_t steamInventoryResult;
steamInventory->TriggerItemDrop( &steamInventoryResult, dropListDefinition );
steamInventory->DestroyResult( steamInventoryResult );
}
}
Sleep( 1000 );
}
funcEnd:
printf( "Press enter to exit...\n" );
getchar();
return 0;
}
| 13,888
|
C++
|
.cpp
| 327
| 37.746177
| 201
| 0.714148
|
kokole/SteamItemDropIdler
| 105
| 61
| 4
|
GPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
753,281
|
stdafx.h
|
kokole_SteamItemDropIdler/SteamItemDropIdler/stdafx.h
|
// Precompiled header: common includes and build configuration for the idler.
#pragma once
// Presumably the standard Visual Studio header that sets the target Windows
// version macros — confirm its contents.
#include "targetver.h"
// Silence MSVC warnings about "unsafe" CRT functions (scanf/printf are used freely).
#define _CRT_SECURE_NO_WARNINGS
#include <Windows.h>
// Opt into Open Steamworks' non-public IClient* interface declarations
// (IClientUser, IClientBilling, etc. are used by the idler).
#define STEAMWORKS_CLIENT_INTERFACES
#include "Open Steamworks\Open Steamworks\Steamworks.h"
// Link directly against Valve's steamclient import library bundled with Open Steamworks.
#pragma comment( lib, "Open Steamworks\\Resources\\Libs\\Win32\\steamclient.lib" )
| 267
|
C++
|
.h
| 7
| 36.857143
| 82
| 0.802326
|
kokole/SteamItemDropIdler
| 105
| 61
| 4
|
GPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
753,282
|
TSteamPrepurchaseReceiptInfo.h
|
kokole_SteamItemDropIdler/SteamItemDropIdler/Open Steamworks/Open Steamworks/TSteamPrepurchaseReceiptInfo.h
|
//========================== Open Steamworks ================================
//
// This file is part of the Open Steamworks project. All individuals associated
// with this project do not claim ownership of the contents
//
// The code, comments, and all related files, projects, resources,
// redistributables included with this project are Copyright Valve Corporation.
// Additionally, Valve, the Valve logo, Half-Life, the Half-Life logo, the
// Lambda logo, Steam, the Steam logo, Team Fortress, the Team Fortress logo,
// Opposing Force, Day of Defeat, the Day of Defeat logo, Counter-Strike, the
// Counter-Strike logo, Source, the Source logo, and Counter-Strike Condition
// Zero are trademarks and or registered trademarks of Valve Corporation.
// All other trademarks are property of their respective owners.
//
//=============================================================================
#ifndef TSTEAMPREPURCHASERECEIPTINFO_H
#define TSTEAMPREPURCHASERECEIPTINFO_H
#ifdef _WIN32
#pragma once
#endif
// Receipt details for a pre-purchase. Layout is part of the Steam client
// binary interface — do not change the member size or order.
typedef struct TSteamPrepurchaseReceiptInfo
{
	char szTypeOfProofOfPurchase[ STEAM_TYPE_OF_PROOF_OF_PURCHASE_SIZE + 1];	// proof-of-purchase type string; +1 presumably for the NUL terminator
} TSteamPrepurchaseReceiptInfo;
#endif // TSTEAMPREPURCHASERECEIPTINFO_H
| 1,214
|
C++
|
.h
| 25
| 47.32
| 79
| 0.710549
|
kokole/SteamItemDropIdler
| 105
| 61
| 4
|
GPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
753,283
|
HTTPCommon.h
|
kokole_SteamItemDropIdler/SteamItemDropIdler/Open Steamworks/Open Steamworks/HTTPCommon.h
|
//========================== Open Steamworks ================================
//
// This file is part of the Open Steamworks project. All individuals associated
// with this project do not claim ownership of the contents
//
// The code, comments, and all related files, projects, resources,
// redistributables included with this project are Copyright Valve Corporation.
// Additionally, Valve, the Valve logo, Half-Life, the Half-Life logo, the
// Lambda logo, Steam, the Steam logo, Team Fortress, the Team Fortress logo,
// Opposing Force, Day of Defeat, the Day of Defeat logo, Counter-Strike, the
// Counter-Strike logo, Source, the Source logo, and Counter-Strike Condition
// Zero are trademarks and or registered trademarks of Valve Corporation.
// All other trademarks are property of their respective owners.
//
//=============================================================================
#ifndef HTTPCOMMON_H
#define HTTPCOMMON_H
#ifdef _WIN32
#pragma once
#endif
#define CLIENTHTTP_INTERFACE_VERSION "CLIENTHTTP_INTERFACE_VERSION001"
#define STEAMHTTP_INTERFACE_VERSION_001 "STEAMHTTP_INTERFACE_VERSION001"
#define STEAMHTTP_INTERFACE_VERSION_002 "STEAMHTTP_INTERFACE_VERSION002"
typedef uint32 HTTPRequestHandle;
#define INVALID_HTTPREQUEST_HANDLE 0
// This enum is used in client API methods, do not re-number existing values.
// HTTP request verbs accepted by the Steam HTTP interfaces. Per the note
// above, these values are serialized in client API calls, so existing
// entries must never be re-numbered.
enum EHTTPMethod
{
	k_EHTTPMethodInvalid = 0,	// sentinel: method not set
	k_EHTTPMethodGET,
	k_EHTTPMethodHEAD,
	k_EHTTPMethodPOST,
	k_EHTTPMethodPUT,
	k_EHTTPMethodDELETE,
	k_EHTTPMethodOPTIONS,
	// The remaining HTTP methods are not yet supported, per rfc2616 section 5.1.1 only GET and HEAD are required for
	// a compliant general purpose server.  We'll likely add more as we find uses for them.
	// k_EHTTPMethodTRACE,
	// k_EHTTPMethodCONNECT
};
// HTTP Status codes that the server can send in response to a request, see rfc2616 section 10.3 for descriptions
// of each of these.
// Each enumerator's numeric value equals the HTTP status code it names
// (k_EHTTPStatusCode200OK == 200, and so on), so values can be compared
// directly against what the server sent over the wire.
typedef enum EHTTPStatusCode
{
	// Invalid status code (this isn't defined in HTTP, used to indicate unset in our code)
	k_EHTTPStatusCodeInvalid = 0,
	// Informational codes
	k_EHTTPStatusCode100Continue = 100,
	k_EHTTPStatusCode101SwitchingProtocols = 101,
	// Success codes
	k_EHTTPStatusCode200OK = 200,
	k_EHTTPStatusCode201Created = 201,
	k_EHTTPStatusCode202Accepted = 202,
	k_EHTTPStatusCode203NonAuthoritative = 203,
	k_EHTTPStatusCode204NoContent = 204,
	k_EHTTPStatusCode205ResetContent = 205,
	k_EHTTPStatusCode206PartialContent = 206,
	// Redirection codes
	k_EHTTPStatusCode300MultipleChoices = 300,
	k_EHTTPStatusCode301MovedPermanently = 301,
	k_EHTTPStatusCode302Found = 302,
	k_EHTTPStatusCode303SeeOther = 303,
	k_EHTTPStatusCode304NotModified = 304,
	k_EHTTPStatusCode305UseProxy = 305,
	//k_EHTTPStatusCode306Unused = 306, (used in old HTTP spec, now unused in 1.1)
	k_EHTTPStatusCode307TemporaryRedirect = 307,
	// Error codes
	k_EHTTPStatusCode400BadRequest = 400,
	k_EHTTPStatusCode401Unauthorized = 401,
	k_EHTTPStatusCode402PaymentRequired = 402, // This is reserved for future HTTP specs, not really supported by clients
	k_EHTTPStatusCode403Forbidden = 403,
	k_EHTTPStatusCode404NotFound = 404,
	k_EHTTPStatusCode405MethodNotAllowed = 405,
	k_EHTTPStatusCode406NotAcceptable = 406,
	k_EHTTPStatusCode407ProxyAuthRequired = 407,
	k_EHTTPStatusCode408RequestTimeout = 408,
	k_EHTTPStatusCode409Conflict = 409,
	k_EHTTPStatusCode410Gone = 410,
	k_EHTTPStatusCode411LengthRequired = 411,
	k_EHTTPStatusCode412PreconditionFailed = 412,
	k_EHTTPStatusCode413RequestEntityTooLarge = 413,
	k_EHTTPStatusCode414RequestURITooLong = 414,
	k_EHTTPStatusCode415UnsupportedMediaType = 415,
	k_EHTTPStatusCode416RequestedRangeNotSatisfiable = 416,
	k_EHTTPStatusCode417ExpectationFailed = 417,
	// Server error codes
	k_EHTTPStatusCode500InternalServerError = 500,
	k_EHTTPStatusCode501NotImplemented = 501,
	k_EHTTPStatusCode502BadGateway = 502,
	k_EHTTPStatusCode503ServiceUnavailable = 503,
	k_EHTTPStatusCode504GatewayTimeout = 504,
	k_EHTTPStatusCode505HTTPVersionNotSupported = 505,
} EHTTPStatusCode;
// Callback payload delivered when an HTTP request issued through the Steam
// HTTP interface finishes (with or without a server response). Identified at
// dispatch time by its unique k_iCallback id, the same pattern the rest of
// the Steamworks callbacks in this SDK use.
struct HTTPRequestCompleted_t
{
	enum { k_iCallback = k_iClientHTTPCallbacks + 1 };	// unique callback id used to route this struct
	// Handle value for the request that has completed.
	HTTPRequestHandle m_hRequest;
	// Context value that the user defined on the request that this callback is associated with, 0 if
	// no context value was set.
	uint64 m_ulContextValue;
	// This will be true if we actually got any sort of response from the server (even an error).
	// It will be false if we failed due to an internal error or client side network failure.
	bool m_bRequestSuccessful;
	// Will be the HTTP status code value returned by the server, k_EHTTPStatusCode200OK is the normal
	// OK response, if you get something else you probably need to treat it as a failure.
	EHTTPStatusCode m_eStatusCode;
};
#endif // HTTPCOMMON_H
| 4,910
|
C++
|
.h
| 109
| 43.137615
| 119
| 0.77601
|
kokole/SteamItemDropIdler
| 105
| 61
| 4
|
GPL-2.0
|
9/20/2024, 9:42:13 PM (Europe/Amsterdam)
| false
| true
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.