diff --git a/src/app/mirai_app.cpp b/src/app/mirai_app.cpp index 84c04c8..77586e7 100644 --- a/src/app/mirai_app.cpp +++ b/src/app/mirai_app.cpp @@ -2,8 +2,21 @@ #include "core/logger.h" #include "core/time_util.h" +#include "gpu_resource/gpu_buffer.h" namespace mirai { + void test_func() { + buffer_create_info info; + info.debug_name = "测试Buffer"; + info.device = vulkan_context::get().get_default_device(); + info.size = 64; + info.usage = buffer_usage::vertex; + auto buf = make_obj(info); + + std::array test_data{}; + buf->upload(nullptr, std::span(test_data)); + } + bool mirai_app::setup(const mirai_app_config& config) { window_mgr_ = make_obj(); @@ -14,6 +27,8 @@ namespace mirai { vulkan_context_config context_config{window_mgr_->get_main_window()->get_vk_surface()}; vulkan_context::get().setup(context_config); + + test_func(); return true; } diff --git a/src/core/fixed_size_function.h b/src/core/fixed_size_function.h new file mode 100644 index 0000000..f8f6533 --- /dev/null +++ b/src/core/fixed_size_function.h @@ -0,0 +1,98 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +template +class fixed_size_function; + +template +class fixed_size_function { +private: + // 对齐缓冲区 + alignas(std::max_align_t) std::byte buffer[MaxSize]; + + struct v_table { + Ret (*call)(const void*, Args&&...); + void (*destroy)(void*); + void (*move)(void* dest, void* src); + }; + + const v_table* vtable = nullptr; + + template + static Ret call_impl(const void* ptr, Args&&... args) { + // 使用 const_cast 仅在必要时(如果 F 的 operator() 是非 const 的) + // 实际上 std::function 默认也是要求 F 是可调用的 + return (*static_cast(ptr))(std::forward(args)...); + } + + template + static void destroy_impl(void* ptr) { + if constexpr (!std::is_trivially_destructible_v) { + std::destroy_at(static_cast(ptr)); // C++17/20 更好的写法 + } + } + + template + static void move_impl(void* dest, void* src) { + if constexpr (std::is_trivially_copyable_v) { + std::memcpy(dest, src, sizeof(F)); + } else { + new (dest) F(std::move(*static_cast(src))); + } + } + + template + static constexpr v_table vtable_for = { call_impl, destroy_impl, move_impl }; + +public: + fixed_size_function() noexcept = default; + + // 使用 C++20 constraints (requires) 替代 std::enable_if + template + requires (!std::is_same_v, fixed_size_function> && + std::is_invocable_r_v) + fixed_size_function(F&& f) { + using functor = std::remove_cvref_t; + static_assert(sizeof(functor) <= MaxSize, "函数体过大"); + + new (buffer) functor(std::forward(f)); + vtable = &vtable_for; + } + + ~fixed_size_function() { + if (vtable) vtable->destroy(buffer); + } + + // 移动构造 + fixed_size_function(fixed_size_function&& other) noexcept { + if (other.vtable) { + other.vtable->move(buffer, other.buffer); + vtable = other.vtable; + other.vtable = nullptr; + } + } + + // C++23 deducing this 优化调用逻辑 + // 我们可以捕捉当前的 const 属性和值类别 + template + Ret operator()(this Self&& self, Args... args) { + if (!self.vtable) [[unlikely]] { // C++20 属性优化分支预测 + throw std::bad_function_call(); + } + return self.vtable->call(self.buffer, std::forward(args)...); + } + + // C++23 可以考虑返回 std::expected 来避免异常 + // 这里为了保持 operator() 习惯保留了异常,但可以增加 try_call + std::expected try_call(Args... 
args) const { + if (!vtable) return std::unexpected("空函数"); + return vtable->call(buffer, std::forward(args)...); + } + + explicit operator bool() const noexcept { return vtable != nullptr; } +}; diff --git a/src/core/logger.h b/src/core/logger.h index 142c39a..3ad5598 100644 --- a/src/core/logger.h +++ b/src/core/logger.h @@ -45,14 +45,18 @@ namespace mirai { struct logger_config { std::string name{"mirai"}; + #if MIRAI_DEBUG + log_level level{log_level::trace}; + #else log_level level{log_level::info}; + #endif bool console_enabled{true}; bool file_enabled{true}; std::filesystem::path file_path{"logs/mirai.log"}; bool file_rotation_enabled{true}; size_type max_file_size{10 * 1024 * 1024}; // 10 MB size_type max_files{5}; - std::string pattern{"[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] [%t] %v"}; + std::string pattern{"[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] %v"}; bool include_source_location{true}; bool async_enabled{false}; size_type async_queue_size{8192}; diff --git a/src/core/object.h b/src/core/object.h index 0d21114..e2c6a90 100644 --- a/src/core/object.h +++ b/src/core/object.h @@ -218,11 +218,11 @@ namespace mirai { [[nodiscard]] constexpr virtual const mirai::type_info* get_parent_type() const noexcept override { \ return &mirai::get_type_info(); \ } \ - [[nodiscard]] constexpr auto shared_this() noexcept -> std::shared_ptr { \ - return std::static_pointer_cast(mirai::object::shared_from_this()); \ + [[nodiscard]] auto shared_this() noexcept -> std::shared_ptr { \ + return std::static_pointer_cast(shared_from_this()); \ } \ - [[nodiscard]] constexpr auto shared_this() const noexcept -> std::shared_ptr { \ - return std::static_pointer_cast(mirai::object::shared_from_this()); \ + [[nodiscard]] auto shared_this() const noexcept -> std::shared_ptr { \ + return std::static_pointer_cast(shared_from_this()); \ } \ friend struct mirai::object_factory; \ template \ diff --git a/src/gpu_resource/allocation_types.h b/src/gpu_resource/allocation_types.h index 95f3483..ebb18db 100644 --- a/src/gpu_resource/allocation_types.h +++ b/src/gpu_resource/allocation_types.h @@ -6,6 +6,7 @@ #include namespace mirai { + class vulkan_device; class vulkan_queue; /** @@ -22,10 +23,10 @@ namespace mirai { vk::DeviceMemory device_memory{}; /// 映射的指针(如果已映射) void* mapped_data = nullptr; + /// 是否为直接访问内存 + bool is_direct_access = false; /// 内存类型索引 u32 memory_type_index = 0; - /// 是否为专用分配 - bool is_dedicated = false; }; /** @@ -36,14 +37,14 @@ namespace mirai { u64 size = 0; /// Buffer 用途 buffer_usage usage = buffer_usage::vertex; - /// 内存用途 - memory_usage mem_usage = memory_usage::gpu_only; /// 资源共享模式 resource_sharing_mode sharing_mode = resource_sharing_mode::exclusive; - /// 是否持久映射 - bool persistent_mapped = false; + /// 设备 + std::shared_ptr device{}; + #if MIRAI_DEBUG /// 调试名称 std::string debug_name; + #endif }; struct buffer_allocation_info { /// Buffer 大小(字节) @@ -68,6 +69,9 @@ namespace mirai { vma::Allocation allocation = VK_NULL_HANDLE; /// 分配信息 allocation_info info; + #if MIRAI_DEBUG + std::string debug_name; + #endif /// 是否有效 [[nodiscard]] bool is_valid() const noexcept { return buffer && allocation; diff --git a/src/gpu_resource/allocator.cpp b/src/gpu_resource/allocator.cpp index cdfe444..7b150d7 100644 --- a/src/gpu_resource/allocator.cpp +++ b/src/gpu_resource/allocator.cpp @@ -82,53 +82,13 @@ namespace mirai { vma_allocator_ = result.value(); } - vma::MemoryUsage vma_allocator::to_vma_memory_usage(memory_usage usage) noexcept { - switch (usage) { - case memory_usage::gpu_only: - return vma::MemoryUsage::eGpuOnly; 
- case memory_usage::cpu_only: - return vma::MemoryUsage::eCpuOnly; - case memory_usage::cpu_to_gpu: - return vma::MemoryUsage::eCpuToGpu; - case memory_usage::gpu_to_cpu: - return vma::MemoryUsage::eGpuToCpu; - case memory_usage::auto_prefer_device: - return vma::MemoryUsage::eAutoPreferDevice; - case memory_usage::auto_prefer_host: - return vma::MemoryUsage::eAutoPreferHost; - default: - return vma::MemoryUsage::eAuto; - } - } - - vma::AllocationCreateFlags - vma_allocator::to_vma_allocation_flags(memory_usage usage, bool persistent_mapped) noexcept { - vma::AllocationCreateFlags flags; - - // 持久映射 - if (persistent_mapped) { - flags |= vma::AllocationCreateFlagBits::eMapped; - } - - // 根据内存用途设置访问模式 - switch (usage) { - case memory_usage::cpu_only: - case memory_usage::cpu_to_gpu: - flags |= vma::AllocationCreateFlagBits::eHostAccessSequentialWrite; - break; - case memory_usage::gpu_to_cpu: - flags |= vma::AllocationCreateFlagBits::eHostAccessRandom; - break; - default: - break; - } - - return flags; - } - result_t vma_allocator::alloc_buffer(const buffer_create_info& info) { buffer_allocation alloc; + #if MIRAI_DEBUG + alloc.debug_name = info.debug_name; + #endif + vk::BufferCreateInfo buffer_info{}; buffer_info.size = info.size; buffer_info.usage = to_vulkan_buffer_usage(info.usage); @@ -139,17 +99,91 @@ namespace mirai { } vma::AllocationCreateInfo alloc_info{}; - alloc_info.usage = to_vma_memory_usage(info.mem_usage); - alloc_info.flags = to_vma_allocation_flags(info.mem_usage, info.persistent_mapped); - - vma::AllocationInfo vma_alloc_info{}; - vk::Result vk_result; - vk::Buffer vk_buffer; + alloc_info.usage = vma::MemoryUsage::eAuto; + alloc_info.flags = vma::AllocationCreateFlagBits::eHostAccessSequentialWrite; + alloc_info.flags |= vma::AllocationCreateFlagBits::eMapped; + alloc_info.preferredFlags = vk::MemoryPropertyFlagBits::eDeviceLocal; { std::lock_guard lock(mutex_); - // vk_result = vma::UniqueBuffer(vma_allocator_, &buffer_info, &alloc_info, reinterpret_cast(&vk_buffer), - // reinterpret_cast(&alloc.allocation), &vma_alloc_info); + auto [result, pair] = vma_allocator_.createBuffer(buffer_info, alloc_info); + if (result != vk::Result::eSuccess) { + return MAKE_ERROR_INFO(error_code::vulkan_allocation_failed, + "VMA 分配 Buffer 失败: {}", + vk::to_string(result)); + } + const auto& [allocation, buffer] = pair; + alloc.buffer = buffer; + alloc.allocation = allocation; + } + auto res_info = vma_allocator_.getAllocationInfo(alloc.allocation); + auto mem_flags = vma_allocator_.getAllocationMemoryProperties(alloc.allocation); + auto is_direct_access = static_cast(mem_flags & vk::MemoryPropertyFlagBits::eDeviceLocal); + // 获取映射好的指针(因为加了 eMapped 标志) + const auto mapped_ptr = vma_allocator_.getAllocationInfo(alloc.allocation).pMappedData; + + alloc.info.size = res_info.size; + alloc.info.offset = res_info.offset; + alloc.info.device_memory = res_info.deviceMemory; + alloc.info.mapped_data = mapped_ptr; + alloc.info.is_direct_access = is_direct_access; + alloc.info.memory_type_index = res_info.memoryType; + + return alloc; + } + + result_t vma_allocator::alloc_staging_buffer(const buffer_create_info& info) { + buffer_allocation alloc; + + vma::AllocationCreateInfo alloc_info{}; + alloc_info.usage = vma::MemoryUsage::eAuto; + alloc_info.flags = vma::AllocationCreateFlagBits::eHostAccessSequentialWrite; + alloc_info.flags |= vma::AllocationCreateFlagBits::eMapped; + alloc_info.preferredFlags = vk::MemoryPropertyFlagBits::eHostVisible; + + vk::BufferCreateInfo buffer_create_info{}; + 
buffer_create_info.setSize(info.size); + buffer_create_info.setUsage(vk::BufferUsageFlagBits::eTransferSrc); + + { + auto [result, pair] = vma_allocator_.createBuffer(buffer_create_info, alloc_info); + if (result != vk::Result::eSuccess) { + return MAKE_ERROR_INFO(error_code::vulkan_allocation_failed, + "VMA 分配暂存 Buffer 失败: {}", + vk::to_string(result)); + } + const auto& [allocation, buffer] = pair; + alloc.buffer = buffer; + alloc.allocation = allocation; + } + + auto res_info = vma_allocator_.getAllocationInfo(alloc.allocation); + auto mem_flags = vma_allocator_.getAllocationMemoryProperties(alloc.allocation); + auto is_direct_access = static_cast(mem_flags & vk::MemoryPropertyFlagBits::eHostVisible); + // 获取映射好的指针(因为加了 eMapped 标志) + const auto mapped_ptr = vma_allocator_.getAllocationInfo(alloc.allocation).pMappedData; + alloc.info.size = res_info.size; + alloc.info.offset = res_info.offset; + alloc.info.device_memory = res_info.deviceMemory; + alloc.info.mapped_data = mapped_ptr; + alloc.info.is_direct_access = is_direct_access; + alloc.info.memory_type_index = res_info.memoryType; + + return alloc; + } + + void_result_t vma_allocator::free_buffer(const buffer_allocation& buffer) { + { + std::lock_guard lock(mutex_); + vma_allocator_.destroyBuffer(buffer.buffer, buffer.allocation); + } + return {}; + } + + void_result_t vma_allocator::free_staging_buffer(const buffer_allocation& buffer) { + { + std::lock_guard lock(mutex_); + vma_allocator_.destroyBuffer(buffer.buffer, buffer.allocation); } return {}; } diff --git a/src/gpu_resource/allocator.h b/src/gpu_resource/allocator.h index 2d6ade6..55d9f71 100644 --- a/src/gpu_resource/allocator.h +++ b/src/gpu_resource/allocator.h @@ -42,10 +42,10 @@ namespace mirai { void setup(const allocator_config& config); - vma::MemoryUsage to_vma_memory_usage(memory_usage usage) noexcept; - vma::AllocationCreateFlags to_vma_allocation_flags(memory_usage usage, bool persistent_mapped) noexcept; - result_t alloc_buffer(const buffer_create_info& info); + result_t alloc_staging_buffer(const buffer_create_info& info); + void_result_t free_buffer(const buffer_allocation& buffer); + void_result_t free_staging_buffer(const buffer_allocation& buffer); protected: void on_destroying() override; private: diff --git a/src/gpu_resource/gpu_buffer.cpp b/src/gpu_resource/gpu_buffer.cpp index 78a0efc..7455075 100644 --- a/src/gpu_resource/gpu_buffer.cpp +++ b/src/gpu_resource/gpu_buffer.cpp @@ -1,21 +1,85 @@ #include "gpu_buffer.h" #include "allocator.h" +#include "core/logger.h" +#include "render/vulkan_command_buffer.h" +#include "render/vulkan_device.h" +#include "resource_types_to_string.h" namespace mirai { gpu_buffer::gpu_buffer(const buffer_create_info& info) { - vma_allocator::get(); + device_ = info.device; + auto allocator = device_->get_allocator(); + auto buffer = allocator->alloc_buffer(info); + if (!buffer) { + MIRAI_LOG_ERROR("分配 GPU Buffer 失败: {}", buffer.error().full_description()); + return; + } + allocation_ = buffer.value(); + + #if MIRAI_DEBUG + MIRAI_LOG_DEBUG("创建 GPU Buffer {}: 大小={} 字节, 用途={}, 共享模式={}", + info.debug_name, + allocation_.info.size, + to_string(info.usage), + to_string(info.sharing_mode) + ); + #endif } - bool gpu_buffer::is_host_visible() const noexcept { - switch (mem_usage_) { - case memory_usage::cpu_only: - case memory_usage::cpu_to_gpu: - case memory_usage::gpu_to_cpu: - case memory_usage::auto_prefer_host: - return true; - default: - return false; + gpu_buffer::~gpu_buffer() { + if (allocation_.is_valid()) { + auto allocator 
= device_->get_allocator(); + auto result = allocator->free_buffer(allocation_); + if (!result) { + MIRAI_LOG_ERROR("释放 GPU Buffer 失败: {}", result.error().full_description()); + } + #if MIRAI_DEBUG + MIRAI_LOG_DEBUG("销毁 GPU Buffer {},大小={} 字节", allocation_.debug_name, allocation_.info.size); + #endif + } + } + + void gpu_buffer::upload(std::shared_ptr cmd, std::span data) { + // 如果开启了Resizable Bar,则可以直接访问显存 + if (allocation_.info.is_direct_access) { + auto mapped_ptr = static_cast(allocation_.info.mapped_data); + memcpy(mapped_ptr, data.data(), std::min(allocation_.info.size, data.size())); + } else { + // 使用暂存缓冲区上传数据 + auto staging_buffer_info = buffer_create_info{}; + staging_buffer_info.size = allocation_.info.size; + staging_buffer_info.usage = buffer_usage::transfer_src; + staging_buffer_info.sharing_mode = resource_sharing_mode::concurrent; + staging_buffer_info.device = device_; + + auto allocator = device_->get_allocator(); + auto staging_buffer_alloc = allocator->alloc_staging_buffer(staging_buffer_info); + if (!staging_buffer_alloc) { + MIRAI_LOG_ERROR("分配暂存 Buffer 失败: {}", staging_buffer_alloc.error().full_description()); + return; + } + const auto& staging_buffer = staging_buffer_alloc.value(); + + // 拷贝数据到暂存缓冲区 + auto mapped_ptr = static_cast(staging_buffer.info.mapped_data); + memcpy(mapped_ptr, data.data(), std::min(staging_buffer.info.size, data.size())); + + // 记录命令将数据从暂存缓冲区复制到目标缓冲区 + cmd->copy_buffer(staging_buffer.buffer, allocation_.buffer, allocation_.info.size); + + vulkan_cmd_buffer_cleanup_task cleanup_task{}; + #if MIRAI_DEBUG + cleanup_task.debug_name = "GPU Buffer Upload任务释放Staging Buffer "; + #endif + cleanup_task.task = [allocator, staging_buffer] { + // 释放暂存缓冲区 + auto free_result = allocator->free_staging_buffer(staging_buffer); + if (!free_result) { + MIRAI_LOG_ERROR("释放暂存 Buffer 失败: {}", free_result.error().full_description()); + } + }; + cmd->add_cleanup_task(cleanup_task); } } } diff --git a/src/gpu_resource/gpu_buffer.h b/src/gpu_resource/gpu_buffer.h index 7652c0a..c24894c 100644 --- a/src/gpu_resource/gpu_buffer.h +++ b/src/gpu_resource/gpu_buffer.h @@ -9,29 +9,16 @@ namespace mirai { MIRAI_OBJECT_TYPE_INFO(gpu_buffer, gpu_resource) gpu_buffer(const buffer_create_info& info); + ~gpu_buffer() override; + + void upload(std::shared_ptr cmd, std::span data) override; + + [[nodiscard]] auto get_allocation_info() const noexcept { return allocation_.info; } [[nodiscard]] auto get_buffer() const noexcept { return allocation_.buffer; } - [[nodiscard]] u64 get_size() const noexcept { return size_; } - [[nodiscard]] buffer_usage get_usage() const noexcept { return usage_; } - [[nodiscard]] memory_usage get_memory_usage() const noexcept { return mem_usage_; } - [[nodiscard]] bool is_persistent_mapped() const noexcept { return persistent_mapped_; } - [[nodiscard]] void* get_mapped_data() const noexcept { return mapped_data_; } + [[nodiscard]] auto get_size() const noexcept { return allocation_.info.size; } private: - [[nodiscard]] bool is_host_visible() const noexcept; - buffer_allocation allocation_; - - // buffer申请大小 - u64 size_ = 0; - // buffer用途 - buffer_usage usage_ = buffer_usage::vertex; - // 内存用途 - memory_usage mem_usage_ = memory_usage::gpu_only; - // 是否持久映射 - bool persistent_mapped_ = false; - // 映射的指针 - void* mapped_data_ = nullptr; - // 调试名称 - std::string debug_name_; + std::shared_ptr device_; }; } diff --git a/src/gpu_resource/gpu_resource.h b/src/gpu_resource/gpu_resource.h index 4422aee..82b0521 100644 --- a/src/gpu_resource/gpu_resource.h +++ 
b/src/gpu_resource/gpu_resource.h @@ -3,7 +3,10 @@ #include "resource_types.h" #include "core/object.h" +#include + namespace mirai { + class vulkan_command_buffer; class vulkan_queue; class gpu_resource : public object { @@ -12,9 +15,11 @@ namespace mirai { // 如果没有传入任何参数,则代表资源使用并发(concurrent)模式 gpu_resource() : sharing_mode_(resource_sharing_mode::concurrent) {} // 如果传入队列,则代表资源使用独占(exclusive)模式 - gpu_resource(std::shared_ptr queue) : sharing_mode_(resource_sharing_mode::exclusive), queue_(queue) {} + gpu_resource(const std::shared_ptr& queue) : sharing_mode_(resource_sharing_mode::exclusive), + queue_(queue) { + } - virtual resource_async_task upload(std::vector data) = 0; + virtual void upload(std::shared_ptr cmd, std::span data) = 0; [[nodiscard]] auto get_sharing_mode() const noexcept { return sharing_mode_; } [[nodiscard]] auto get_queue() const noexcept { return queue_.lock(); } diff --git a/src/gpu_resource/render_target.cpp b/src/gpu_resource/render_target.cpp index e67b255..c7d14b2 100644 --- a/src/gpu_resource/render_target.cpp +++ b/src/gpu_resource/render_target.cpp @@ -4,23 +4,23 @@ namespace mirai { render_target::render_target(vk::Image image) { - image_ref_ = image; + // image_ref_ = image; - auto default_device = vulkan_context::get().get_default_device(); - // 创建图像视图 - vk::ImageViewCreateInfo view_info{}; - view_info.setImage(image_ref_); - view_info.setViewType(vk::ImageViewType::e2D); - view_info.setFormat(vk::Format::eB8G8R8A8Srgb); + // auto default_device = vulkan_context::get().get_default_device(); + // // 创建图像视图 + // vk::ImageViewCreateInfo view_info{}; + // view_info.setImage(image_ref_); + // view_info.setViewType(vk::ImageViewType::e2D); + // view_info.setFormat(vk::Format::eB8G8R8A8Srgb); } void render_target::resize(vec2i size) { auto default_device = vulkan_context::get().get_default_device(); - default_device->get_device().waitIdle(); + // default_device->get_device().waitIdle(); } vec2i render_target::get_size() const noexcept { - + return vec2i{800, 600}; } } diff --git a/src/gpu_resource/render_target.h b/src/gpu_resource/render_target.h index fd3d808..14457bd 100644 --- a/src/gpu_resource/render_target.h +++ b/src/gpu_resource/render_target.h @@ -17,7 +17,7 @@ namespace mirai { void resize(vec2i size); vec2i get_size() const noexcept; - [[nodiscard]] vk::ImageView get_image_view() const noexcept { return image_view_; } + // [[nodiscard]] vk::ImageView get_image_view() const noexcept { return image_view_; } [[nodiscard]] vk::Framebuffer get_framebuffer() const noexcept { return framebuffer_; } protected: void on_destroying() override; diff --git a/src/gpu_resource/resource_manager.cpp b/src/gpu_resource/resource_manager.cpp index aec6828..5f1de61 100644 --- a/src/gpu_resource/resource_manager.cpp +++ b/src/gpu_resource/resource_manager.cpp @@ -8,6 +8,7 @@ #include "render/vulkan_device.h" #include "gpu_resource/texture/texture.h" #include "gpu_resource/gpu_buffer.h" +#include "render/vulkan_command_buffer.h" #include "render/vulkan_fence.h" #include "render/vulkan_time_semaphore.h" @@ -71,22 +72,27 @@ namespace mirai { resource_async_task resource_manager::upload_resource(const std::shared_ptr& tex, const std::vector& data) { // 1. 这里的逻辑在管理器线程执行,立即派发 Submit - uint64_t completion_value = dispatch_immediate_transfer(tex, data); + // uint64_t completion_value = dispatch_immediate_transfer(tex, data); // 2. 
挂起并等待 GPU // 这里的 co_await 会调用 TimelineAwaiter - co_await timeline_awaiter{get_semaphore(), completion_value}; + // co_await timeline_awaiter{get_semaphore(), completion_value}; // 3. 当管理器轮询到完成,会自动调用 handle.resume() // 代码会在这里“奇迹般”恢复,执行清理工作 MIRAI_LOG_DEBUG("上传完成,清理暂存缓冲区..."); + return {}; } uint64_t resource_manager::dispatch_immediate_transfer(std::shared_ptr tex, std::shared_ptr staging) { auto queue = device_->get_transfer_queue(); - auto cmd = device_->create_command_buffer(vk::CommandBufferLevel::ePrimary); + auto cmd = make_obj(queue); - cmd.begin(vk::CommandBufferBeginInfo{vk::CommandBufferUsageFlagBits::eOneTimeSubmit}); + auto result = cmd->begin(); + if (result != vk::Result::eSuccess) { + MIRAI_LOG_ERROR("开始命令缓冲区失败: {}", vk::to_string(result)); + throw std::runtime_error("开始命令缓冲区失败"); + } vk::ImageSubresourceRange range{vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}; vk::ImageMemoryBarrier2 barrier{vk::PipelineStageFlagBits2::eTopOfPipe, vk::AccessFlagBits2::eNone, @@ -95,94 +101,90 @@ namespace mirai { VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, tex->get_image(), range}; - cmd.pipelineBarrier2(vk::DependencyInfo{0, nullptr, 0, nullptr, 1, &barrier}); + vk::DependencyInfo begin_barrier_info{}; + begin_barrier_info.setImageMemoryBarriers({barrier}); + cmd->pipeline_barrier(begin_barrier_info); - vk::BufferImageCopy copy_region{0, 0, 0, - vk::ImageAspectFlagBits::eColor, 0, {0, 0, 1}, - tex->get_extent()}; - cmd.copyBufferToImage(staging->buffer, tex->get_image(), vk::ImageLayout::eTransferDstOptimal, 1, ©_region); + vk::BufferImageCopy copy_region{}; + copy_region.setImageExtent(tex->get_extent()); + copy_region.setImageSubresource({vk::ImageAspectFlagBits::eColor, 0, 0, 1}); + cmd->copy_buffer_to_image(copy_region, tex->get_image(), vk::ImageLayout::eTransferDstOptimal); vk::ImageMemoryBarrier2 barrier2{vk::PipelineStageFlagBits2::eCopy, vk::AccessFlagBits2::eTransferWrite, vk::PipelineStageFlagBits2::eBottomOfPipe, vk::AccessFlagBits2::eNone, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, tex->get_image(), range}; - cmd.pipelineBarrier2(vk::DependencyInfo{0, nullptr, 0, nullptr, 1, &barrier2}); + vk::DependencyInfo end_barrier_info{}; + end_barrier_info.setImageMemoryBarriers({barrier2}); + cmd->pipeline_barrier(end_barrier_info); - cmd.end(); + result = cmd->end(); + if (result != vk::Result::eSuccess) { + MIRAI_LOG_ERROR("结束命令缓冲区失败: {}", vk::to_string(result)); + throw std::runtime_error("结束命令缓冲区失败"); + } auto fence = device_->create_fence(); - vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr}; - queue.submit(1, &submit_info, fence); + queue->submit({cmd->get_command_buffer()}, {}, {}); auto res = fence->device_wait(); if (res == vk::Result::eSuccess) { fence.reset(); } - device_->destroy_command_buffer(cmd); return ++last_value_; } uint64_t resource_manager::dispatch_immediate_transfer(std::shared_ptr buffer, std::shared_ptr staging) { - auto& ctx = vulkan_context::get(); - auto device = ctx.get_default_device(); - auto queue = ctx.get_transfer_queue(); - - auto cmd = device->create_command_buffer(vk::CommandBufferLevel::ePrimary); - - cmd.begin(vk::CommandBufferBeginInfo{vk::CommandBufferUsageFlagBits::eOneTimeSubmit}); - - vk::BufferCopy copy_region{0, 0, buffer->get_size()}; - cmd.copyBuffer(staging->buffer, buffer->get_buffer(), 1, ©_region); - - cmd.end(); - - auto fence = device->create_fence(); - vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr}; - 
queue.submit(1, &submit_info, fence); - - auto result = device->wait_for_fence(fence); - if (result == file_result::success) { - device->destroy_fence(fence); - } - device->destroy_command_buffer(cmd); - + // auto queue = device_->get_transfer_queue(); + // auto cmd = make_obj(queue); + // + // auto result = cmd->begin(); + // vk::BufferCopy copy_region{0, 0, buffer->get_size()}; + // cmd->copy_buffer(staging->buffer, buffer->get_buffer(), copy_region); + // result = cmd->end(); + // + // auto fence = make_obj(device_); + // vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr}; + // queue->submit(1, &submit_info, fence); + // + // result = fence->device_wait(); return ++last_value_; } void resource_manager::upload_thread_func() { - while (running_) { - uint64_t gpu_value = 0; - auto res = device_.getSemaphoreCounterValue(global_timeline_, &gpu_value); - - if (res == vk::Result::eSuccess) { - std::lock_guard lock(active_mutex_); - - for (auto it = active_indices_.begin(); it != active_indices_.end(); ) { - size_t idx = *it; - if (gpu_value >= task_pool_[idx].target_value) { - // 唤醒协程 - auto handle = task_pool_[idx].handle; - - // 先标记为不活跃,再恢复协程(防止重入问题) - task_pool_[idx].is_active.store(false); - it = active_indices_.erase(it); - - // 恢复执行协程后续的清理代码 - if (handle) handle.resume(); - } else { - ++it; - } - } - } - - // 避免 100% CPU 占用 - if (active_indices_.empty()) { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } else { - std::this_thread::yield(); - } - } + // while (running_) { + // uint64_t gpu_value = 0; + // auto res = device_.getSemaphoreCounterValue(global_timeline_, &gpu_value); + // + // if (res == vk::Result::eSuccess) { + // std::lock_guard lock(active_mutex_); + // + // for (auto it = active_indices_.begin(); it != active_indices_.end(); ) { + // size_t idx = *it; + // if (gpu_value >= task_pool_[idx].target_value) { + // // 唤醒协程 + // auto handle = task_pool_[idx].handle; + // + // // 先标记为不活跃,再恢复协程(防止重入问题) + // task_pool_[idx].is_active.store(false); + // it = active_indices_.erase(it); + // + // // 恢复执行协程后续的清理代码 + // if (handle) handle.resume(); + // } else { + // ++it; + // } + // } + // } + // + // // 避免 100% CPU 占用 + // if (active_indices_.empty()) { + // std::this_thread::sleep_for(std::chrono::milliseconds(1)); + // } else { + // std::this_thread::yield(); + // } + // } } } diff --git a/src/gpu_resource/resource_manager.h b/src/gpu_resource/resource_manager.h index 80ac53f..0be6661 100644 --- a/src/gpu_resource/resource_manager.h +++ b/src/gpu_resource/resource_manager.h @@ -19,6 +19,19 @@ namespace mirai { std::coroutine_handle<> handle; uint64_t target_value = 0; std::atomic is_active{ false }; + + resource_upload_task() = default; + resource_upload_task(const resource_upload_task& other) noexcept { + handle = other.handle; + target_value = other.target_value; + is_active.store(other.is_active.load()); + } + auto& operator=(const resource_upload_task& other) noexcept { + handle = other.handle; + target_value = other.target_value; + is_active.store(other.is_active.load()); + return *this; + } }; struct resource_async_task { diff --git a/src/gpu_resource/resource_types_to_string.cpp b/src/gpu_resource/resource_types_to_string.cpp new file mode 100644 index 0000000..3210450 --- /dev/null +++ b/src/gpu_resource/resource_types_to_string.cpp @@ -0,0 +1,43 @@ +#include "resource_types_to_string.h" + +namespace mirai { + std::string to_string(resource_sharing_mode mode) { + switch (mode) { + case resource_sharing_mode::exclusive: + return "Exclusive"; 
+ case resource_sharing_mode::concurrent: + return "Concurrent"; + default: + return "Unknown"; + } + } + + std::string to_string(buffer_usage usage) { + std::string result; + if (static_cast(usage) == 0) { + return "None"; + } + if ((usage & buffer_usage::vertex) == buffer_usage::vertex) { + result += "Vertex|"; + } + if ((usage & buffer_usage::index) == buffer_usage::index) { + result += "Index|"; + } + if ((usage & buffer_usage::uniform) == buffer_usage::uniform) { + result += "Uniform|"; + } + if ((usage & buffer_usage::storage) == buffer_usage::storage) { + result += "Storage|"; + } + if ((usage & buffer_usage::transfer_src) == buffer_usage::transfer_src) { + result += "TransferSrc|"; + } + if ((usage & buffer_usage::transfer_dst) == buffer_usage::transfer_dst) { + result += "TransferDst|"; + } + if (!result.empty()) { + result.pop_back(); // 移除最后的 '|' + } + return result; + } +} diff --git a/src/gpu_resource/resource_types_to_string.h b/src/gpu_resource/resource_types_to_string.h new file mode 100644 index 0000000..995670b --- /dev/null +++ b/src/gpu_resource/resource_types_to_string.h @@ -0,0 +1,9 @@ +#pragma once +#include + +#include "resource_types.h" + +namespace mirai { + std::string to_string(resource_sharing_mode mode); + std::string to_string(buffer_usage usage); +} diff --git a/src/gpu_resource/swapchain.cpp b/src/gpu_resource/swapchain.cpp index 0bc1383..a0bd155 100644 --- a/src/gpu_resource/swapchain.cpp +++ b/src/gpu_resource/swapchain.cpp @@ -84,19 +84,19 @@ namespace mirai { } void swapchain::create_image_views() { - auto default_device = vulkan_context::get().get_default_device(); - u32 image_count = 0; - auto result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, images); - if (result != vk::Result::eSuccess) { - MIRAI_LOG_ERROR("获取交换链图像数量失败:{}", vk::to_string(result)); - return; - } - std::vector swapchain_images(image_count); - result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, swapchain_images.data()); - if (result != vk::Result::eSuccess) { - MIRAI_LOG_ERROR("获取交换链图像失败:{}", vk::to_string(result)); - return; - } + // auto default_device = vulkan_context::get().get_default_device(); + // u32 image_count = 0; + // auto result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, images); + // if (result != vk::Result::eSuccess) { + // MIRAI_LOG_ERROR("获取交换链图像数量失败:{}", vk::to_string(result)); + // return; + // } + // std::vector swapchain_images(image_count); + // result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, swapchain_images.data()); + // if (result != vk::Result::eSuccess) { + // MIRAI_LOG_ERROR("获取交换链图像失败:{}", vk::to_string(result)); + // return; + // } } diff --git a/src/gpu_resource/texture/texture2d.cpp b/src/gpu_resource/texture/texture2d.cpp index 818582a..deabe79 100644 --- a/src/gpu_resource/texture/texture2d.cpp +++ b/src/gpu_resource/texture/texture2d.cpp @@ -6,24 +6,24 @@ #include "types/error.h" namespace mirai { - resource_async_task texture2d::upload(std::vector data) { - auto& manager = resource_manager::get(); - // A. 准备工作 (Staging Buffer 分配等) - auto staging = manager.allocate_staging(data.size()); - memcpy(staging->map(), data.data(), data.size()); - - // B. 立即派发 (Immediate Dispatch) - // 内部调用两次 submit,并返回本次任务的全局 target_value - uint64_t wait_val = manager.dispatch_immediate_transfer(shared_from_this(), staging); - - // C. 
异步等待 - // 线程会在这里返回,直到 GPU 完成任务后被管理器唤醒 - co_await TimelineAwaiter{ manager.get_global_semaphore(), wait_val, manager }; - - // D. 恢复执行 (清理工作) - manager.free_staging(staging); - MIRAI_LOG_DEBUG("纹理上传及所有权转移完成"); - } + // resource_async_task texture2d::upload(std::vector data) { + // auto& manager = resource_manager::get(); + // // A. 准备工作 (Staging Buffer 分配等) + // auto staging = manager.allocate_staging(data.size()); + // memcpy(staging->map(), data.data(), data.size()); + // + // // B. 立即派发 (Immediate Dispatch) + // // 内部调用两次 submit,并返回本次任务的全局 target_value + // uint64_t wait_val = manager.dispatch_immediate_transfer(shared_from_this(), staging); + // + // // C. 异步等待 + // // 线程会在这里返回,直到 GPU 完成任务后被管理器唤醒 + // co_await TimelineAwaiter{ manager.get_global_semaphore(), wait_val, manager }; + // + // // D. 恢复执行 (清理工作) + // manager.free_staging(staging); + // MIRAI_LOG_DEBUG("纹理上传及所有权转移完成"); + // } vk::ImageSubresourceRange texture2d::get_full_range() const noexcept { return vk::ImageSubresourceRange{ diff --git a/src/gpu_resource/texture/texture2d.h b/src/gpu_resource/texture/texture2d.h index afdacf8..c17cbfb 100644 --- a/src/gpu_resource/texture/texture2d.h +++ b/src/gpu_resource/texture/texture2d.h @@ -5,7 +5,7 @@ namespace mirai { class texture2d : public texture { MIRAI_OBJECT_TYPE_INFO(texture2d, texture); - resource_async_task upload(std::vector data) override; + // resource_async_task upload(std::vector data) override; texture_type get_texture_type() const noexcept override { return texture_type::texture_2d; } [[nodiscard]] auto size() const { return size_; } diff --git a/src/render/vulkan_command_buffer.cpp b/src/render/vulkan_command_buffer.cpp new file mode 100644 index 0000000..6b3314f --- /dev/null +++ b/src/render/vulkan_command_buffer.cpp @@ -0,0 +1,34 @@ +#include "vulkan_command_buffer.h" + +#include "vulkan_context.h" + +namespace mirai { + vulkan_command_buffer::vulkan_command_buffer(std::shared_ptr queue, vk::CommandBufferLevel level) { + queue_ = queue; + level_ = level; + cmd_ = vulkan_thread_context::get().allocate_command_buffer(queue_, level_); + } + + vk::Result vulkan_command_buffer::begin(const vk::CommandBufferBeginInfo& info) const { + return cmd_.begin(info); + } + + void vulkan_command_buffer::pipeline_barrier(const vk::DependencyInfo& dependency_info) { + return cmd_.pipelineBarrier2(dependency_info); + } + + void vulkan_command_buffer::execute_cleanup_tasks() { + for (const auto& task : cleanup_tasks_) { + if (task.task) { + task.task(); + } + } + cleanup_tasks_.clear(); + } + + void vulkan_command_buffer::on_destroying() { + object::on_destroying(); + + vulkan_thread_context::get().free_command_buffer(queue_, cmd_); + } +} diff --git a/src/render/vulkan_command_buffer.h b/src/render/vulkan_command_buffer.h new file mode 100644 index 0000000..c518d4b --- /dev/null +++ b/src/render/vulkan_command_buffer.h @@ -0,0 +1,59 @@ +#pragma once +#include "core/object.h" +#include + + +namespace mirai { + class vulkan_queue; + + // 用于命令缓冲区完成执行后,如果需要清理资源 + struct vulkan_cmd_buffer_cleanup_task { + std::function task; + #if MIRAI_DEBUG + std::string debug_name; + #endif + }; + + /** + * 线程安全的command buffer包装器 + */ + class vulkan_command_buffer : public object { + MIRAI_OBJECT_TYPE_INFO(vulkan_command_buffer, object) + public: + vulkan_command_buffer(std::shared_ptr queue, + vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary); + + vk::Result begin(const vk::CommandBufferBeginInfo& info = vk::CommandBufferBeginInfo{ + vk::CommandBufferUsageFlagBits::eOneTimeSubmit + 
}) const; + vk::Result end() const { return cmd_.end(); } + + void pipeline_barrier(const vk::DependencyInfo& dependency_info); + + void copy_buffer_to_image(const vk::BufferImageCopy& copy_region, + vk::Image image, + vk::ImageLayout layout) const { + cmd_.copyBufferToImage(vk::Buffer{}, image, layout, 1, ©_region); + } + + void copy_buffer(vk::Buffer src, vk::Buffer dst, const vk::BufferCopy& copy_region) const { + cmd_.copyBuffer(src, dst, 1, ©_region); + } + + [[nodiscard]] auto get_queue() const { return queue_; } + [[nodiscard]] auto get_command_buffer() const { return cmd_; } + [[nodiscard]] auto get_level() const { return level_; } + + void add_cleanup_task(const vulkan_cmd_buffer_cleanup_task& task) { + cleanup_tasks_.emplace_back(task); + } + void execute_cleanup_tasks(); + protected: + void on_destroying() override; + private: + std::shared_ptr queue_; + vk::CommandBuffer cmd_; + vk::CommandBufferLevel level_; + std::vector cleanup_tasks_; + }; +} diff --git a/src/render/vulkan_context.cpp b/src/render/vulkan_context.cpp index b2d2cc2..63b79b4 100644 --- a/src/render/vulkan_context.cpp +++ b/src/render/vulkan_context.cpp @@ -6,6 +6,8 @@ #include "core/logger.h" namespace mirai { + thread_local vulkan_thread_context vulkan_thread_context::instance; + void vulkan_context::init(const vulkan_context_init_config& config) { instance_ = make_obj(config.instance_config); } @@ -15,6 +17,9 @@ namespace mirai { } void vulkan_context::shutdown() { + for (const auto& d : devices_) { + d->request_destroy(); + } devices_.clear(); instance_.reset(); } @@ -52,15 +57,38 @@ namespace mirai { create_device(config); } + vk::CommandBuffer vulkan_thread_context::allocate_command_buffer(const std::shared_ptr& queue, + vk::CommandBufferLevel level) { + const auto pool = get_pool(queue); + const auto device = queue->get_device(); + vk::CommandBufferAllocateInfo alloc_info( + pool, + level, + 1 + ); + auto [result, cmd_buffers] = device.allocateCommandBuffers(alloc_info); + if (result != vk::Result::eSuccess) { + MIRAI_LOG_ERROR("分配命令缓冲区失败: {}", vk::to_string(result)); + throw std::runtime_error("分配命令缓冲区失败"); + } + return cmd_buffers[0]; + } + + void vulkan_thread_context::free_command_buffer(const std::shared_ptr& queue, vk::CommandBuffer cmd) { + const auto pool = get_pool(queue); + const auto device = queue->get_device(); + device.freeCommandBuffers(pool, 1, &cmd); + } + vk::CommandPool vulkan_thread_context::get_pool(const std::shared_ptr& queue) { const auto family_index = queue->get_family_index(); - const auto device = queue->get_device(); if (!pools_.contains(family_index)) { // 创建新池子,允许重置单个 CommandBuffer vk::CommandPoolCreateInfo info( vk::CommandPoolCreateFlagBits::eResetCommandBuffer, family_index ); + const auto device = queue->get_device(); auto [result, pool] = device.createCommandPool(info); if (result != vk::Result::eSuccess) { MIRAI_LOG_ERROR("创建命令池失败: {}", vk::to_string(result)); diff --git a/src/render/vulkan_context.h b/src/render/vulkan_context.h index 503c115..2942804 100644 --- a/src/render/vulkan_context.h +++ b/src/render/vulkan_context.h @@ -53,6 +53,8 @@ namespace mirai { return instance; } + vk::CommandBuffer allocate_command_buffer(const std::shared_ptr& queue, vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary); + void free_command_buffer(const std::shared_ptr& queue, vk::CommandBuffer cmd); vk::CommandPool get_pool(const std::shared_ptr& queue); private: std::unordered_map pools_; diff --git a/src/render/vulkan_device.cpp b/src/render/vulkan_device.cpp index 
7589d94..fe29052 100644 --- a/src/render/vulkan_device.cpp +++ b/src/render/vulkan_device.cpp @@ -143,8 +143,6 @@ namespace mirai { } MIRAI_LOG_INFO("VMA 分配器初始化完成"); - - resource_manager_ = make_obj(device_); } std::shared_ptr vulkan_device::create_fence() { @@ -159,6 +157,21 @@ namespace mirai { return make_obj(shared_this(), initial_value); } + void vulkan_device::request_destroy() { + graphics_queue_.reset(); + present_queue_.reset(); + compute_queue_.reset(); + transfer_queue_.reset(); + resource_manager_.reset(); + allocator_.reset(); + } + + void vulkan_device::on_created() { + object::on_created(); + + resource_manager_ = make_obj(shared_this()); + } + void vulkan_device::on_destroying() { object::on_destroying(); diff --git a/src/render/vulkan_device.h b/src/render/vulkan_device.h index b0cbb0a..629e27a 100644 --- a/src/render/vulkan_device.h +++ b/src/render/vulkan_device.h @@ -54,7 +54,10 @@ namespace mirai { [[nodiscard]] std::shared_ptr create_fence(); [[nodiscard]] std::shared_ptr create_semaphore(); [[nodiscard]] std::shared_ptr create_timeline_semaphore(u32 initial_value = 0); + + void request_destroy(); protected: + void on_created() override; void on_destroying() override; private: diff --git a/src/render/vulkan_queue.cpp b/src/render/vulkan_queue.cpp index c510fd8..101257b 100644 --- a/src/render/vulkan_queue.cpp +++ b/src/render/vulkan_queue.cpp @@ -1,6 +1,8 @@ #include "render/vulkan_queue.h" namespace mirai { + std::unordered_map vulkan_queue::lock_map_{}; + vulkan_queue::vulkan_queue(vk::Device device, vk::Queue queue, u32 family_index, u32 queue_index) : device_(device) , queue_(queue) , family_index_(family_index), @@ -28,11 +30,13 @@ namespace mirai { } void_result_t vulkan_graphics_queue::submit(const std::vector& command_buffers) { - vk::PipelineStageFlags waitStages[] = {vk::PipelineStageFlagBits::eColorAttachmentOutput}; - submitInfo.setWaitSemaphores(imageAvailableSemaphore); - submitInfo.setPWaitDstStageMask(waitStages); + // vk::PipelineStageFlags waitStages[] = {vk::PipelineStageFlagBits::eColorAttachmentOutput}; + // submitInfo.setWaitSemaphores(imageAvailableSemaphore); + // submitInfo.setPWaitDstStageMask(waitStages); + return {}; } void_result_t vulkan_present_queue::present(const vk::PresentInfoKHR& present_info) { + return {}; } } // namespace mirai diff --git a/src/types/error.h b/src/types/error.h index 6ab921a..6560cf9 100644 --- a/src/types/error.h +++ b/src/types/error.h @@ -86,6 +86,8 @@ enum class error_code : u32 { synchronization_error = 307, /// 资源绑定错误 resource_binding_error = 308, + /// 分配失败 + vulkan_allocation_failed = 309, // ---- 窗口/输入错误 (400-499) ---- /// 窗口创建失败
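
The fixed_size_function introduced in src/core/fixed_size_function.h stores its callable in an inline aligned buffer and dispatches through a hand-rolled vtable, avoiding the heap allocation std::function may perform. The template heads and include arguments did not survive in the patch text above, so the following is a condensed, self-contained sketch of the same type-erasure technique under assumed names (small_function is illustrative, not the engine class); it uses a plain call operator instead of the patch's deducing-this version and omits the move constructor and try_call for brevity.

#include <cstddef>     // std::byte, std::size_t, std::max_align_t
#include <functional>  // std::bad_function_call
#include <memory>      // std::destroy_at
#include <new>         // placement new
#include <type_traits>
#include <utility>

template <typename Signature, std::size_t MaxSize = 64>
class small_function;

template <typename Ret, typename... Args, std::size_t MaxSize>
class small_function<Ret(Args...), MaxSize> {
    // Inline storage for the erased callable; no heap allocation.
    alignas(std::max_align_t) std::byte buffer_[MaxSize]{};

    struct vtable {
        Ret (*call)(void*, Args&&...);
        void (*destroy)(void*);
    };
    const vtable* vtbl_ = nullptr;

    template <typename F>
    static Ret call_impl(void* p, Args&&... args) {
        return (*static_cast<F*>(p))(std::forward<Args>(args)...);
    }
    template <typename F>
    static void destroy_impl(void* p) {
        std::destroy_at(static_cast<F*>(p));
    }
    template <typename F>
    static constexpr vtable vtable_for{&call_impl<F>, &destroy_impl<F>};

public:
    small_function() noexcept = default;
    // Copy/move omitted for brevity; the patch's class provides a move constructor.
    small_function(const small_function&) = delete;
    small_function& operator=(const small_function&) = delete;

    template <typename F>
        requires (!std::is_same_v<std::remove_cvref_t<F>, small_function> &&
                  std::is_invocable_r_v<Ret, std::remove_cvref_t<F>&, Args...>)
    small_function(F&& f) {
        using functor = std::remove_cvref_t<F>;
        static_assert(sizeof(functor) <= MaxSize, "callable does not fit the inline buffer");
        ::new (static_cast<void*>(buffer_)) functor(std::forward<F>(f));
        vtbl_ = &vtable_for<functor>;
    }

    ~small_function() {
        if (vtbl_) vtbl_->destroy(buffer_);
    }

    Ret operator()(Args... args) {
        if (!vtbl_) throw std::bad_function_call();
        return vtbl_->call(buffer_, std::forward<Args>(args)...);
    }

    explicit operator bool() const noexcept { return vtbl_ != nullptr; }
};

// Usage: small capturing lambdas fit in the inline buffer without touching the heap.
// small_function<int(int)> f = [base = 10](int x) { return base + x; };
// int r = f(5); // 15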
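
vma_allocator::alloc_buffer now applies a single allocation policy instead of the removed memory_usage/persistent_mapped switches: request vma::MemoryUsage::eAuto with eHostAccessSequentialWrite and eMapped, prefer eDeviceLocal, and then record whether the allocation actually landed in DEVICE_LOCAL memory, which is what is_direct_access means (resizable-BAR style host-writable VRAM). A minimal sketch of that policy follows, reusing the VulkanMemoryAllocator-Hpp calls visible in the patch. The helper name and result struct are illustrative, it assumes the same no-exception result-pair configuration the patch uses, and the element order of the pair returned by createBuffer is taken from the patch's own structured binding, so it is worth verifying against the vma-hpp version in use.

#include <vulkan/vulkan.hpp>
#include <vk_mem_alloc.hpp> // header name per VulkanMemoryAllocator-Hpp; adjust to the project's include

// Illustrative helper, not engine API. Mirrors the policy in vma_allocator::alloc_buffer:
// let VMA pick the memory type, require a persistent host mapping, and prefer DEVICE_LOCAL
// so that on ReBAR-capable hardware the buffer can be written directly, without staging.
struct direct_buffer {
    vk::Buffer buffer{};
    vma::Allocation allocation{};
    void* mapped = nullptr;      // non-null because eMapped is requested
    bool direct_access = false;  // true if the memory is DEVICE_LOCAL (write straight into VRAM)
};

inline vk::Result create_direct_buffer(vma::Allocator allocator,
                                       vk::DeviceSize size,
                                       vk::BufferUsageFlags usage,
                                       direct_buffer& out) {
    vk::BufferCreateInfo buffer_info{};
    buffer_info.size = size;
    buffer_info.usage = usage | vk::BufferUsageFlagBits::eTransferDst; // keep the staging fallback possible

    vma::AllocationCreateInfo alloc_info{};
    alloc_info.usage = vma::MemoryUsage::eAuto;
    alloc_info.flags = vma::AllocationCreateFlagBits::eHostAccessSequentialWrite |
                       vma::AllocationCreateFlagBits::eMapped;
    alloc_info.preferredFlags = vk::MemoryPropertyFlagBits::eDeviceLocal;

    auto [result, pair] = allocator.createBuffer(buffer_info, alloc_info);
    if (result != vk::Result::eSuccess) {
        return result;
    }
    const auto& [allocation, buffer] = pair; // element order as used in the patch
    out.buffer = buffer;
    out.allocation = allocation;

    const auto info = allocator.getAllocationInfo(allocation);
    const auto props = allocator.getAllocationMemoryProperties(allocation);
    out.mapped = info.pMappedData;
    out.direct_access = static_cast<bool>(props & vk::MemoryPropertyFlagBits::eDeviceLocal);
    return vk::Result::eSuccess;
}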
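
gpu_buffer::upload branches on is_direct_access: when the buffer sits in host-visible VRAM it writes through the persistent mapping, otherwise it fills a transfer-src staging buffer, records a buffer-to-buffer copy on the supplied command buffer, and frees the staging allocation from a cleanup task that runs once the command buffer has executed. The sketch below isolates that control flow; the callback parameters and std::span<const std::byte> are hypothetical stand-ins for the engine's allocator and vulkan_command_buffer API, and the copy is expressed as a vk::BufferCopy region because that is what vkCmdCopyBuffer ultimately consumes.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <functional>
#include <span>
#include <vulkan/vulkan.hpp>

// Illustrative upload flow (engine types replaced by simple stand-ins).
struct staging_alloc {
    vk::Buffer buffer{};
    void* mapped = nullptr;
    vk::DeviceSize size = 0;
};

// Assumed callbacks: how staging memory is obtained/released and how the copy is
// recorded are engine-specific; only the ordering of the steps matters here.
void upload_bytes(std::span<const std::byte> data,
                  void* direct_mapped,                      // non-null when is_direct_access
                  vk::Buffer dst, vk::DeviceSize dst_size,
                  const std::function<staging_alloc(vk::DeviceSize)>& alloc_staging,
                  const std::function<void(vk::Buffer, vk::Buffer, vk::BufferCopy)>& record_copy,
                  const std::function<void(std::function<void()>)>& add_cleanup_task,
                  const std::function<void(staging_alloc)>& free_staging) {
    const vk::DeviceSize byte_count = std::min<vk::DeviceSize>(dst_size, data.size());

    if (direct_mapped != nullptr) {
        // ReBAR path: the destination is host-visible VRAM, write it directly.
        std::memcpy(direct_mapped, data.data(), static_cast<std::size_t>(byte_count));
        return;
    }

    // Staging path: CPU -> staging buffer -> GPU copy recorded on the command buffer.
    staging_alloc staging = alloc_staging(byte_count);
    std::memcpy(staging.mapped, data.data(), static_cast<std::size_t>(byte_count));

    vk::BufferCopy region{};
    region.srcOffset = 0;
    region.dstOffset = 0;
    region.size = byte_count;
    record_copy(staging.buffer, dst, region);

    // The staging buffer must stay alive until the GPU has executed the copy,
    // so its release is queued as a cleanup task on the command buffer.
    add_cleanup_task([free_staging, staging] { free_staging(staging); });
}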
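
The commented-out upload_thread_func in resource_manager.cpp sketches the intended completion model: each upload is tied to a timeline-semaphore target value, a polling thread reads the semaphore counter, and work whose target has been reached is resumed. The snippet below shows that polling step with the coroutine plumbing elided; pending_upload and on_complete are illustrative stand-ins for the task pool and the resumed std::coroutine_handle, and it assumes the default vulkan-hpp configuration in which getSemaphoreCounterValue returns the value directly.

#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>
#include <vulkan/vulkan.hpp>

// Reduced sketch of the polling loop: work items wait on a timeline semaphore value;
// once the GPU counter reaches the target, the completion action runs (in the engine
// this resumes a coroutine handle and the cleanup code after co_await executes).
struct pending_upload {
    uint64_t target_value = 0;
    std::function<void()> on_complete; // stands in for std::coroutine_handle<>::resume()
};

inline void poll_timeline(vk::Device device, vk::Semaphore timeline,
                          std::mutex& mutex, std::vector<pending_upload>& pending) {
    const uint64_t gpu_value = device.getSemaphoreCounterValue(timeline);

    std::vector<std::function<void()>> ready;
    {
        std::lock_guard lock(mutex);
        for (auto it = pending.begin(); it != pending.end();) {
            if (gpu_value >= it->target_value) {
                ready.push_back(std::move(it->on_complete));
                it = pending.erase(it);   // remove before running, to avoid re-entrancy issues
            } else {
                ++it;
            }
        }
    }
    for (auto& fn : ready) {
        if (fn) fn();                     // resume / clean up outside the lock
    }
}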
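
vulkan_thread_context is declared thread_local and hands out command buffers from one vk::CommandPool per queue family, so each thread allocates and frees from its own pools and no lock is needed around the pool itself. A reduced sketch of that pattern is below; the class and function names are illustrative, it keys pools by family index only (a single device, as in the patch), and it assumes the default exception-throwing vulkan-hpp calls rather than the engine's result-pair style.

#include <cstdint>
#include <unordered_map>
#include <vulkan/vulkan.hpp>

// Reduced sketch of a per-thread command pool cache keyed by queue family.
class thread_command_pools {
public:
    static thread_command_pools& get() {
        static thread_local thread_command_pools instance; // one instance per thread
        return instance;
    }

    vk::CommandBuffer allocate(vk::Device device, uint32_t family_index,
                               vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary) {
        vk::CommandBufferAllocateInfo info(pool_for(device, family_index), level, 1);
        auto buffers = device.allocateCommandBuffers(info); // throws on failure in this configuration
        return buffers.front();
    }

    void free(vk::Device device, uint32_t family_index, vk::CommandBuffer cmd) {
        device.freeCommandBuffers(pool_for(device, family_index), 1, &cmd);
    }

private:
    vk::CommandPool pool_for(vk::Device device, uint32_t family_index) {
        auto it = pools_.find(family_index);
        if (it != pools_.end()) {
            return it->second;
        }
        // eResetCommandBuffer lets individual command buffers be reset and reused.
        vk::CommandPoolCreateInfo info(vk::CommandPoolCreateFlagBits::eResetCommandBuffer, family_index);
        vk::CommandPool pool = device.createCommandPool(info);
        pools_.emplace(family_index, pool);
        return pool;
    }

    std::unordered_map<uint32_t, vk::CommandPool> pools_;
};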