Complete buffer creation and destruction, and data upload in direct-access mode

2026-01-16 20:04:17 +08:00
parent 735f1a08b6
commit d45ae1d436
27 changed files with 638 additions and 215 deletions

View File

@@ -2,8 +2,21 @@
#include "core/logger.h"
#include "core/time_util.h"
#include "gpu_resource/gpu_buffer.h"
namespace mirai {
void test_func() {
buffer_create_info info;
info.debug_name = "test buffer";
info.device = vulkan_context::get().get_default_device();
info.size = 64;
info.usage = buffer_usage::vertex;
auto buf = make_obj<gpu_buffer>(info);
std::array<uint8_t, 64> test_data{};
buf->upload(nullptr, std::span(test_data));
}
bool mirai_app::setup(const mirai_app_config& config) {
window_mgr_ = make_obj<window_manager>();
@@ -14,6 +27,8 @@ namespace mirai {
vulkan_context_config context_config{window_mgr_->get_main_window()->get_vk_surface()};
vulkan_context::get().setup(context_config);
test_func();
return true;
}

View File

@@ -0,0 +1,98 @@
#pragma once
#include <iostream>
#include <type_traits>
#include <utility>
#include <expected>
#include <cstring>
#include <functional>
#include <string_view> // error type used by try_call
template <typename Signature, size_t MaxSize = 32>
class fixed_size_function;
template <typename Ret, typename... Args, size_t MaxSize>
class fixed_size_function<Ret(Args...), MaxSize> {
private:
// Aligned inline storage for the stored callable
alignas(std::max_align_t) std::byte buffer[MaxSize];
struct v_table {
Ret (*call)(const void*, Args&&...);
void (*destroy)(void*);
void (*move)(void* dest, void* src);
};
const v_table* vtable = nullptr;
template <typename F>
static Ret call_impl(const void* ptr, Args&&... args) {
// The target is invoked through a const pointer, so F::operator() must be const-callable
// here (std::function places a similar requirement on its target).
return (*static_cast<const F*>(ptr))(std::forward<Args>(args)...);
}
template <typename F>
static void destroy_impl(void* ptr) {
if constexpr (!std::is_trivially_destructible_v<F>) {
std::destroy_at(static_cast<F*>(ptr)); // std::destroy_at (C++17) is cleaner than an explicit destructor call
}
}
template <typename F>
static void move_impl(void* dest, void* src) {
if constexpr (std::is_trivially_copyable_v<F>) {
std::memcpy(dest, src, sizeof(F));
} else {
new (dest) F(std::move(*static_cast<F*>(src)));
}
}
template <typename F>
static constexpr v_table vtable_for = { call_impl<F>, destroy_impl<F>, move_impl<F> };
public:
fixed_size_function() noexcept = default;
// Use a C++20 requires clause instead of std::enable_if
template <typename F>
requires (!std::is_same_v<std::remove_cvref_t<F>, fixed_size_function> &&
std::is_invocable_r_v<Ret, F, Args...>)
fixed_size_function(F&& f) {
using functor = std::remove_cvref_t<F>;
static_assert(sizeof(functor) <= MaxSize, "callable is too large for the inline buffer");
new (buffer) functor(std::forward<F>(f));
vtable = &vtable_for<functor>;
}
~fixed_size_function() {
if (vtable) vtable->destroy(buffer);
}
// Move construction
fixed_size_function(fixed_size_function&& other) noexcept {
if (other.vtable) {
other.vtable->move(buffer, other.buffer);
vtable = other.vtable;
other.vtable = nullptr;
}
}
// C++23 deducing this collapses the call-operator overloads:
// the current const qualification and value category are captured in Self.
template <typename Self>
Ret operator()(this Self&& self, Args... args) {
if (!self.vtable) [[unlikely]] { // C++20 attribute to hint branch prediction
throw std::bad_function_call();
}
return self.vtable->call(self.buffer, std::forward<Args>(args)...);
}
// With C++23, std::expected could replace the exception entirely; operator() keeps the
// conventional throwing behaviour, while try_call offers a non-throwing alternative.
std::expected<Ret, std::string_view> try_call(Args... args) const {
if (!vtable) return std::unexpected("empty function");
return vtable->call(buffer, std::forward<Args>(args)...);
}
explicit operator bool() const noexcept { return vtable != nullptr; }
};
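A brief usage sketch (not part of the commit; the lambdas and asserted values are purely illustrative) of how this wrapper is intended to behave:

#include <cassert>

// Hypothetical example exercising fixed_size_function.
inline void fixed_size_function_example() {
    fixed_size_function<int(int, int)> add = [](int a, int b) { return a + b; };
    assert(add);                        // non-empty after construction
    assert(add(2, 3) == 5);             // deducing-this operator()
    auto checked = add.try_call(4, 5);  // std::expected-based, non-throwing call
    assert(checked && *checked == 9);
    fixed_size_function<int(int, int)> moved = std::move(add);
    assert(!add && moved(1, 1) == 2);   // the moved-from wrapper becomes empty
    // A capture larger than MaxSize (32 bytes by default) trips the static_assert:
    // std::array<char, 64> big{}; fixed_size_function<void()> f = [big] {};
}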

View File

@@ -45,14 +45,18 @@ namespace mirai {
struct logger_config {
std::string name{"mirai"};
#if MIRAI_DEBUG
log_level level{log_level::trace};
#else
log_level level{log_level::info};
#endif
bool console_enabled{true};
bool file_enabled{true};
std::filesystem::path file_path{"logs/mirai.log"};
bool file_rotation_enabled{true};
size_type max_file_size{10 * 1024 * 1024}; // 10 MB
size_type max_files{5};
std::string pattern{"[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] [%t] %v"};
std::string pattern{"[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] %v"};
bool include_source_location{true};
bool async_enabled{false};
size_type async_queue_size{8192};
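A short sketch of overriding the defaults above (field names come from the struct; the values chosen here are made up):

// Hypothetical: enable async logging with a custom file target.
logger_config cfg{};
cfg.name = "render";
cfg.file_path = "logs/render.log";
cfg.async_enabled = true;  // backed by async_queue_size (8192 entries by default)
cfg.max_files = 3;         // keep fewer rotated files than the default 5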

View File

@@ -218,11 +218,11 @@ namespace mirai {
[[nodiscard]] constexpr virtual const mirai::type_info* get_parent_type() const noexcept override { \
return &mirai::get_type_info<parent_t>(); \
} \
[[nodiscard]] constexpr auto shared_this() noexcept -> std::shared_ptr<class_name> { \
return std::static_pointer_cast<class_name>(mirai::object::shared_from_this()); \
[[nodiscard]] auto shared_this() noexcept -> std::shared_ptr<class_name> { \
return std::static_pointer_cast<class_name>(shared_from_this()); \
} \
[[nodiscard]] constexpr auto shared_this() const noexcept -> std::shared_ptr<const class_name> { \
return std::static_pointer_cast<const class_name>(mirai::object::shared_from_this()); \
[[nodiscard]] auto shared_this() const noexcept -> std::shared_ptr<const class_name> { \
return std::static_pointer_cast<const class_name>(shared_from_this()); \
} \
friend struct mirai::object_factory; \
template<typename T_> \

View File

@@ -6,6 +6,7 @@
#include <vk_mem_alloc.hpp>
namespace mirai {
class vulkan_device;
class vulkan_queue;
/**
@@ -22,10 +23,10 @@ namespace mirai {
vk::DeviceMemory device_memory{};
/// Mapped pointer (when the allocation is mapped)
void* mapped_data = nullptr;
/// Whether the memory supports direct host access (device-local and host-visible, e.g. ReBAR)
bool is_direct_access = false;
/// Memory type index
u32 memory_type_index = 0;
/// Whether this is a dedicated allocation
bool is_dedicated = false;
};
/**
@@ -36,14 +37,14 @@ namespace mirai {
u64 size = 0;
/// Buffer usage
buffer_usage usage = buffer_usage::vertex;
/// Memory usage
memory_usage mem_usage = memory_usage::gpu_only;
/// Resource sharing mode
resource_sharing_mode sharing_mode = resource_sharing_mode::exclusive;
/// Whether the buffer stays persistently mapped
bool persistent_mapped = false;
/// Owning device
std::shared_ptr<vulkan_device> device{};
#if MIRAI_DEBUG
/// Debug name
std::string debug_name;
#endif
};
struct buffer_allocation_info {
/// Buffer size in bytes
@@ -68,6 +69,9 @@ namespace mirai {
vma::Allocation allocation = VK_NULL_HANDLE;
/// Allocation info
allocation_info info;
#if MIRAI_DEBUG
std::string debug_name;
#endif
/// Whether the allocation is valid
[[nodiscard]] bool is_valid() const noexcept {
return buffer && allocation;

View File

@@ -82,53 +82,13 @@ namespace mirai {
vma_allocator_ = result.value();
}
vma::MemoryUsage vma_allocator::to_vma_memory_usage(memory_usage usage) noexcept {
switch (usage) {
case memory_usage::gpu_only:
return vma::MemoryUsage::eGpuOnly;
case memory_usage::cpu_only:
return vma::MemoryUsage::eCpuOnly;
case memory_usage::cpu_to_gpu:
return vma::MemoryUsage::eCpuToGpu;
case memory_usage::gpu_to_cpu:
return vma::MemoryUsage::eGpuToCpu;
case memory_usage::auto_prefer_device:
return vma::MemoryUsage::eAutoPreferDevice;
case memory_usage::auto_prefer_host:
return vma::MemoryUsage::eAutoPreferHost;
default:
return vma::MemoryUsage::eAuto;
}
}
vma::AllocationCreateFlags
vma_allocator::to_vma_allocation_flags(memory_usage usage, bool persistent_mapped) noexcept {
vma::AllocationCreateFlags flags;
// Persistent mapping
if (persistent_mapped) {
flags |= vma::AllocationCreateFlagBits::eMapped;
}
// Choose the host access pattern based on the memory usage
switch (usage) {
case memory_usage::cpu_only:
case memory_usage::cpu_to_gpu:
flags |= vma::AllocationCreateFlagBits::eHostAccessSequentialWrite;
break;
case memory_usage::gpu_to_cpu:
flags |= vma::AllocationCreateFlagBits::eHostAccessRandom;
break;
default:
break;
}
return flags;
}
result_t<buffer_allocation> vma_allocator::alloc_buffer(const buffer_create_info& info) {
buffer_allocation alloc;
#if MIRAI_DEBUG
alloc.debug_name = info.debug_name;
#endif
vk::BufferCreateInfo buffer_info{};
buffer_info.size = info.size;
buffer_info.usage = to_vulkan_buffer_usage(info.usage);
@@ -139,17 +99,91 @@ namespace mirai {
}
vma::AllocationCreateInfo alloc_info{};
alloc_info.usage = to_vma_memory_usage(info.mem_usage);
alloc_info.flags = to_vma_allocation_flags(info.mem_usage, info.persistent_mapped);
vma::AllocationInfo vma_alloc_info{};
vk::Result vk_result;
vk::Buffer vk_buffer;
alloc_info.usage = vma::MemoryUsage::eAuto;
alloc_info.flags = vma::AllocationCreateFlagBits::eHostAccessSequentialWrite;
alloc_info.flags |= vma::AllocationCreateFlagBits::eMapped;
alloc_info.preferredFlags = vk::MemoryPropertyFlagBits::eDeviceLocal;
{
std::lock_guard lock(mutex_);
// vk_result = vma::UniqueBuffer(vma_allocator_, &buffer_info, &alloc_info, reinterpret_cast<VkBuffer*>(&vk_buffer),
// reinterpret_cast<VmaAllocation*>(&alloc.allocation), &vma_alloc_info);
auto [result, pair] = vma_allocator_.createBuffer(buffer_info, alloc_info);
if (result != vk::Result::eSuccess) {
return MAKE_ERROR_INFO(error_code::vulkan_allocation_failed,
"VMA 分配 Buffer 失败: {}",
vk::to_string(result));
}
const auto& [allocation, buffer] = pair;
alloc.buffer = buffer;
alloc.allocation = allocation;
}
auto res_info = vma_allocator_.getAllocationInfo(alloc.allocation);
auto mem_flags = vma_allocator_.getAllocationMemoryProperties(alloc.allocation);
// Direct access means host-writable device-local memory (e.g. Resizable BAR)
auto is_direct_access = static_cast<bool>(mem_flags & vk::MemoryPropertyFlagBits::eDeviceLocal) &&
static_cast<bool>(mem_flags & vk::MemoryPropertyFlagBits::eHostVisible);
// The pointer is already mapped because the eMapped flag was requested
const auto mapped_ptr = res_info.pMappedData;
alloc.info.size = res_info.size;
alloc.info.offset = res_info.offset;
alloc.info.device_memory = res_info.deviceMemory;
alloc.info.mapped_data = mapped_ptr;
alloc.info.is_direct_access = is_direct_access;
alloc.info.memory_type_index = res_info.memoryType;
return alloc;
}
result_t<buffer_allocation> vma_allocator::alloc_staging_buffer(const buffer_create_info& info) {
buffer_allocation alloc;
vma::AllocationCreateInfo alloc_info{};
alloc_info.usage = vma::MemoryUsage::eAuto;
alloc_info.flags = vma::AllocationCreateFlagBits::eHostAccessSequentialWrite;
alloc_info.flags |= vma::AllocationCreateFlagBits::eMapped;
alloc_info.preferredFlags = vk::MemoryPropertyFlagBits::eHostVisible;
vk::BufferCreateInfo buffer_create_info{};
buffer_create_info.setSize(info.size);
buffer_create_info.setUsage(vk::BufferUsageFlagBits::eTransferSrc);
{
auto [result, pair] = vma_allocator_.createBuffer(buffer_create_info, alloc_info);
if (result != vk::Result::eSuccess) {
return MAKE_ERROR_INFO(error_code::vulkan_allocation_failed,
"VMA 分配暂存 Buffer 失败: {}",
vk::to_string(result));
}
const auto& [allocation, buffer] = pair;
alloc.buffer = buffer;
alloc.allocation = allocation;
}
auto res_info = vma_allocator_.getAllocationInfo(alloc.allocation);
auto mem_flags = vma_allocator_.getAllocationMemoryProperties(alloc.allocation);
auto is_direct_access = static_cast<bool>(mem_flags & vk::MemoryPropertyFlagBits::eHostVisible);
// The pointer is already mapped because the eMapped flag was requested
const auto mapped_ptr = res_info.pMappedData;
alloc.info.size = res_info.size;
alloc.info.offset = res_info.offset;
alloc.info.device_memory = res_info.deviceMemory;
alloc.info.mapped_data = mapped_ptr;
alloc.info.is_direct_access = is_direct_access;
alloc.info.memory_type_index = res_info.memoryType;
return alloc;
}
void_result_t vma_allocator::free_buffer(const buffer_allocation& buffer) {
{
std::lock_guard lock(mutex_);
vma_allocator_.destroyBuffer(buffer.buffer, buffer.allocation);
}
return {};
}
void_result_t vma_allocator::free_staging_buffer(const buffer_allocation& buffer) {
{
std::lock_guard lock(mutex_);
vma_allocator_.destroyBuffer(buffer.buffer, buffer.allocation);
}
return {};
}
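A minimal sketch (the caller and its error handling are assumptions; only APIs shown in this diff are used) of consuming the allocation info returned by alloc_buffer:

// Hypothetical caller: allocate, check for direct host access, then release.
void probe_direct_access(vma_allocator& allocator, const std::shared_ptr<vulkan_device>& device) {
    buffer_create_info info{};
    info.size = 256;
    info.usage = buffer_usage::vertex;
    info.device = device;
    auto alloc = allocator.alloc_buffer(info);
    if (!alloc) {
        return; // the error already carries the VMA result string
    }
    const auto& a = alloc.value();
    if (a.info.is_direct_access && a.info.mapped_data) {
        // ReBAR-style path: the host can write device-local memory through the mapped pointer.
        *static_cast<unsigned char*>(a.info.mapped_data) = 0;
    }
    (void)allocator.free_buffer(a); // must be returned to the same allocator
}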

View File

@@ -42,10 +42,10 @@ namespace mirai {
void setup(const allocator_config& config);
vma::MemoryUsage to_vma_memory_usage(memory_usage usage) noexcept;
vma::AllocationCreateFlags to_vma_allocation_flags(memory_usage usage, bool persistent_mapped) noexcept;
result_t<buffer_allocation> alloc_buffer(const buffer_create_info& info);
result_t<buffer_allocation> alloc_staging_buffer(const buffer_create_info& info);
void_result_t free_buffer(const buffer_allocation& buffer);
void_result_t free_staging_buffer(const buffer_allocation& buffer);
protected:
void on_destroying() override;
private:

View File

@@ -1,21 +1,85 @@
#include "gpu_buffer.h"
#include "allocator.h"
#include "core/logger.h"
#include "render/vulkan_command_buffer.h"
#include "render/vulkan_device.h"
#include "resource_types_to_string.h"
namespace mirai {
gpu_buffer::gpu_buffer(const buffer_create_info& info) {
vma_allocator::get();
device_ = info.device;
auto allocator = device_->get_allocator();
auto buffer = allocator->alloc_buffer(info);
if (!buffer) {
MIRAI_LOG_ERROR("分配 GPU Buffer 失败: {}", buffer.error().full_description());
return;
}
allocation_ = buffer.value();
#if MIRAI_DEBUG
MIRAI_LOG_DEBUG("创建 GPU Buffer {}: 大小={} 字节, 用途={}, 共享模式={}",
info.debug_name,
allocation_.info.size,
to_string(info.usage),
to_string(info.sharing_mode)
);
#endif
}
bool gpu_buffer::is_host_visible() const noexcept {
switch (mem_usage_) {
case memory_usage::cpu_only:
case memory_usage::cpu_to_gpu:
case memory_usage::gpu_to_cpu:
case memory_usage::auto_prefer_host:
return true;
default:
return false;
gpu_buffer::~gpu_buffer() {
if (allocation_.is_valid()) {
auto allocator = device_->get_allocator();
auto result = allocator->free_buffer(allocation_);
if (!result) {
MIRAI_LOG_ERROR("释放 GPU Buffer 失败: {}", result.error().full_description());
}
#if MIRAI_DEBUG
MIRAI_LOG_DEBUG("销毁 GPU Buffer {},大小={} 字节", allocation_.debug_name, allocation_.info.size);
#endif
}
}
void gpu_buffer::upload(std::shared_ptr<vulkan_command_buffer> cmd, std::span<uint8_t> data) {
// With Resizable BAR enabled, device-local memory can be written directly from the host
if (allocation_.info.is_direct_access) {
auto mapped_ptr = static_cast<uint8_t*>(allocation_.info.mapped_data);
memcpy(mapped_ptr, data.data(), std::min<u64>(allocation_.info.size, data.size()));
} else {
// Upload through a staging buffer; this path requires a recording command buffer
if (!cmd) {
MIRAI_LOG_ERROR("Staging upload requires a valid command buffer");
return;
}
auto staging_buffer_info = buffer_create_info{};
staging_buffer_info.size = allocation_.info.size;
staging_buffer_info.usage = buffer_usage::transfer_src;
staging_buffer_info.sharing_mode = resource_sharing_mode::concurrent;
staging_buffer_info.device = device_;
auto allocator = device_->get_allocator();
auto staging_buffer_alloc = allocator->alloc_staging_buffer(staging_buffer_info);
if (!staging_buffer_alloc) {
MIRAI_LOG_ERROR("分配暂存 Buffer 失败: {}", staging_buffer_alloc.error().full_description());
return;
}
const auto& staging_buffer = staging_buffer_alloc.value();
// Copy the data into the staging buffer
auto mapped_ptr = static_cast<uint8_t*>(staging_buffer.info.mapped_data);
memcpy(mapped_ptr, data.data(), std::min<u64>(staging_buffer.info.size, data.size()));
// Record the copy from the staging buffer into the destination buffer
cmd->copy_buffer(staging_buffer.buffer, allocation_.buffer, vk::BufferCopy{0, 0, allocation_.info.size});
vulkan_cmd_buffer_cleanup_task cleanup_task{};
#if MIRAI_DEBUG
cleanup_task.debug_name = "release staging buffer after GPU buffer upload";
#endif
cleanup_task.task = [allocator, staging_buffer] {
// Free the staging buffer once the command buffer has finished executing
auto free_result = allocator->free_staging_buffer(staging_buffer);
if (!free_result) {
MIRAI_LOG_ERROR("释放暂存 Buffer 失败: {}", free_result.error().full_description());
}
};
cmd->add_cleanup_task(cleanup_task);
}
}
}
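A rough sketch of the staging-path call sequence around gpu_buffer::upload (the submission and synchronization steps are assumptions, since the queue wrapper's submit is still stubbed in this commit):

// Hypothetical end-to-end staging upload; error handling trimmed.
void upload_through_staging(const std::shared_ptr<gpu_buffer>& buffer,
                            const std::shared_ptr<vulkan_queue>& transfer_queue,
                            std::span<uint8_t> data) {
    auto cmd = make_obj<vulkan_command_buffer>(transfer_queue);
    if (cmd->begin() != vk::Result::eSuccess) {
        return;
    }
    buffer->upload(cmd, data);   // records the staging copy and queues a cleanup task
    if (cmd->end() != vk::Result::eSuccess) {
        return;
    }
    // Assumed: submit cmd on transfer_queue and wait (fence/timeline semaphore)
    // before running the cleanup tasks that free the staging buffer.
    cmd->execute_cleanup_tasks();
}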

View File

@@ -9,29 +9,16 @@ namespace mirai {
MIRAI_OBJECT_TYPE_INFO(gpu_buffer, gpu_resource)
gpu_buffer(const buffer_create_info& info);
~gpu_buffer() override;
void upload(std::shared_ptr<vulkan_command_buffer> cmd, std::span<uint8_t> data) override;
[[nodiscard]] auto get_allocation_info() const noexcept { return allocation_.info; }
[[nodiscard]] auto get_buffer() const noexcept { return allocation_.buffer; }
[[nodiscard]] u64 get_size() const noexcept { return size_; }
[[nodiscard]] buffer_usage get_usage() const noexcept { return usage_; }
[[nodiscard]] memory_usage get_memory_usage() const noexcept { return mem_usage_; }
[[nodiscard]] bool is_persistent_mapped() const noexcept { return persistent_mapped_; }
[[nodiscard]] void* get_mapped_data() const noexcept { return mapped_data_; }
[[nodiscard]] auto get_size() const noexcept { return allocation_.info.size; }
private:
[[nodiscard]] bool is_host_visible() const noexcept;
buffer_allocation allocation_;
// Requested buffer size
u64 size_ = 0;
// Buffer usage
buffer_usage usage_ = buffer_usage::vertex;
// Memory usage
memory_usage mem_usage_ = memory_usage::gpu_only;
// Whether the buffer stays persistently mapped
bool persistent_mapped_ = false;
// Mapped pointer
void* mapped_data_ = nullptr;
// Debug name
std::string debug_name_;
std::shared_ptr<vulkan_device> device_;
};
}

View File

@@ -3,7 +3,10 @@
#include "resource_types.h"
#include "core/object.h"
#include <future>
namespace mirai {
class vulkan_command_buffer;
class vulkan_queue;
class gpu_resource : public object {
@@ -12,9 +15,11 @@ namespace mirai {
// With no arguments, the resource uses concurrent sharing mode
gpu_resource() : sharing_mode_(resource_sharing_mode::concurrent) {}
// When a queue is passed in, the resource uses exclusive sharing mode
gpu_resource(std::shared_ptr<vulkan_queue> queue) : sharing_mode_(resource_sharing_mode::exclusive), queue_(queue) {}
gpu_resource(const std::shared_ptr<vulkan_queue>& queue) : sharing_mode_(resource_sharing_mode::exclusive),
queue_(queue) {
}
virtual resource_async_task upload(std::vector<uint8_t> data) = 0;
virtual void upload(std::shared_ptr<vulkan_command_buffer> cmd, std::span<uint8_t> data) = 0;
[[nodiscard]] auto get_sharing_mode() const noexcept { return sharing_mode_; }
[[nodiscard]] auto get_queue() const noexcept { return queue_.lock(); }

View File

@@ -4,23 +4,23 @@
namespace mirai {
render_target::render_target(vk::Image image) {
image_ref_ = image;
// image_ref_ = image;
auto default_device = vulkan_context::get().get_default_device();
// Create the image view
vk::ImageViewCreateInfo view_info{};
view_info.setImage(image_ref_);
view_info.setViewType(vk::ImageViewType::e2D);
view_info.setFormat(vk::Format::eB8G8R8A8Srgb);
// auto default_device = vulkan_context::get().get_default_device();
// // Create the image view
// vk::ImageViewCreateInfo view_info{};
// view_info.setImage(image_ref_);
// view_info.setViewType(vk::ImageViewType::e2D);
// view_info.setFormat(vk::Format::eB8G8R8A8Srgb);
}
void render_target::resize(vec2i size) {
auto default_device = vulkan_context::get().get_default_device();
default_device->get_device().waitIdle();
// default_device->get_device().waitIdle();
}
vec2i render_target::get_size() const noexcept {
return vec2i{800, 600};
}
}

View File

@@ -17,7 +17,7 @@ namespace mirai {
void resize(vec2i size);
vec2i get_size() const noexcept;
[[nodiscard]] vk::ImageView get_image_view() const noexcept { return image_view_; }
// [[nodiscard]] vk::ImageView get_image_view() const noexcept { return image_view_; }
[[nodiscard]] vk::Framebuffer get_framebuffer() const noexcept { return framebuffer_; }
protected:
void on_destroying() override;

View File

@@ -8,6 +8,7 @@
#include "render/vulkan_device.h"
#include "gpu_resource/texture/texture.h"
#include "gpu_resource/gpu_buffer.h"
#include "render/vulkan_command_buffer.h"
#include "render/vulkan_fence.h"
#include "render/vulkan_time_semaphore.h"
@@ -71,22 +72,27 @@ namespace mirai {
resource_async_task resource_manager::upload_resource(const std::shared_ptr<texture>& tex, const std::vector<uint8_t>& data) {
// 1. This runs on the manager thread and dispatches the submit immediately
uint64_t completion_value = dispatch_immediate_transfer(tex, data);
// uint64_t completion_value = dispatch_immediate_transfer(tex, data);
// 2. Suspend and wait for the GPU
// The co_await here invokes the timeline awaiter
co_await timeline_awaiter{get_semaphore(), completion_value};
// co_await timeline_awaiter{get_semaphore(), completion_value};
// 3. Once the manager polls completion it calls handle.resume(),
// and execution resumes here to do the cleanup work
MIRAI_LOG_DEBUG("Upload finished, cleaning up staging buffer...");
return {};
}
uint64_t resource_manager::dispatch_immediate_transfer(std::shared_ptr<texture> tex, std::shared_ptr<staging_buffer> staging) {
auto queue = device_->get_transfer_queue();
auto cmd = device_->create_command_buffer(vk::CommandBufferLevel::ePrimary);
auto cmd = make_obj<vulkan_command_buffer>(queue);
cmd.begin(vk::CommandBufferBeginInfo{vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
auto result = cmd->begin();
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("开始命令缓冲区失败: {}", vk::to_string(result));
throw std::runtime_error("开始命令缓冲区失败");
}
vk::ImageSubresourceRange range{vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1};
vk::ImageMemoryBarrier2 barrier{vk::PipelineStageFlagBits2::eTopOfPipe, vk::AccessFlagBits2::eNone,
@@ -95,94 +101,90 @@ namespace mirai {
VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
tex->get_image(), range};
cmd.pipelineBarrier2(vk::DependencyInfo{0, nullptr, 0, nullptr, 1, &barrier});
vk::DependencyInfo begin_barrier_info{};
begin_barrier_info.setImageMemoryBarriers({barrier});
cmd->pipeline_barrier(begin_barrier_info);
vk::BufferImageCopy copy_region{0, 0, 0,
vk::ImageAspectFlagBits::eColor, 0, {0, 0, 1},
tex->get_extent()};
cmd.copyBufferToImage(staging->buffer, tex->get_image(), vk::ImageLayout::eTransferDstOptimal, 1, &copy_region);
vk::BufferImageCopy copy_region{};
copy_region.setImageExtent(tex->get_extent());
copy_region.setImageSubresource({vk::ImageAspectFlagBits::eColor, 0, 0, 1});
cmd->copy_buffer_to_image(staging->buffer, copy_region, tex->get_image(), vk::ImageLayout::eTransferDstOptimal);
vk::ImageMemoryBarrier2 barrier2{vk::PipelineStageFlagBits2::eCopy, vk::AccessFlagBits2::eTransferWrite,
vk::PipelineStageFlagBits2::eBottomOfPipe, vk::AccessFlagBits2::eNone,
vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal,
VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
tex->get_image(), range};
cmd.pipelineBarrier2(vk::DependencyInfo{0, nullptr, 0, nullptr, 1, &barrier2});
vk::DependencyInfo end_barrier_info{};
end_barrier_info.setImageMemoryBarriers({barrier2});
cmd->pipeline_barrier(end_barrier_info);
cmd.end();
result = cmd->end();
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("结束命令缓冲区失败: {}", vk::to_string(result));
throw std::runtime_error("结束命令缓冲区失败");
}
auto fence = device_->create_fence();
vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr};
queue.submit(1, &submit_info, fence);
queue->submit({cmd->get_command_buffer()}, {}, {});
auto res = fence->device_wait();
if (res == vk::Result::eSuccess) {
fence.reset();
}
device_->destroy_command_buffer(cmd);
return ++last_value_;
}
uint64_t resource_manager::dispatch_immediate_transfer(std::shared_ptr<gpu_buffer> buffer, std::shared_ptr<staging_buffer> staging) {
auto& ctx = vulkan_context::get();
auto device = ctx.get_default_device();
auto queue = ctx.get_transfer_queue();
auto cmd = device->create_command_buffer(vk::CommandBufferLevel::ePrimary);
cmd.begin(vk::CommandBufferBeginInfo{vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
vk::BufferCopy copy_region{0, 0, buffer->get_size()};
cmd.copyBuffer(staging->buffer, buffer->get_buffer(), 1, &copy_region);
cmd.end();
auto fence = device->create_fence();
vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr};
queue.submit(1, &submit_info, fence);
auto result = device->wait_for_fence(fence);
if (result == file_result::success) {
device->destroy_fence(fence);
}
device->destroy_command_buffer(cmd);
// auto queue = device_->get_transfer_queue();
// auto cmd = make_obj<vulkan_command_buffer>(queue);
//
// auto result = cmd->begin();
// vk::BufferCopy copy_region{0, 0, buffer->get_size()};
// cmd->copy_buffer(staging->buffer, buffer->get_buffer(), copy_region);
// result = cmd->end();
//
// auto fence = make_obj<vulkan_fence>(device_);
// vk::SubmitInfo submit_info{0, nullptr, nullptr, 1, &cmd, 0, nullptr};
// queue->submit(1, &submit_info, fence);
//
// result = fence->device_wait();
return ++last_value_;
}
void resource_manager::upload_thread_func() {
while (running_) {
uint64_t gpu_value = 0;
auto res = device_.getSemaphoreCounterValue(global_timeline_, &gpu_value);
if (res == vk::Result::eSuccess) {
std::lock_guard lock(active_mutex_);
for (auto it = active_indices_.begin(); it != active_indices_.end(); ) {
size_t idx = *it;
if (gpu_value >= task_pool_[idx].target_value) {
// Wake the coroutine
auto handle = task_pool_[idx].handle;
// Mark inactive first, then resume, to avoid re-entrancy issues
task_pool_[idx].is_active.store(false);
it = active_indices_.erase(it);
// Resume the coroutine so its cleanup code runs
if (handle) handle.resume();
} else {
++it;
}
}
}
// Avoid pegging the CPU at 100%
if (active_indices_.empty()) {
std::this_thread::sleep_for(std::chrono::milliseconds(1));
} else {
std::this_thread::yield();
}
}
// while (running_) {
// uint64_t gpu_value = 0;
// auto res = device_.getSemaphoreCounterValue(global_timeline_, &gpu_value);
//
// if (res == vk::Result::eSuccess) {
// std::lock_guard lock(active_mutex_);
//
// for (auto it = active_indices_.begin(); it != active_indices_.end(); ) {
// size_t idx = *it;
// if (gpu_value >= task_pool_[idx].target_value) {
// // Wake the coroutine
// auto handle = task_pool_[idx].handle;
//
// // Mark inactive first, then resume, to avoid re-entrancy issues
// task_pool_[idx].is_active.store(false);
// it = active_indices_.erase(it);
//
// // Resume the coroutine so its cleanup code runs
// if (handle) handle.resume();
// } else {
// ++it;
// }
// }
// }
//
// // Avoid pegging the CPU at 100%
// if (active_indices_.empty()) {
// std::this_thread::sleep_for(std::chrono::milliseconds(1));
// } else {
// std::this_thread::yield();
// }
// }
}
}

View File

@@ -19,6 +19,19 @@ namespace mirai {
std::coroutine_handle<> handle;
uint64_t target_value = 0;
std::atomic<bool> is_active{ false };
resource_upload_task() = default;
resource_upload_task(const resource_upload_task& other) noexcept {
handle = other.handle;
target_value = other.target_value;
is_active.store(other.is_active.load());
}
auto& operator=(const resource_upload_task& other) noexcept {
handle = other.handle;
target_value = other.target_value;
is_active.store(other.is_active.load());
return *this;
}
};
struct resource_async_task {

View File

@@ -0,0 +1,43 @@
#include "resource_types_to_string.h"
namespace mirai {
std::string to_string(resource_sharing_mode mode) {
switch (mode) {
case resource_sharing_mode::exclusive:
return "Exclusive";
case resource_sharing_mode::concurrent:
return "Concurrent";
default:
return "Unknown";
}
}
std::string to_string(buffer_usage usage) {
std::string result;
if (static_cast<u32>(usage) == 0) {
return "None";
}
if ((usage & buffer_usage::vertex) == buffer_usage::vertex) {
result += "Vertex|";
}
if ((usage & buffer_usage::index) == buffer_usage::index) {
result += "Index|";
}
if ((usage & buffer_usage::uniform) == buffer_usage::uniform) {
result += "Uniform|";
}
if ((usage & buffer_usage::storage) == buffer_usage::storage) {
result += "Storage|";
}
if ((usage & buffer_usage::transfer_src) == buffer_usage::transfer_src) {
result += "TransferSrc|";
}
if ((usage & buffer_usage::transfer_dst) == buffer_usage::transfer_dst) {
result += "TransferDst|";
}
if (!result.empty()) {
result.pop_back(); // drop the trailing '|'
}
return result;
}
}
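Combined usage flags render as a pipe-joined list; a quick check of the expected formatting (assuming buffer_usage defines operator|, as its use of operator& above suggests):

#include <cassert>
// Hypothetical check of the flag formatting.
inline void to_string_example() {
    assert(to_string(buffer_usage::vertex | buffer_usage::transfer_dst) == "Vertex|TransferDst");
    assert(to_string(static_cast<buffer_usage>(0)) == "None");
}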

View File

@@ -0,0 +1,9 @@
#pragma once
#include <string>
#include "resource_types.h"
namespace mirai {
std::string to_string(resource_sharing_mode mode);
std::string to_string(buffer_usage usage);
}

View File

@@ -84,19 +84,19 @@ namespace mirai {
}
void swapchain::create_image_views() {
auto default_device = vulkan_context::get().get_default_device();
u32 image_count = 0;
auto result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, images);
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("获取交换链图像数量失败:{}", vk::to_string(result));
return;
}
std::vector<vk::Image> swapchain_images(image_count);
result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, swapchain_images.data());
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("获取交换链图像失败:{}", vk::to_string(result));
return;
}
// auto default_device = vulkan_context::get().get_default_device();
// u32 image_count = 0;
// auto result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, images);
// if (result != vk::Result::eSuccess) {
// MIRAI_LOG_ERROR("获取交换链图像数量失败:{}", vk::to_string(result));
// return;
// }
// std::vector<vk::Image> swapchain_images(image_count);
// result = default_device->get_device().getSwapchainImagesKHR(swapchain_, &image_count, swapchain_images.data());
// if (result != vk::Result::eSuccess) {
// MIRAI_LOG_ERROR("获取交换链图像失败:{}", vk::to_string(result));
// return;
// }
}

View File

@@ -6,24 +6,24 @@
#include "types/error.h"
namespace mirai {
resource_async_task texture2d::upload(std::vector<uint8_t> data) {
auto& manager = resource_manager::get();
// A. Preparation (staging buffer allocation, etc.)
auto staging = manager.allocate_staging(data.size());
memcpy(staging->map(), data.data(), data.size());
// B. Immediate dispatch
// Internally calls submit and returns this task's global target_value
uint64_t wait_val = manager.dispatch_immediate_transfer(shared_from_this(), staging);
// C. Asynchronous wait
// The thread returns here until the manager wakes us after the GPU finishes
co_await TimelineAwaiter{ manager.get_global_semaphore(), wait_val, manager };
// D. Resume (cleanup work)
manager.free_staging(staging);
MIRAI_LOG_DEBUG("Texture upload and ownership transfer complete");
}
// resource_async_task texture2d::upload(std::vector<uint8_t> data) {
// auto& manager = resource_manager::get();
// // A. Preparation (staging buffer allocation, etc.)
// auto staging = manager.allocate_staging(data.size());
// memcpy(staging->map(), data.data(), data.size());
//
// // B. Immediate dispatch
// // Internally calls submit and returns this task's global target_value
// uint64_t wait_val = manager.dispatch_immediate_transfer(shared_from_this(), staging);
//
// // C. Asynchronous wait
// // The thread returns here until the manager wakes us after the GPU finishes
// co_await TimelineAwaiter{ manager.get_global_semaphore(), wait_val, manager };
//
// // D. Resume (cleanup work)
// manager.free_staging(staging);
// MIRAI_LOG_DEBUG("Texture upload and ownership transfer complete");
// }
vk::ImageSubresourceRange texture2d::get_full_range() const noexcept {
return vk::ImageSubresourceRange{

View File

@@ -5,7 +5,7 @@ namespace mirai {
class texture2d : public texture {
MIRAI_OBJECT_TYPE_INFO(texture2d, texture);
resource_async_task upload(std::vector<uint8_t> data) override;
// resource_async_task upload(std::vector<uint8_t> data) override;
texture_type get_texture_type() const noexcept override { return texture_type::texture_2d; }
[[nodiscard]] auto size() const { return size_; }

View File

@@ -0,0 +1,34 @@
#include "vulkan_command_buffer.h"
#include "vulkan_context.h"
namespace mirai {
vulkan_command_buffer::vulkan_command_buffer(std::shared_ptr<vulkan_queue> queue, vk::CommandBufferLevel level) {
queue_ = queue;
level_ = level;
cmd_ = vulkan_thread_context::get().allocate_command_buffer(queue_, level_);
}
vk::Result vulkan_command_buffer::begin(const vk::CommandBufferBeginInfo& info) const {
return cmd_.begin(info);
}
void vulkan_command_buffer::pipeline_barrier(const vk::DependencyInfo& dependency_info) {
return cmd_.pipelineBarrier2(dependency_info);
}
void vulkan_command_buffer::execute_cleanup_tasks() {
for (const auto& task : cleanup_tasks_) {
if (task.task) {
task.task();
}
}
cleanup_tasks_.clear();
}
void vulkan_command_buffer::on_destroying() {
object::on_destroying();
vulkan_thread_context::get().free_command_buffer(queue_, cmd_);
}
}

View File

@@ -0,0 +1,59 @@
#pragma once
#include "core/object.h"
#include <vulkan/vulkan.hpp>
namespace mirai {
class vulkan_queue;
// Work to run after the command buffer has finished executing, when resources need cleanup
struct vulkan_cmd_buffer_cleanup_task {
std::function<void()> task;
#if MIRAI_DEBUG
std::string debug_name;
#endif
};
/**
* Thread-safe command buffer wrapper
*/
class vulkan_command_buffer : public object {
MIRAI_OBJECT_TYPE_INFO(vulkan_command_buffer, object)
public:
vulkan_command_buffer(std::shared_ptr<vulkan_queue> queue,
vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary);
vk::Result begin(const vk::CommandBufferBeginInfo& info = vk::CommandBufferBeginInfo{
vk::CommandBufferUsageFlagBits::eOneTimeSubmit
}) const;
vk::Result end() const { return cmd_.end(); }
void pipeline_barrier(const vk::DependencyInfo& dependency_info);
void copy_buffer_to_image(vk::Buffer src,
const vk::BufferImageCopy& copy_region,
vk::Image image,
vk::ImageLayout layout) const {
cmd_.copyBufferToImage(src, image, layout, 1, &copy_region);
}
void copy_buffer(vk::Buffer src, vk::Buffer dst, const vk::BufferCopy& copy_region) const {
cmd_.copyBuffer(src, dst, 1, &copy_region);
}
[[nodiscard]] auto get_queue() const { return queue_; }
[[nodiscard]] auto get_command_buffer() const { return cmd_; }
[[nodiscard]] auto get_level() const { return level_; }
void add_cleanup_task(const vulkan_cmd_buffer_cleanup_task& task) {
cleanup_tasks_.emplace_back(task);
}
void execute_cleanup_tasks();
protected:
void on_destroying() override;
private:
std::shared_ptr<vulkan_queue> queue_;
vk::CommandBuffer cmd_;
vk::CommandBufferLevel level_;
std::vector<vulkan_cmd_buffer_cleanup_task> cleanup_tasks_;
};
}
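A small sketch of the cleanup-task mechanism on its own (the buffers and lambda body are placeholders):

// Hypothetical: record a copy and attach work that must run only after the
// submitted command buffer has completed execution.
void record_with_cleanup(const std::shared_ptr<vulkan_command_buffer>& cmd,
                         vk::Buffer src, vk::Buffer dst, vk::DeviceSize size) {
    cmd->copy_buffer(src, dst, vk::BufferCopy{0, 0, size});
    vulkan_cmd_buffer_cleanup_task task{};
#if MIRAI_DEBUG
    task.debug_name = "example cleanup";
#endif
    task.task = [] { /* release transient resources here */ };
    cmd->add_cleanup_task(task);
    // The owner calls cmd->execute_cleanup_tasks() after the GPU has finished.
}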

View File

@@ -6,6 +6,8 @@
#include "core/logger.h"
namespace mirai {
thread_local vulkan_thread_context vulkan_thread_context::instance;
void vulkan_context::init(const vulkan_context_init_config& config) {
instance_ = make_obj<vulkan_instance>(config.instance_config);
}
@@ -15,6 +17,9 @@ namespace mirai {
}
void vulkan_context::shutdown() {
for (const auto& d : devices_) {
d->request_destroy();
}
devices_.clear();
instance_.reset();
}
@@ -52,15 +57,38 @@ namespace mirai {
create_device(config);
}
vk::CommandBuffer vulkan_thread_context::allocate_command_buffer(const std::shared_ptr<vulkan_queue>& queue,
vk::CommandBufferLevel level) {
const auto pool = get_pool(queue);
const auto device = queue->get_device();
vk::CommandBufferAllocateInfo alloc_info(
pool,
level,
1
);
auto [result, cmd_buffers] = device.allocateCommandBuffers(alloc_info);
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("分配命令缓冲区失败: {}", vk::to_string(result));
throw std::runtime_error("分配命令缓冲区失败");
}
return cmd_buffers[0];
}
void vulkan_thread_context::free_command_buffer(const std::shared_ptr<vulkan_queue>& queue, vk::CommandBuffer cmd) {
const auto pool = get_pool(queue);
const auto device = queue->get_device();
device.freeCommandBuffers(pool, 1, &cmd);
}
vk::CommandPool vulkan_thread_context::get_pool(const std::shared_ptr<vulkan_queue>& queue) {
const auto family_index = queue->get_family_index();
const auto device = queue->get_device();
if (!pools_.contains(family_index)) {
// Create a new pool that allows resetting individual command buffers
vk::CommandPoolCreateInfo info(
vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
family_index
);
const auto device = queue->get_device();
auto [result, pool] = device.createCommandPool(info);
if (result != vk::Result::eSuccess) {
MIRAI_LOG_ERROR("创建命令池失败: {}", vk::to_string(result));

View File

@@ -53,6 +53,8 @@ namespace mirai {
return instance;
}
vk::CommandBuffer allocate_command_buffer(const std::shared_ptr<vulkan_queue>& queue, vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary);
void free_command_buffer(const std::shared_ptr<vulkan_queue>& queue, vk::CommandBuffer cmd);
vk::CommandPool get_pool(const std::shared_ptr<vulkan_queue>& queue);
private:
std::unordered_map<u32, vk::CommandPool> pools_;

View File

@@ -143,8 +143,6 @@ namespace mirai {
}
MIRAI_LOG_INFO("VMA 分配器初始化完成");
resource_manager_ = make_obj<resource_manager>(device_);
}
std::shared_ptr<vulkan_fence> vulkan_device::create_fence() {
@@ -159,6 +157,21 @@ namespace mirai {
return make_obj<vulkan_time_semaphore>(shared_this(), initial_value);
}
void vulkan_device::request_destroy() {
graphics_queue_.reset();
present_queue_.reset();
compute_queue_.reset();
transfer_queue_.reset();
resource_manager_.reset();
allocator_.reset();
}
void vulkan_device::on_created() {
object::on_created();
resource_manager_ = make_obj<resource_manager>(shared_this());
}
void vulkan_device::on_destroying() {
object::on_destroying();

View File

@@ -54,7 +54,10 @@ namespace mirai {
[[nodiscard]] std::shared_ptr<vulkan_fence> create_fence();
[[nodiscard]] std::shared_ptr<vulkan_semaphore> create_semaphore();
[[nodiscard]] std::shared_ptr<vulkan_time_semaphore> create_timeline_semaphore(u32 initial_value = 0);
void request_destroy();
protected:
void on_created() override;
void on_destroying() override;
private:

View File

@@ -1,6 +1,8 @@
#include "render/vulkan_queue.h"
namespace mirai {
std::unordered_map<VkQueue, std::mutex> vulkan_queue::lock_map_{};
vulkan_queue::vulkan_queue(vk::Device device, vk::Queue queue, u32 family_index, u32 queue_index) : device_(device)
, queue_(queue)
, family_index_(family_index),
@@ -28,11 +30,13 @@ namespace mirai {
}
void_result_t vulkan_graphics_queue::submit(const std::vector<vk::CommandBufferSubmitInfo>& command_buffers) {
vk::PipelineStageFlags waitStages[] = {vk::PipelineStageFlagBits::eColorAttachmentOutput};
submitInfo.setWaitSemaphores(imageAvailableSemaphore);
submitInfo.setPWaitDstStageMask(waitStages);
// vk::PipelineStageFlags waitStages[] = {vk::PipelineStageFlagBits::eColorAttachmentOutput};
// submitInfo.setWaitSemaphores(imageAvailableSemaphore);
// submitInfo.setPWaitDstStageMask(waitStages);
return {};
}
void_result_t vulkan_present_queue::present(const vk::PresentInfoKHR& present_info) {
return {};
}
} // namespace mirai

View File

@@ -86,6 +86,8 @@ enum class error_code : u32 {
synchronization_error = 307,
/// Resource binding error
resource_binding_error = 308,
/// Allocation failed
vulkan_allocation_failed = 309,
// ---- Window/input errors (400-499) ----
/// Window creation failed