/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkGpu.h"

#include "include/core/SkTextureCompressionType.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
#include "src/gpu/ganesh/SkGr.h"
#include "src/gpu/ganesh/image/SkImage_Ganesh.h"
#include "src/gpu/ganesh/surface/SkSurface_Ganesh.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkPipelineState.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"
#include "src/gpu/ganesh/vk/GrVkTextureRenderTarget.h"
#include "src/gpu/vk/VulkanAMDMemoryAllocator.h"
#include "src/gpu/vk/VulkanInterface.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#include "include/gpu/vk/GrVkTypes.h"
#include "include/gpu/vk/VulkanExtensions.h"

#include <utility>

using namespace skia_private;

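// Convenience macros that dispatch Vulkan calls through this GrVkGpu's skgpu::VulkanInterface.
// VK_CALL_RET additionally captures the VkResult so it can be routed through the gpu's error
// handling.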
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)

sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

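    // Cap the versions we report to the rest of Ganesh at the client-provided fMaxAPIVersion
    // (when set); Skia will not rely on API features beyond the capped version even if the
    // instance or device advertises a higher one.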
    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    sk_sp<const skgpu::VulkanInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
                                                   backendContext.fInstance,
                                                   backendContext.fDevice,
                                                   instanceVersion,
                                                   physDevVersion,
                                                   backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        skgpu::VulkanExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
                                                   backendContext.fInstance,
                                                   backendContext.fDevice,
                                                   instanceVersion,
                                                   physDevVersion,
                                                   &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        skgpu::VulkanExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation,
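        // so fall back to Skia's internal allocator, which wraps AMD's VulkanMemoryAllocator
        // (VMA) library.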
        bool mustUseCoherentHostVisibleMemory = caps->mustUseCoherentHostVisibleMemory();
        memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(backendContext.fInstance,
                                                                backendContext.fPhysicalDevice,
                                                                backendContext.fDevice,
                                                                physDevVersion,
                                                                backendContext.fVkExtensions,
                                                                interface,
                                                                mustUseCoherentHostVisibleMemory,
                                                                /*threadSafe=*/false);
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
                                     instanceVersion, physDevVersion,
                                     std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct,
                 const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps,
                 sk_sp<const skgpu::VulkanInterface> interface,
                 uint32_t instanceVersion,
                 uint32_t physicalDeviceVersion,
                 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
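    // GrVkGpu never takes ownership of the VkInstance or VkDevice; they remain owned by the
    // client that created the GrVkBackendContext.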
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);

    this->initCapsAndCompiler(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.clear();

    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.clear();
        fSemaphoresToSignal.clear();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.size() && !fSemaphoresToWaitOn.size()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.clear();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.clear();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
                                           GrGpuBufferType type,
                                           GrAccessPattern accessPattern) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    return GrVkBuffer::Make(this, size, type, accessPattern);
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!skgpu::VkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

// When we update vertex/index buffers via transfers we assume that they may have been used
// previously in draws and will be used again in draws afterwards. So we put a barrier before and
// after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
// *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
// usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
// barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
// Pass false as "after" before the transfer and true after the transfer.
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
                                                GrVkBuffer* dst,
                                                size_t offset,
                                                size_t size,
                                                bool after) {
    if (dst->intendedType() != GrGpuBufferType::kIndex &&
        dst->intendedType() != GrGpuBufferType::kVertex) {
        return;
    }

    VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
                                          ? VK_ACCESS_INDEX_READ_BIT
                                          : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    VkPipelineStageFlagBits srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    VkPipelineStageFlagBits dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;

    if (after) {
        using std::swap;
        swap(srcAccessMask, dstAccessMask);
        swap(srcPipelineStageFlags, dstPipelineStageFlags);
    }

    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            dst->vkBuffer(),                          // buffer
            offset,                                   // offset
            size,                                     // size
    };

    gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
                                dstPipelineStageFlags,
                                /*byRegion=*/false,
                                &bufferMemoryBarrier);
}

bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                           size_t srcOffset,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset,
                                           size_t size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/true);

    return true;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!skgpu::VkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const skgpu::VulkanInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const skgpu::VulkanAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    auto checkResult = [this](VkResult result) {
        return this->checkVkResult(result);
    };
    auto allocator = this->memoryAllocator();
    void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         TArray<VkBufferImageCopy>* regions,
                                         TArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkTextureCompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         GrMipmapped mipmapped) {
    SkASSERT(compression != SkTextureCompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_exact(regions->size() + numMipLevels);
    individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);

    size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(compression,
                                             dimensions,
                                             individualMipOffsets,
                                             mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets->size() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
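    // The switch below computes the least common multiple of bytesPerBlock and 4 from the low
    // two bits of bytesPerBlock.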
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0:                    break;  // alignment is already a multiple of 4.
        case 2:   alignment *= 2;  break;  // alignment is a multiple of 2 but not 4.
        default:  alignment *= 4;  break;  // alignment is not a multiple of 2.
    }
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0:                    break;  // alignment is already a multiple of 4.
        case 2:   alignment *= 2;  break;  // alignment is a multiple of 2 but not 4.
        default:  alignment *= 4;  break;  // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    TArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkTextureCompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipmapped,
                                      const void* data, size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipmapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a GrMipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          skgpu::Budgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask,
                                          std::string_view label) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected, label);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        STArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
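        // Walk the mip levels and coalesce runs of consecutive set bits in levelClearMask into
        // single VkImageSubresourceRanges, so the clear below uses as few ranges as possible.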
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
        texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 false);
        this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
                                                      ranges.size(), ranges.begin());
    }
    return std::move(tex);
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    skgpu::Budgeted budgeted,
                                                    GrMipmapped mipmapped,
                                                    GrProtected isProtected,
                                                    const void* data,
                                                    size_t dataSize) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));

    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
                                                                   : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this,
                                           budgeted,
                                           dimensions,
                                           pixelFormat,
                                           numMipLevels,
                                           isProtected,
                                           mipmapStatus,
                                           /*label=*/"VkGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipmapped, data, dataSize)) {
        return nullptr;
    }

    return std::move(tex);
}
| 1231 | |
| 1232 | //////////////////////////////////////////////////////////////////////////////// |
| 1233 | |
| 1234 | bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, |
| 1235 | VkDeviceSize offset, VkDeviceSize size) { |
| 1236 | if (!this->currentCommandBuffer()) { |
| 1237 | return false; |
| 1238 | } |
| 1239 | add_transfer_dst_buffer_mem_barrier(gpu: this, |
| 1240 | dst: static_cast<GrVkBuffer*>(buffer.get()), |
| 1241 | offset, |
| 1242 | size, |
| 1243 | /*after=*/false); |
| 1244 | this->currentCommandBuffer()->updateBuffer(gpu: this, dstBuffer: buffer, dstOffset: offset, dataSize: size, data: src); |
| 1245 | add_transfer_dst_buffer_mem_barrier(gpu: this, |
| 1246 | dst: static_cast<GrVkBuffer*>(buffer.get()), |
| 1247 | offset, |
| 1248 | size, |
| 1249 | /*after=*/true); |
| 1250 | |
| 1251 | return true; |
| 1252 | } |
| 1253 | |
| 1254 | bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) { |
| 1255 | if (!this->currentCommandBuffer()) { |
| 1256 | return false; |
| 1257 | } |
| 1258 | |
add_transfer_dst_buffer_mem_barrier(this,
static_cast<GrVkBuffer*>(buffer.get()),
/*offset=*/0,
buffer->size(),
/*after=*/false);
this->currentCommandBuffer()->fillBuffer(this,
buffer,
/*offset=*/0,
buffer->size(),
/*data=*/0);
add_transfer_dst_buffer_mem_barrier(this,
static_cast<GrVkBuffer*>(buffer.get()),
/*offset=*/0,
buffer->size(),
/*after=*/true);
| 1274 | |
| 1275 | return true; |
| 1276 | } |
| 1277 | |
| 1278 | //////////////////////////////////////////////////////////////////////////////// |
| 1279 | |
| 1280 | static bool check_image_info(const GrVkCaps& caps, |
| 1281 | const GrVkImageInfo& info, |
| 1282 | bool needsAllocation, |
| 1283 | uint32_t graphicsQueueIndex) { |
| 1284 | if (VK_NULL_HANDLE == info.fImage) { |
| 1285 | return false; |
| 1286 | } |
| 1287 | |
| 1288 | if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) { |
| 1289 | return false; |
| 1290 | } |
| 1291 | |
| 1292 | if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) { |
| 1293 | return false; |
| 1294 | } |
| 1295 | |
| 1296 | if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED && |
| 1297 | info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL && |
| 1298 | info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) { |
| 1299 | if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) { |
| 1300 | if (info.fCurrentQueueFamily != graphicsQueueIndex) { |
| 1301 | return false; |
| 1302 | } |
| 1303 | } else { |
| 1304 | return false; |
| 1305 | } |
| 1306 | } |
| 1307 | |
| 1308 | if (info.fYcbcrConversionInfo.isValid()) { |
| 1309 | if (!caps.supportsYcbcrConversion()) { |
| 1310 | return false; |
| 1311 | } |
| 1312 | if (info.fYcbcrConversionInfo.fExternalFormat != 0) { |
| 1313 | return true; |
| 1314 | } |
| 1315 | } |
| 1316 | |
| 1317 | // We currently require everything to be made with transfer bits set |
if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
| 1320 | return false; |
| 1321 | } |
| 1322 | |
| 1323 | return true; |
| 1324 | } |
| 1325 | |
| 1326 | static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) { |
| 1327 | // We don't support directly importing multisampled textures for sampling from shaders. |
| 1328 | if (info.fSampleCount != 1) { |
| 1329 | return false; |
| 1330 | } |
| 1331 | |
| 1332 | if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) { |
| 1333 | return true; |
| 1334 | } |
| 1335 | if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) { |
| 1336 | if (!caps.isVkFormatTexturable(info.fFormat)) { |
| 1337 | return false; |
| 1338 | } |
| 1339 | } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) { |
if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
| 1341 | return false; |
| 1342 | } |
| 1343 | } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) { |
| 1344 | if (!caps.supportsDRMFormatModifiers()) { |
| 1345 | return false; |
| 1346 | } |
// To be technically correct we should query the vulkan support for VkFormat and
// drmFormatModifier pairs to confirm the required feature support is there. However, we
// currently don't have our caps and format tables set up to do this efficiently. So
// instead we just rely on the client's passed-in VkImageUsageFlags and assume they were
// set up using valid features (checked below). In practice this should all be safe
// because currently we set all drm format modifier textures to have a
// GrTextureType::kExternal, so we really just need to be able to read these video
// VkImages in a shader. The video decoder isn't going to give us VkImages that don't
// support being sampled.
| 1356 | } else { |
| 1357 | SkUNREACHABLE; |
| 1358 | } |
| 1359 | |
| 1360 | // We currently require all textures to be made with sample support |
if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
| 1362 | return false; |
| 1363 | } |
| 1364 | |
| 1365 | return true; |
| 1366 | } |
| 1367 | |
| 1368 | static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) { |
if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
return false;
}
if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
| 1373 | return false; |
| 1374 | } |
| 1375 | return true; |
| 1376 | } |
| 1377 | |
| 1378 | sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex, |
| 1379 | GrWrapOwnership ownership, |
| 1380 | GrWrapCacheable cacheable, |
| 1381 | GrIOType ioType) { |
| 1382 | GrVkImageInfo imageInfo; |
| 1383 | if (!backendTex.getVkImageInfo(&imageInfo)) { |
| 1384 | return nullptr; |
| 1385 | } |
| 1386 | |
if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
this->queueIndex())) {
| 1389 | return nullptr; |
| 1390 | } |
| 1391 | |
if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
| 1393 | return nullptr; |
| 1394 | } |
| 1395 | |
| 1396 | if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) { |
| 1397 | return nullptr; |
| 1398 | } |
| 1399 | |
| 1400 | sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTex.getMutableState(); |
| 1401 | SkASSERT(mutableState); |
return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
ioType, imageInfo, std::move(mutableState));
| 1404 | } |
| 1405 | |
| 1406 | sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex, |
| 1407 | GrWrapOwnership ownership, |
| 1408 | GrWrapCacheable cacheable) { |
return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
| 1410 | } |
| 1411 | |
| 1412 | sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex, |
| 1413 | int sampleCnt, |
| 1414 | GrWrapOwnership ownership, |
| 1415 | GrWrapCacheable cacheable) { |
| 1416 | GrVkImageInfo imageInfo; |
| 1417 | if (!backendTex.getVkImageInfo(&imageInfo)) { |
| 1418 | return nullptr; |
| 1419 | } |
| 1420 | |
if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
this->queueIndex())) {
| 1423 | return nullptr; |
| 1424 | } |
| 1425 | |
if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
| 1427 | return nullptr; |
| 1428 | } |
| 1429 | // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into |
| 1430 | // the wrapped VkImage. |
| 1431 | bool resolveOnly = sampleCnt > 1; |
| 1432 | if (!check_rt_image_info(caps: this->vkCaps(), info: imageInfo, resolveOnly)) { |
| 1433 | return nullptr; |
| 1434 | } |
| 1435 | |
| 1436 | if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) { |
| 1437 | return nullptr; |
| 1438 | } |
| 1439 | |
sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
| 1441 | |
| 1442 | sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTex.getMutableState(); |
| 1443 | SkASSERT(mutableState); |
| 1444 | |
return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
| 1446 | sampleCnt, ownership, cacheable, |
| 1447 | imageInfo, |
| 1448 | std::move(mutableState)); |
| 1449 | } |
| 1450 | |
| 1451 | sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
| 1452 | GrVkImageInfo info; |
| 1453 | if (!backendRT.getVkImageInfo(&info)) { |
| 1454 | return nullptr; |
| 1455 | } |
| 1456 | |
if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
| 1458 | return nullptr; |
| 1459 | } |
| 1460 | |
| 1461 | // We will always render directly to this VkImage. |
| 1462 | static bool kResolveOnly = false; |
if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
| 1464 | return nullptr; |
| 1465 | } |
| 1466 | |
| 1467 | if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) { |
| 1468 | return nullptr; |
| 1469 | } |
| 1470 | |
| 1471 | sk_sp<skgpu::MutableTextureStateRef> mutableState = backendRT.getMutableState(); |
| 1472 | SkASSERT(mutableState); |
| 1473 | |
sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
| 1476 | |
| 1477 | // We don't allow the client to supply a premade stencil buffer. We always create one if needed. |
| 1478 | SkASSERT(!backendRT.stencilBits()); |
| 1479 | if (tgt) { |
| 1480 | SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1)); |
| 1481 | } |
| 1482 | |
| 1483 | return std::move(tgt); |
| 1484 | } |
| 1485 | |
| 1486 | sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget( |
| 1487 | const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) { |
| 1488 | int maxSize = this->caps()->maxTextureSize(); |
| 1489 | if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) { |
| 1490 | return nullptr; |
| 1491 | } |
| 1492 | |
GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
| 1494 | if (!backendFormat.isValid()) { |
| 1495 | return nullptr; |
| 1496 | } |
int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
| 1498 | if (!sampleCnt) { |
| 1499 | return nullptr; |
| 1500 | } |
| 1501 | |
| 1502 | return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo); |
| 1503 | } |
| 1504 | |
| 1505 | bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer, |
| 1506 | const GrVkRenderPass& renderPass, |
| 1507 | GrAttachment* dst, |
| 1508 | GrVkImage* src, |
| 1509 | const SkIRect& srcRect) { |
return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
| 1511 | } |
| 1512 | |
| 1513 | bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) { |
| 1514 | if (!this->currentCommandBuffer()) { |
| 1515 | return false; |
| 1516 | } |
| 1517 | auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage(); |
| 1518 | // don't do anything for linearly tiled textures (can't have mipmaps) |
| 1519 | if (vkTex->isLinearTiled()) { |
SkDebugf("Trying to create mipmap for linear tiled texture");
| 1521 | return false; |
| 1522 | } |
| 1523 | SkASSERT(tex->textureType() == GrTextureType::k2D); |
| 1524 | |
| 1525 | // determine if we can blit to and from this format |
| 1526 | const GrVkCaps& caps = this->vkCaps(); |
if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
!caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
| 1529 | !caps.mipmapSupport()) { |
| 1530 | return false; |
| 1531 | } |
| 1532 | |
| 1533 | int width = tex->width(); |
| 1534 | int height = tex->height(); |
| 1535 | VkImageBlit blitRegion; |
memset(&blitRegion, 0, sizeof(VkImageBlit));
| 1537 | |
| 1538 | // SkMipmap doesn't include the base level in the level count so we have to add 1 |
uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
| 1540 | SkASSERT(levelCount == vkTex->mipLevels()); |
| 1541 | |
| 1542 | // change layout of the layers so we can write to them. |
vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, false);
| 1545 | |
| 1546 | // setup memory barrier |
| 1547 | SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat())); |
VkImageMemoryBarrier imageMemoryBarrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
nullptr,                                 // pNext
VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
vkTex->image(),                          // image
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
};
| 1560 | |
| 1561 | // Blit the miplevels |
| 1562 | uint32_t mipLevel = 1; |
| 1563 | while (mipLevel < levelCount) { |
| 1564 | int prevWidth = width; |
| 1565 | int prevHeight = height; |
width = std::max(1, width / 2);
height = std::max(1, height / 2);
| 1568 | |
| 1569 | imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1; |
this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
| 1572 | |
blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
blitRegion.srcOffsets[0] = { 0, 0, 0 };
blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
blitRegion.dstOffsets[0] = { 0, 0, 0 };
blitRegion.dstOffsets[1] = { width, height, 1 };
this->currentCommandBuffer()->blitImage(this,
vkTex->resource(),
vkTex->image(),
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
vkTex->resource(),
vkTex->image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&blitRegion,
VK_FILTER_LINEAR);
| 1589 | ++mipLevel; |
| 1590 | } |
| 1591 | if (levelCount > 1) { |
// This barrier logically is not needed, but it changes the final level to the same layout
// as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
// layouts and future layout changes easier. The alternative here would be to track layout
// and memory accesses per layer, which doesn't seem worth it.
imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
| 1600 | } |
| 1601 | return true; |
| 1602 | } |
| 1603 | |
| 1604 | //////////////////////////////////////////////////////////////////////////////// |
| 1605 | |
| 1606 | sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/, |
| 1607 | SkISize dimensions, int numStencilSamples) { |
| 1608 | VkFormat sFmt = this->vkCaps().preferredStencilFormat(); |
| 1609 | |
| 1610 | fStats.incStencilAttachmentCreates(); |
return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
| 1612 | } |
| 1613 | |
| 1614 | sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions, |
| 1615 | const GrBackendFormat& format, |
| 1616 | int numSamples, |
| 1617 | GrProtected isProtected, |
| 1618 | GrMemoryless memoryless) { |
| 1619 | VkFormat pixelFormat; |
| 1620 | SkAssertResult(format.asVkFormat(&pixelFormat)); |
| 1621 | SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat)); |
| 1622 | SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples)); |
| 1623 | |
| 1624 | fStats.incMSAAAttachmentCreates(); |
return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
| 1626 | } |
| 1627 | |
| 1628 | //////////////////////////////////////////////////////////////////////////////// |
| 1629 | |
static bool copy_src_data(char* mapPtr,
| 1631 | VkFormat vkFormat, |
| 1632 | const TArray<size_t>& individualMipOffsets, |
| 1633 | const GrPixmap srcData[], |
| 1634 | int numMipLevels) { |
| 1635 | SkASSERT(srcData && numMipLevels); |
| 1636 | SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat)); |
| 1637 | SkASSERT(individualMipOffsets.size() == numMipLevels); |
| 1638 | SkASSERT(mapPtr); |
| 1639 | |
| 1640 | size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat); |
| 1641 | |
| 1642 | for (int level = 0; level < numMipLevels; ++level) { |
| 1643 | const size_t trimRB = srcData[level].info().width() * bytesPerPixel; |
| 1644 | |
SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
srcData[level].addr(), srcData[level].rowBytes(),
trimRB, srcData[level].height());
| 1648 | } |
| 1649 | return true; |
| 1650 | } |
| 1651 | |
| 1652 | bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat, |
| 1653 | SkISize dimensions, |
| 1654 | int sampleCnt, |
| 1655 | GrTexturable texturable, |
| 1656 | GrRenderable renderable, |
| 1657 | GrMipmapped mipmapped, |
| 1658 | GrVkImageInfo* info, |
| 1659 | GrProtected isProtected) { |
| 1660 | SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes); |
| 1661 | |
| 1662 | if (fProtectedContext != isProtected) { |
| 1663 | return false; |
| 1664 | } |
| 1665 | |
| 1666 | if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) { |
| 1667 | return false; |
| 1668 | } |
| 1669 | |
| 1670 | // MSAA images are only currently used by createTestingOnlyBackendRenderTarget. |
| 1671 | if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) { |
| 1672 | return false; |
| 1673 | } |
| 1674 | |
| 1675 | if (renderable == GrRenderable::kYes) { |
sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
| 1677 | if (!sampleCnt) { |
| 1678 | return false; |
| 1679 | } |
| 1680 | } |
| 1683 | int numMipLevels = 1; |
| 1684 | if (mipmapped == GrMipmapped::kYes) { |
numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
| 1686 | } |
| 1687 | |
| 1688 | VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | |
| 1689 | VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
| 1690 | if (texturable == GrTexturable::kYes) { |
| 1691 | usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT; |
| 1692 | } |
| 1693 | if (renderable == GrRenderable::kYes) { |
| 1694 | usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; |
| 1695 | // We always make our render targets support being used as input attachments |
| 1696 | usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; |
| 1697 | } |
| 1698 | |
| 1699 | GrVkImage::ImageDesc imageDesc; |
| 1700 | imageDesc.fImageType = VK_IMAGE_TYPE_2D; |
| 1701 | imageDesc.fFormat = vkFormat; |
| 1702 | imageDesc.fWidth = dimensions.width(); |
| 1703 | imageDesc.fHeight = dimensions.height(); |
| 1704 | imageDesc.fLevels = numMipLevels; |
| 1705 | imageDesc.fSamples = sampleCnt; |
| 1706 | imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL; |
| 1707 | imageDesc.fUsageFlags = usageFlags; |
| 1708 | imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
| 1709 | imageDesc.fIsProtected = fProtectedContext; |
| 1710 | |
if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
SkDebugf("Failed to init image info\n");
| 1713 | return false; |
| 1714 | } |
| 1715 | |
| 1716 | return true; |
| 1717 | } |
| 1718 | |
| 1719 | bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture, |
| 1720 | sk_sp<skgpu::RefCntedCallback> finishedCallback, |
| 1721 | std::array<float, 4> color) { |
| 1722 | GrVkImageInfo info; |
| 1723 | SkAssertResult(backendTexture.getVkImageInfo(&info)); |
| 1724 | |
| 1725 | sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTexture.getMutableState(); |
| 1726 | SkASSERT(mutableState); |
| 1727 | sk_sp<GrVkTexture> texture = |
GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
kRW_GrIOType, info, std::move(mutableState));
| 1731 | if (!texture) { |
| 1732 | return false; |
| 1733 | } |
| 1734 | GrVkImage* texImage = texture->textureImage(); |
| 1735 | |
| 1736 | GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer(); |
| 1737 | if (!cmdBuffer) { |
| 1738 | return false; |
| 1739 | } |
| 1740 | |
texImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
| 1746 | |
| 1747 | // CmdClearColorImage doesn't work for compressed formats |
| 1748 | SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat)); |
| 1749 | |
| 1750 | VkClearColorValue vkColor; |
| 1751 | // If we ever support SINT or UINT formats this needs to be updated to use the int32 and |
| 1752 | // uint32 union members in those cases. |
| 1753 | vkColor.float32[0] = color[0]; |
| 1754 | vkColor.float32[1] = color[1]; |
| 1755 | vkColor.float32[2] = color[2]; |
| 1756 | vkColor.float32[3] = color[3]; |
| 1757 | VkImageSubresourceRange range; |
| 1758 | range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; |
| 1759 | range.baseArrayLayer = 0; |
| 1760 | range.baseMipLevel = 0; |
| 1761 | range.layerCount = 1; |
| 1762 | range.levelCount = info.fLevelCount; |
cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
| 1764 | |
| 1765 | // Change image layout to shader read since if we use this texture as a borrowed |
| 1766 | // texture within Ganesh we require that its layout be set to that |
texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
| 1770 | |
| 1771 | if (finishedCallback) { |
this->addFinishedCallback(std::move(finishedCallback));
| 1773 | } |
| 1774 | return true; |
| 1775 | } |
| 1776 | |
| 1777 | GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions, |
| 1778 | const GrBackendFormat& format, |
| 1779 | GrRenderable renderable, |
| 1780 | GrMipmapped mipmapped, |
| 1781 | GrProtected isProtected, |
| 1782 | std::string_view label) { |
| 1783 | const GrVkCaps& caps = this->vkCaps(); |
| 1784 | |
| 1785 | if (fProtectedContext != isProtected) { |
| 1786 | return {}; |
| 1787 | } |
| 1788 | |
| 1789 | VkFormat vkFormat; |
| 1790 | if (!format.asVkFormat(&vkFormat)) { |
| 1791 | return {}; |
| 1792 | } |
| 1793 | |
| 1794 | // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here |
| 1795 | if (!caps.isVkFormatTexturable(vkFormat)) { |
| 1796 | return {}; |
| 1797 | } |
| 1798 | |
if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
| 1800 | return {}; |
| 1801 | } |
| 1802 | |
| 1803 | GrVkImageInfo info; |
if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
renderable, mipmapped, &info, isProtected)) {
| 1806 | return {}; |
| 1807 | } |
| 1808 | |
| 1809 | return GrBackendTexture(dimensions.width(), dimensions.height(), info); |
| 1810 | } |
| 1811 | |
| 1812 | GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture( |
| 1813 | SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipmapped, |
| 1814 | GrProtected isProtected) { |
return this->onCreateBackendTexture(dimensions,
format,
GrRenderable::kNo,
mipmapped,
isProtected,
/*label=*/"VkGpu_CreateCompressedBackendTexture");
| 1821 | } |
| 1822 | |
| 1823 | bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture, |
| 1824 | sk_sp<skgpu::RefCntedCallback> finishedCallback, |
| 1825 | const void* data, |
| 1826 | size_t size) { |
| 1827 | GrVkImageInfo info; |
| 1828 | SkAssertResult(backendTexture.getVkImageInfo(&info)); |
| 1829 | |
| 1830 | sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTexture.getMutableState(); |
| 1831 | SkASSERT(mutableState); |
sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
backendTexture.dimensions(),
kBorrow_GrWrapOwnership,
GrWrapCacheable::kNo,
kRW_GrIOType,
info,
std::move(mutableState));
| 1839 | if (!texture) { |
| 1840 | return false; |
| 1841 | } |
| 1842 | |
| 1843 | GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer(); |
| 1844 | if (!cmdBuffer) { |
| 1845 | return false; |
| 1846 | } |
| 1847 | GrVkImage* image = texture->textureImage(); |
image->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
| 1853 | |
SkTextureCompressionType compression =
GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
| 1856 | |
| 1857 | TArray<VkBufferImageCopy> regions; |
| 1858 | TArray<size_t> individualMipOffsets; |
| 1859 | GrStagingBufferManager::Slice slice; |
| 1860 | |
fill_in_compressed_regions(&fStagingBufferManager,
&regions,
&individualMipOffsets,
&slice,
compression,
info.fFormat,
backendTexture.dimensions(),
backendTexture.fMipmapped);
| 1869 | |
| 1870 | if (!slice.fBuffer) { |
| 1871 | return false; |
| 1872 | } |
| 1873 | |
memcpy(slice.fOffsetMapPtr, data, size);
| 1875 | |
cmdBuffer->addGrSurface(texture);
| 1877 | // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer |
| 1878 | // because we don't need the command buffer to ref the buffer here. The reason being is that |
| 1879 | // the buffer is coming from the staging manager and the staging manager will make sure the |
| 1880 | // command buffer has a ref on the buffer. This avoids having to add and remove a ref for |
| 1881 | // every upload in the frame. |
cmdBuffer->copyBufferToImage(this,
static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
image,
image->currentLayout(),
regions.size(),
regions.begin());
| 1888 | |
| 1889 | // Change image layout to shader read since if we use this texture as a borrowed |
| 1890 | // texture within Ganesh we require that its layout be set to that |
image->setImageLayout(this,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
| 1896 | |
| 1897 | if (finishedCallback) { |
this->addFinishedCallback(std::move(finishedCallback));
| 1899 | } |
| 1900 | return true; |
| 1901 | } |
| 1902 | |
static void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
| 1904 | const skgpu::VulkanMutableTextureState& newState) { |
| 1905 | // Even though internally we use this helper for getting src access flags and stages they |
| 1906 | // can also be used for general dst flags since we don't know exactly what the client |
| 1907 | // plans on using the image for. |
| 1908 | VkImageLayout newLayout = newState.getImageLayout(); |
| 1909 | if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) { |
| 1910 | newLayout = image->currentLayout(); |
| 1911 | } |
VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
| 1914 | |
| 1915 | uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex(); |
| 1916 | uint32_t newQueueFamilyIndex = newState.getQueueFamilyIndex(); |
| 1917 | auto isSpecialQueue = [](uint32_t queueFamilyIndex) { |
| 1918 | return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL || |
| 1919 | queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT; |
| 1920 | }; |
| 1921 | if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) { |
| 1922 | // It is illegal to have both the new and old queue be special queue families (i.e. external |
| 1923 | // or foreign). |
| 1924 | return; |
| 1925 | } |
| 1926 | |
image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
newQueueFamilyIndex);
| 1929 | } |
| 1930 | |
| 1931 | bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info, |
| 1932 | sk_sp<skgpu::MutableTextureStateRef> currentState, |
| 1933 | SkISize dimensions, |
| 1934 | const skgpu::VulkanMutableTextureState& newState, |
| 1935 | skgpu::MutableTextureState* previousState, |
| 1936 | sk_sp<skgpu::RefCntedCallback> finishedCallback) { |
sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
dimensions,
info,
std::move(currentState),
GrVkImage::UsageFlags::kColorAttachment,
kBorrow_GrWrapOwnership,
GrWrapCacheable::kNo,
"VkGpu_SetBackendSurfaceState",
/*forSecondaryCB=*/false);
| 1946 | SkASSERT(texture); |
| 1947 | if (!texture) { |
| 1948 | return false; |
| 1949 | } |
| 1950 | if (previousState) { |
previousState->setVulkanState(texture->currentLayout(),
texture->currentQueueFamilyIndex());
| 1953 | } |
set_layout_and_queue_from_mutable_state(this, texture.get(), newState);
if (finishedCallback) {
this->addFinishedCallback(std::move(finishedCallback));
| 1957 | } |
| 1958 | return true; |
| 1959 | } |
| 1960 | |
bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
const skgpu::MutableTextureState& newState,
skgpu::MutableTextureState* previousState,
sk_sp<skgpu::RefCntedCallback> finishedCallback) {
GrVkImageInfo info;
SkAssertResult(backendTexture.getVkImageInfo(&info));
sk_sp<skgpu::MutableTextureStateRef> currentState = backendTexture.getMutableState();
SkASSERT(currentState);
SkASSERT(newState.isValid() && newState.fBackend == skgpu::BackendApi::kVulkan);
return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
newState.fVkState, previousState,
std::move(finishedCallback));
}
| 1974 | |
| 1975 | bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget, |
| 1976 | const skgpu::MutableTextureState& newState, |
| 1977 | skgpu::MutableTextureState* previousState, |
| 1978 | sk_sp<skgpu::RefCntedCallback> finishedCallback) { |
| 1979 | GrVkImageInfo info; |
| 1980 | SkAssertResult(backendRenderTarget.getVkImageInfo(&info)); |
| 1981 | sk_sp<skgpu::MutableTextureStateRef> currentState = backendRenderTarget.getMutableState(); |
| 1982 | SkASSERT(currentState); |
| 1983 | SkASSERT(newState.fBackend == skgpu::BackendApi::kVulkan); |
return this->setBackendSurfaceState(info, std::move(currentState),
backendRenderTarget.dimensions(), newState.fVkState,
previousState, std::move(finishedCallback));
| 1987 | } |
| 1988 | |
| 1989 | void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) { |
| 1990 | GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 1991 | VkPipelineStageFlags dstStage; |
| 1992 | VkAccessFlags dstAccess; |
| 1993 | if (barrierType == kBlend_GrXferBarrierType) { |
| 1994 | dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; |
| 1995 | dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT; |
| 1996 | } else { |
| 1997 | SkASSERT(barrierType == kTexture_GrXferBarrierType); |
| 1998 | dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; |
| 1999 | dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; |
| 2000 | } |
| 2001 | GrVkImage* image = vkRT->colorAttachment(); |
| 2002 | VkImageMemoryBarrier barrier; |
| 2003 | barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; |
| 2004 | barrier.pNext = nullptr; |
| 2005 | barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; |
| 2006 | barrier.dstAccessMask = dstAccess; |
| 2007 | barrier.oldLayout = image->currentLayout(); |
| 2008 | barrier.newLayout = barrier.oldLayout; |
| 2009 | barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; |
| 2010 | barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; |
| 2011 | barrier.image = image->image(); |
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
this->addImageMemoryBarrier(image->resource(),
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
dstStage, true, &barrier);
| 2016 | } |
| 2017 | |
| 2018 | void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) { |
| 2019 | SkASSERT(GrBackendApi::kVulkan == tex.fBackend); |
| 2020 | |
| 2021 | GrVkImageInfo info; |
| 2022 | if (tex.getVkImageInfo(&info)) { |
GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
| 2024 | } |
| 2025 | } |
| 2026 | |
| 2027 | bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) { |
| 2028 | GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor; |
| 2029 | GrVkRenderPass::AttachmentFlags attachmentFlags; |
GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
&attachmentsDescriptor, &attachmentFlags);
| 2032 | |
| 2033 | GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone; |
| 2034 | if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) { |
| 2035 | selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend; |
| 2036 | } |
| 2037 | if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) { |
| 2038 | selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment; |
| 2039 | } |
| 2040 | |
| 2041 | GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo; |
| 2042 | if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) && |
| 2043 | programInfo.colorLoadOp() == GrLoadOp::kLoad) { |
| 2044 | loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad; |
| 2045 | } |
| 2046 | sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass( |
| 2047 | &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve)); |
| 2048 | if (!renderPass) { |
| 2049 | return false; |
| 2050 | } |
| 2051 | |
| 2052 | GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat; |
| 2053 | |
auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
desc,
programInfo,
renderPass->vkRenderPass(),
&stat);
| 2059 | if (!pipelineState) { |
| 2060 | return false; |
| 2061 | } |
| 2062 | |
| 2063 | return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit; |
| 2064 | } |
| 2065 | |
| 2066 | #if GR_TEST_UTILS |
| 2067 | bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const { |
| 2068 | SkASSERT(GrBackendApi::kVulkan == tex.fBackend); |
| 2069 | |
| 2070 | GrVkImageInfo backend; |
| 2071 | if (!tex.getVkImageInfo(&backend)) { |
| 2072 | return false; |
| 2073 | } |
| 2074 | |
| 2075 | if (backend.fImage && backend.fAlloc.fMemory) { |
| 2076 | VkMemoryRequirements req; |
| 2077 | memset(&req, 0, sizeof(req)); |
| 2078 | GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, |
| 2079 | backend.fImage, |
| 2080 | &req)); |
| 2081 | // TODO: find a better check |
| 2082 | // This will probably fail with a different driver |
| 2083 | return (req.size > 0) && (req.size <= 8192 * 8192); |
| 2084 | } |
| 2085 | |
| 2086 | return false; |
| 2087 | } |
| 2088 | |
| 2089 | GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions, |
| 2090 | GrColorType ct, |
| 2091 | int sampleCnt, |
| 2092 | GrProtected isProtected) { |
| 2093 | if (dimensions.width() > this->caps()->maxRenderTargetSize() || |
| 2094 | dimensions.height() > this->caps()->maxRenderTargetSize()) { |
| 2095 | return {}; |
| 2096 | } |
| 2097 | |
| 2098 | VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct); |
| 2099 | |
| 2100 | GrVkImageInfo info; |
| 2101 | if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo, |
| 2102 | GrRenderable::kYes, GrMipmapped::kNo, &info, |
| 2103 | isProtected)) { |
| 2104 | return {}; |
| 2105 | } |
| 2106 | return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info); |
| 2107 | } |
| 2108 | |
| 2109 | void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) { |
| 2110 | SkASSERT(GrBackendApi::kVulkan == rt.fBackend); |
| 2111 | |
| 2112 | GrVkImageInfo info; |
| 2113 | if (rt.getVkImageInfo(&info)) { |
| 2114 | // something in the command buffer may still be using this, so force submit |
| 2115 | SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue)); |
| 2116 | GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info)); |
| 2117 | } |
| 2118 | } |
| 2119 | #endif |
| 2120 | |
| 2121 | //////////////////////////////////////////////////////////////////////////////// |
| 2122 | |
| 2123 | void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource, |
| 2124 | VkPipelineStageFlags srcStageMask, |
| 2125 | VkPipelineStageFlags dstStageMask, |
| 2126 | bool byRegion, |
| 2127 | VkBufferMemoryBarrier* barrier) const { |
| 2128 | if (!this->currentCommandBuffer()) { |
| 2129 | return; |
| 2130 | } |
| 2131 | SkASSERT(resource); |
this->currentCommandBuffer()->pipelineBarrier(this,
resource,
srcStageMask,
dstStageMask,
byRegion,
GrVkCommandBuffer::kBufferMemory_BarrierType,
barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
| 2141 | VkPipelineStageFlags dstStageMask, |
| 2142 | bool byRegion, |
| 2143 | VkBufferMemoryBarrier* barrier) const { |
| 2144 | if (!this->currentCommandBuffer()) { |
| 2145 | return; |
| 2146 | } |
| 2147 | // We don't pass in a resource here to the command buffer. The command buffer only is using it |
| 2148 | // to hold a ref, but every place where we add a buffer memory barrier we are doing some other |
| 2149 | // command with the buffer on the command buffer. Thus those other commands will already cause |
| 2150 | // the command buffer to be holding a ref to the buffer. |
this->currentCommandBuffer()->pipelineBarrier(this,
/*resource=*/nullptr,
srcStageMask,
dstStageMask,
byRegion,
GrVkCommandBuffer::kBufferMemory_BarrierType,
| 2157 | barrier); |
| 2158 | } |
| 2159 | |
| 2160 | void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource, |
| 2161 | VkPipelineStageFlags srcStageMask, |
| 2162 | VkPipelineStageFlags dstStageMask, |
| 2163 | bool byRegion, |
| 2164 | VkImageMemoryBarrier* barrier) const { |
| 2165 | // If we are in the middle of destroying or abandoning the context we may hit a release proc |
| 2166 | // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the |
| 2167 | // VkImage back to the original queue. In this state we don't submit anymore work and we may not |
| 2168 | // have a current command buffer. Thus we won't do the queue transfer. |
| 2169 | if (!this->currentCommandBuffer()) { |
| 2170 | return; |
| 2171 | } |
| 2172 | SkASSERT(resource); |
this->currentCommandBuffer()->pipelineBarrier(this,
resource,
srcStageMask,
dstStageMask,
byRegion,
GrVkCommandBuffer::kImageMemory_BarrierType,
| 2179 | barrier); |
| 2180 | } |
| 2181 | |
| 2182 | void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates( |
| 2183 | SkSpan<GrSurfaceProxy*> proxies, |
| 2184 | SkSurfaces::BackendSurfaceAccess access, |
| 2185 | const skgpu::MutableTextureState* newState) { |
// Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
// not affect what we do here.
| 2188 | if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) { |
// We currently don't support passing in new surface state for multiple proxies here. The
// only time we have multiple proxies is if we are flushing a yuv SkImage, which won't have
// state updates anyway. Additionally, if we have a newState then we must not have any
// BackendSurfaceAccess.
| 2193 | SkASSERT(!newState || proxies.size() == 1); |
| 2194 | SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess); |
| 2195 | GrVkImage* image; |
| 2196 | for (GrSurfaceProxy* proxy : proxies) { |
| 2197 | SkASSERT(proxy->isInstantiated()); |
| 2198 | if (GrTexture* tex = proxy->peekTexture()) { |
| 2199 | image = static_cast<GrVkTexture*>(tex)->textureImage(); |
| 2200 | } else { |
| 2201 | GrRenderTarget* rt = proxy->peekRenderTarget(); |
| 2202 | SkASSERT(rt); |
| 2203 | GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 2204 | image = vkRT->externalAttachment(); |
| 2205 | } |
| 2206 | if (newState) { |
| 2207 | const skgpu::VulkanMutableTextureState& newInfo = newState->fVkState; |
set_layout_and_queue_from_mutable_state(this, image, newInfo);
| 2209 | } else { |
| 2210 | SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent); |
image->prepareForPresent(this);
| 2212 | } |
| 2213 | } |
| 2214 | } |
| 2215 | } |
| 2216 | |
| 2217 | void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc, |
| 2218 | GrGpuFinishedContext finishedContext) { |
| 2219 | SkASSERT(finishedProc); |
this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
| 2221 | } |
| 2222 | |
| 2223 | void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) { |
| 2224 | SkASSERT(finishedCallback); |
fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
| 2226 | } |
| 2227 | |
| 2228 | void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) { |
this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
| 2230 | } |
| 2231 | |
| 2232 | bool GrVkGpu::onSubmitToGpu(bool syncCpu) { |
| 2233 | if (syncCpu) { |
return this->submitCommandBuffer(kForce_SyncQueue);
} else {
return this->submitCommandBuffer(kSkip_SyncQueue);
| 2237 | } |
| 2238 | } |
| 2239 | |
| 2240 | void GrVkGpu::finishOutstandingGpuWork() { |
| 2241 | VK_CALL(QueueWaitIdle(fQueue)); |
| 2242 | |
| 2243 | if (this->vkCaps().mustSyncCommandBuffersWithQueue()) { |
| 2244 | fResourceProvider.forceSyncAllCommandBuffers(); |
| 2245 | } |
| 2246 | } |
| 2247 | |
| 2248 | void GrVkGpu::onReportSubmitHistograms() { |
| 2249 | #if SK_HISTOGRAMS_ENABLED |
| 2250 | uint64_t allocatedMemory = 0, usedMemory = 0; |
| 2251 | std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory(); |
| 2252 | SkASSERT(usedMemory <= allocatedMemory); |
| 2253 | if (allocatedMemory > 0) { |
| 2254 | SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed" , |
| 2255 | (usedMemory * 100) / allocatedMemory); |
| 2256 | } |
// allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
// supports samples up to around 500MB, which should cover the amounts of memory we allocate.
SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
| 2260 | #endif // SK_HISTOGRAMS_ENABLED |
| 2261 | } |
| 2262 | |
| 2263 | void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, |
| 2264 | GrSurface* src, |
| 2265 | GrVkImage* dstImage, |
| 2266 | GrVkImage* srcImage, |
| 2267 | const SkIRect& srcRect, |
| 2268 | const SkIPoint& dstPoint) { |
| 2269 | if (!this->currentCommandBuffer()) { |
| 2270 | return; |
| 2271 | } |
| 2272 | |
| 2273 | #ifdef SK_DEBUG |
| 2274 | int dstSampleCnt = dstImage->numSamples(); |
| 2275 | int srcSampleCnt = srcImage->numSamples(); |
| 2276 | bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid(); |
| 2277 | bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid(); |
| 2278 | VkFormat dstFormat = dstImage->imageFormat(); |
| 2279 | VkFormat srcFormat; |
SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
| 2281 | SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr, |
| 2282 | srcFormat, srcSampleCnt, srcHasYcbcr)); |
| 2283 | #endif |
| 2284 | if (src->isProtected() && !dst->isProtected()) { |
SkDebugf("Can't copy from protected memory to non-protected");
| 2286 | return; |
| 2287 | } |
| 2288 | |
| 2289 | // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if |
| 2290 | // the cache is flushed since it is only being written to. |
dstImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);

srcImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_ACCESS_TRANSFER_READ_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
| 2302 | |
| 2303 | VkImageCopy copyRegion; |
| 2304 | memset(s: ©Region, c: 0, n: sizeof(VkImageCopy)); |
| 2305 | copyRegion.srcSubresource = { .aspectMask: VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel: 0, .baseArrayLayer: 0, .layerCount: 1 }; |
| 2306 | copyRegion.srcOffset = { .x: srcRect.fLeft, .y: srcRect.fTop, .z: 0 }; |
| 2307 | copyRegion.dstSubresource = { .aspectMask: VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel: 0, .baseArrayLayer: 0, .layerCount: 1 }; |
| 2308 | copyRegion.dstOffset = { .x: dstPoint.fX, .y: dstPoint.fY, .z: 0 }; |
| 2309 | copyRegion.extent = { .width: (uint32_t)srcRect.width(), .height: (uint32_t)srcRect.height(), .depth: 1 }; |
| 2310 | |
this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
this->currentCommandBuffer()->copyImage(this,
srcImage,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
dstImage,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copyRegion);

SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
srcRect.width(), srcRect.height());
// The rect is already in device space so we pass in kTopLeft so no flip is done.
this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
| 2325 | } |
| 2326 | |
| 2327 | void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, |
| 2328 | GrSurface* src, |
| 2329 | GrVkImage* dstImage, |
| 2330 | GrVkImage* srcImage, |
| 2331 | const SkIRect& srcRect, |
| 2332 | const SkIRect& dstRect, |
| 2333 | GrSamplerState::Filter filter) { |
| 2334 | if (!this->currentCommandBuffer()) { |
| 2335 | return; |
| 2336 | } |
| 2337 | |
| 2338 | #ifdef SK_DEBUG |
| 2339 | int dstSampleCnt = dstImage->numSamples(); |
| 2340 | int srcSampleCnt = srcImage->numSamples(); |
| 2341 | bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid(); |
| 2342 | bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid(); |
| 2343 | VkFormat dstFormat = dstImage->imageFormat(); |
| 2344 | VkFormat srcFormat; |
SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
| 2346 | SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat, |
| 2347 | dstSampleCnt, |
| 2348 | dstImage->isLinearTiled(), |
| 2349 | dstHasYcbcr, |
| 2350 | srcFormat, |
| 2351 | srcSampleCnt, |
| 2352 | srcImage->isLinearTiled(), |
| 2353 | srcHasYcbcr)); |
| 2354 | |
| 2355 | #endif |
| 2356 | if (src->isProtected() && !dst->isProtected()) { |
SkDebugf("Can't copy from protected memory to non-protected");
| 2358 | return; |
| 2359 | } |
| 2360 | |
dstImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);

srcImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_ACCESS_TRANSFER_READ_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
| 2372 | |
| 2373 | VkImageBlit blitRegion; |
memset(&blitRegion, 0, sizeof(VkImageBlit));
blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
| 2381 | |
this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
this->currentCommandBuffer()->blitImage(this,
*srcImage,
*dstImage,
1,
&blitRegion,
filter == GrSamplerState::Filter::kNearest ?
VK_FILTER_NEAREST : VK_FILTER_LINEAR);

// The rect is already in device space so we pass in kTopLeft so no flip is done.
this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
| 2394 | } |
| 2395 | |
| 2396 | void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
| 2397 | const SkIPoint& dstPoint) { |
| 2398 | if (src->isProtected() && !dst->isProtected()) { |
SkDebugf("Can't copy from protected memory to non-protected");
return;
}
GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
this->resolveImage(dst, srcRT, srcRect, dstPoint);
SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
srcRect.width(), srcRect.height());
// The rect is already in device space so we pass in kTopLeft so no flip is done.
this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
| 2408 | } |
| 2409 | |
| 2410 | bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect, |
| 2411 | GrSurface* src, const SkIRect& srcRect, |
| 2412 | GrSamplerState::Filter filter) { |
| 2413 | #ifdef SK_DEBUG |
| 2414 | if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) { |
| 2415 | SkASSERT(!srcRT->wrapsSecondaryCommandBuffer()); |
| 2416 | } |
| 2417 | if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) { |
| 2418 | SkASSERT(!dstRT->wrapsSecondaryCommandBuffer()); |
| 2419 | } |
| 2420 | #endif |
| 2421 | if (src->isProtected() && !dst->isProtected()) { |
SkDebugf("Can't copy from protected memory to non-protected");
| 2423 | return false; |
| 2424 | } |
| 2425 | |
| 2426 | GrVkImage* dstImage; |
| 2427 | GrVkImage* srcImage; |
| 2428 | GrRenderTarget* dstRT = dst->asRenderTarget(); |
| 2429 | if (dstRT) { |
| 2430 | GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT); |
| 2431 | if (vkRT->wrapsSecondaryCommandBuffer()) { |
| 2432 | return false; |
| 2433 | } |
| 2434 | // This will technically return true for single sample rts that used DMSAA in which case we |
| 2435 | // don't have to pick the resolve attachment. But in that case the resolve and color |
| 2436 | // attachments will be the same anyways. |
| 2437 | if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) { |
| 2438 | dstImage = vkRT->resolveAttachment(); |
| 2439 | } else { |
| 2440 | dstImage = vkRT->colorAttachment(); |
| 2441 | } |
| 2442 | } else if (dst->asTexture()) { |
| 2443 | dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage(); |
| 2444 | } else { |
// The surface is already a GrAttachment.
| 2446 | dstImage = static_cast<GrVkImage*>(dst); |
| 2447 | } |
| 2448 | GrRenderTarget* srcRT = src->asRenderTarget(); |
| 2449 | if (srcRT) { |
| 2450 | GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT); |
| 2451 | // This will technically return true for single sample rts that used DMSAA in which case we |
| 2452 | // don't have to pick the resolve attachment. But in that case the resolve and color |
| 2453 | // attachments will be the same anyways. |
| 2454 | if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) { |
| 2455 | srcImage = vkRT->resolveAttachment(); |
| 2456 | } else { |
| 2457 | srcImage = vkRT->colorAttachment(); |
| 2458 | } |
} else if (src->asTexture()) {
srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
} else {
// The surface is already a GrAttachment.
srcImage = static_cast<GrVkImage*>(src);
| 2465 | } |
| 2466 | |
| 2467 | VkFormat dstFormat = dstImage->imageFormat(); |
| 2468 | VkFormat srcFormat = srcImage->imageFormat(); |
| 2469 | |
| 2470 | int dstSampleCnt = dstImage->numSamples(); |
| 2471 | int srcSampleCnt = srcImage->numSamples(); |
| 2472 | |
| 2473 | bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid(); |
| 2474 | bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid(); |
| 2475 | |
| 2476 | if (srcRect.size() == dstRect.size()) { |
| 2477 | // Prefer resolves or copy-image commands when there is no scaling |
| 2478 | const SkIPoint dstPoint = dstRect.topLeft(); |
if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
srcFormat, srcSampleCnt, srcHasYcbcr)) {
this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
return true;
}

if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
srcFormat, srcSampleCnt, srcHasYcbcr)) {
this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
| 2488 | return true; |
| 2489 | } |
| 2490 | } |
| 2491 | |
| 2492 | if (this->vkCaps().canCopyAsBlit(dstConfig: dstFormat, |
| 2493 | dstSampleCnt, |
| 2494 | dstIsLinear: dstImage->isLinearTiled(), |
| 2495 | dstHasYcbcr, |
| 2496 | srcConfig: srcFormat, |
| 2497 | srcSampleCnt, |
| 2498 | srcIsLinear: srcImage->isLinearTiled(), |
| 2499 | srcHasYcbcr)) { |
| 2500 | this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter); |
| 2501 | return true; |
| 2502 | } |
| 2503 | |
| 2504 | return false; |
| 2505 | } |
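
// For orientation: the three strategies above map onto vkCmdResolveImage, vkCmdCopyImage, and
// vkCmdBlitImage respectively. An illustrative sketch only, not the actual call sites, which
// live in copySurfaceAsResolve/copySurfaceAsCopyImage/copySurfaceAsBlit:
//
//   vkCmdResolveImage(cmd, srcImg, srcLayout, dstImg, dstLayout, 1, &resolveRegion);
//   vkCmdCopyImage(cmd, srcImg, srcLayout, dstImg, dstLayout, 1, &copyRegion);
//   vkCmdBlitImage(cmd, srcImg, srcLayout, dstImg, dstLayout, 1, &blitRegion, filter);
//
// Only the blit path can scale, which is why it is the sole fallback when
// srcRect.size() != dstRect.size().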

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           SkIRect rect,
                           GrColorType surfaceColorType,
                           GrColorType dstColorType,
                           void* buffer,
                           size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed: it
        // would require the VkImage, which we don't have, and it would also require stopping and
        // restarting the VkRenderPass, which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        image = rt->nonMSAAAttachment();
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    if (!image) {
        return false;
    }

    if (dstColorType == GrColorType::kUnknown ||
        dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
        return false;
    }

    // Change the layout of our target so it can be used as a copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
        return false;
    }
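    // Note: this path performs no pixel conversion; the requested dstColorType must be
    // byte-compatible with the image's VkFormat, as checked above.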
    size_t tightRowBytes = bpp * rect.width();

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    VkOffset3D offset = { rect.left(), rect.top(), 0 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferRowBytes * imageRows,
            GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern,
            GrResourceProvider::ZeroInit::kNo);

    if (!transferBuffer) {
        return false;
    }

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the image to a buffer so we can map it to cpu memory
    region.bufferOffset = 0;
    region.bufferRowLength = 0;  // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0;  // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
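    // For reference: bufferRowLength is measured in texels, not bytes. A nonzero value
    // (hypothetical sketch below) would make Vulkan write padded rows directly, but here we copy
    // tightly packed and let SkRectMemcpy apply the caller's rowBytes instead:
    //   region.bufferRowLength = (uint32_t)(rowBytes / bpp);  // not done here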

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    image,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer,
                                                    1,
                                                    &region);

    // Make sure the copy to the buffer has finished before the host reads it.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
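
    // The call above amounts to a vkCmdPipelineBarrier carrying a VkBufferMemoryBarrier along
    // these lines (illustrative sketch; GrVkBuffer supplies the real handle, offset, and size):
    //   VkBufferMemoryBarrier barrier = {};
    //   barrier.sType         = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    //   barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //   barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
    //   barrier.buffer        = /* the transfer buffer's VkBuffer */;
    //   barrier.size          = VK_WHOLE_SIZE;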

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
    void* mappedMemory = transferBuffer->map();
    if (!mappedMemory) {
        return false;
    }

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());

    transferBuffer->unmap();
    return true;
}

bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              sk_sp<const GrVkFramebuffer> framebuffer,
                              const VkClearValue* colorClear,
                              const GrSurface* target,
                              const SkIRect& renderPassBounds,
                              bool forSecondaryCB) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!framebuffer->isExternal());

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
    VkClearValue clears[3];
    int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
    clears[0].color = colorClear->color;
    clears[stencilIndex].depthStencil.depth = 0.0f;
    clears[stencilIndex].depthStencil.stencil = 0;

    return this->currentCommandBuffer()->beginRenderPass(
            this, renderPass, std::move(framebuffer), clears, target, renderPassBounds,
            forSecondaryCB);
}

void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
                            const SkIRect& bounds) {
    // We had a command buffer when we started the render pass; we should have one now as well.
    SkASSERT(this->currentCommandBuffer());
    this->currentCommandBuffer()->endRenderPass(this);
    this->didWriteToSurface(target, origin, &bounds);
}

bool GrVkGpu::checkVkResult(VkResult result) {
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            fDeviceIsLost = true;
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}
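
// A sketch of checking a raw VkResult by hand (hypothetical caller; within this file the
// VK_CALL_RET/GR_VK_CALL_RESULT machinery is expected to route results through this method):
//   VkResult result = vkQueueWaitIdle(queue);
//   if (!gpu->checkVkResult(result)) {
//       // Device-lost or OOM state has now been recorded on the GrVkGpu.
//   }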

void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

[[nodiscard]] GrFence GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;
    VkResult result;

    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
    if (result != VK_SUCCESS) {
        return 0;
    }
    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
    if (result != VK_SUCCESS) {
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        return 0;
    }

    static_assert(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result;
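    // Note: the zero timeout below makes this a non-blocking poll; it reports whether the fence
    // has already signaled rather than blocking until it does.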
    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}
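
// Usage sketch for the fence trio above (hypothetical caller code, not part of this file):
//   GrFence fence = gpu->insertFence();
//   // ... later, e.g. on a client sync point ...
//   if (gpu->waitFence(fence)) {   // non-blocking poll, see above
//       gpu->deleteFence(fence);   // safe to reclaim once signaled
//   }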

[[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                           GrSemaphoreWrapType wrapType,
                                                           GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}
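
// Both lists accumulate until the next queue submission, where they are expected to feed the
// wait/signal arrays of the VkSubmitInfo (illustrative sketch; the real plumbing lives in the
// command buffer submission code):
//   VkSubmitInfo submitInfo = {};
//   submitInfo.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//   submitInfo.waitSemaphoreCount   = waitCount;      // from fSemaphoresToWaitOn
//   submitInfo.pWaitSemaphores      = waitSemaphores;
//   submitInfo.signalSemaphoreCount = signalCount;    // from fSemaphoresToSignal
//   submitInfo.pSignalSemaphores    = signalSemaphores;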

std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: should we have a way to notify the caller that this has failed? Currently, if the
    // submit fails (e.g. due to DEVICE_LOST), we simply fail the next use of the GPU. Eventually
    // we will abandon the whole GPU if this fails.
    this->submitToGpu(false);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This would additionally require thread safety in command buffer submissions to queues
    // in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}