#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/master/logo.png")]
#![warn(missing_docs, unsafe_op_in_unsafe_fn)]
mod backend;
mod context;
pub mod util;
#[macro_use]
mod macros;
use std::{
any::Any,
borrow::Cow,
error,
fmt::{Debug, Display},
future::Future,
marker::PhantomData,
num::NonZeroU32,
ops::{Bound, Deref, DerefMut, Range, RangeBounds},
sync::Arc,
thread,
};
use context::{Context, DeviceRequest, DynContext, ObjectId};
use parking_lot::Mutex;
pub use wgt::{
AdapterInfo, AddressMode, AstcBlock, AstcChannel, Backend, Backends, BindGroupLayoutEntry,
BindingType, BlendComponent, BlendFactor, BlendOperation, BlendState, BufferAddress,
BufferBindingType, BufferSize, BufferUsages, Color, ColorTargetState, ColorWrites,
CommandBufferDescriptor, CompareFunction, CompositeAlphaMode, DepthBiasState,
DepthStencilState, DeviceType, DownlevelCapabilities, DownlevelFlags, Dx12Compiler,
DynamicOffset, Extent3d, Face, Features, FilterMode, FrontFace, ImageDataLayout,
ImageSubresourceRange, IndexFormat, InstanceDescriptor, Limits, MultisampleState, Origin2d,
Origin3d, PipelineStatisticsTypes, PolygonMode, PowerPreference, PredefinedColorSpace,
PresentMode, PresentationTimestamp, PrimitiveState, PrimitiveTopology, PushConstantRange,
QueryType, RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor, ShaderLocation,
ShaderModel, ShaderStages, StencilFaceState, StencilOperation, StencilState,
StorageTextureAccess, SurfaceCapabilities, SurfaceStatus, TextureAspect, TextureDimension,
TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType,
TextureUsages, TextureViewDimension, VertexAttribute, VertexFormat, VertexStepMode,
COPY_BUFFER_ALIGNMENT, COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT,
QUERY_RESOLVE_BUFFER_ALIGNMENT, QUERY_SET_MAX_QUERIES, QUERY_SIZE, VERTEX_STRIDE_ALIGNMENT,
};
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
pub use wgt::{ExternalImageSource, ImageCopyExternalImage};
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
static_assertions::assert_impl_all!(ExternalImageSource: Send, Sync);
/// Filter for error scopes pushed with [`Device::push_error_scope`]:
/// selects which class of error the scope captures.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);
/// Shorthand for the dynamically-dispatched backend context shared by every handle type.
type C = dyn DynContext;
/// Backend-specific payload stored next to each [`ObjectId`].
type Data = dyn Any + Send + Sync;
/// Entry point of the API: creates [`Surface`]s and requests [`Adapter`]s.
///
/// Created with [`Instance::new`] or [`Instance::default`].
#[derive(Debug)]
pub struct Instance {
    context: Arc<C>,
}
static_assertions::assert_impl_all!(Instance: Send, Sync);
/// Handle to a physical graphics and/or compute device, obtained via
/// [`Instance::request_adapter`]; used to open a [`Device`].
#[derive(Debug)]
pub struct Adapter {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(Adapter: Send, Sync);
impl Drop for Adapter {
    fn drop(&mut self) {
        // Skip the backend release call while unwinding so a failure there
        // cannot turn a panic into an abort.
        if !thread::panicking() {
            self.context.adapter_drop(&self.id, self.data.as_ref())
        }
    }
}
/// Open connection to a graphics/compute device, created from an [`Adapter`]
/// via [`Adapter::request_device`].
#[derive(Debug)]
pub struct Device {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(Device: Send, Sync);
/// Opaque token identifying a particular queue submission; usable with
/// [`Maintain`] to wait for that submission.
#[derive(Debug, Clone)]
pub struct SubmissionIndex(ObjectId, Arc<crate::Data>);
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);
/// Bookkeeping for an active buffer mapping: the overall mapped range plus the
/// sub-range views currently handed out, kept non-overlapping.
#[derive(Debug)]
struct MapContext {
    /// Total buffer size in bytes (not read by code visible in this chunk).
    total_size: BufferAddress,
    /// The range currently mapped; `0..0` when the buffer is unmapped.
    initial_range: Range<BufferAddress>,
    /// Outstanding views into `initial_range`; `add` rejects overlaps.
    sub_ranges: Vec<Range<BufferAddress>>,
}
impl MapContext {
    /// Creates an empty tracker for a buffer of `total_size` bytes.
    fn new(total_size: BufferAddress) -> Self {
        Self {
            total_size,
            initial_range: 0..0,
            sub_ranges: Vec::new(),
        }
    }

    /// Forgets the mapped range. Panics if any view produced by `add` has not
    /// yet been returned through `remove`.
    fn reset(&mut self) {
        self.initial_range = 0..0;
        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Registers a view starting at `offset`; a `None` size extends to the end
    /// of the mapped range. Returns the exclusive end of the view. Panics if
    /// the view escapes the mapped range or overlaps an existing view.
    fn add(&mut self, offset: BufferAddress, size: Option<BufferSize>) -> BufferAddress {
        let end = size.map_or(self.initial_range.end, |s| offset + s.get());
        assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
        for sub in &self.sub_ranges {
            assert!(
                end <= sub.start || offset >= sub.end,
                "Intersecting map range with {sub:?}"
            );
        }
        self.sub_ranges.push(offset..end);
        end
    }

    /// Unregisters the view previously created by `add` with this exact
    /// `offset`/`size` pair. Panics if no such view is tracked.
    fn remove(&mut self, offset: BufferAddress, size: Option<BufferSize>) {
        let end = size.map_or(self.initial_range.end, |s| offset + s.get());
        let target = offset..end;
        let index = self
            .sub_ranges
            .iter()
            .position(|r| *r == target)
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}
/// Handle to a GPU-accessible buffer, created with [`Device::create_buffer`].
#[derive(Debug)]
pub struct Buffer {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
    /// Tracks mapped ranges so overlapping mapped views are rejected.
    map_context: Mutex<MapContext>,
    /// Size in bytes, captured from the creation descriptor.
    size: wgt::BufferAddress,
    /// Allowed usages, captured from the creation descriptor.
    usage: BufferUsages,
}
static_assertions::assert_impl_all!(Buffer: Send, Sync);
/// A slice of a [`Buffer`]: the buffer plus an offset and an optional length
/// (`None` meaning "to the end").
#[derive(Copy, Clone, Debug)]
pub struct BufferSlice<'a> {
    buffer: &'a Buffer,
    offset: BufferAddress,
    size: Option<BufferSize>,
}
static_assertions::assert_impl_all!(BufferSlice: Send, Sync);
/// Handle to a texture, created with [`Device::create_texture`] (or wrapped
/// from a raw hal texture).
#[derive(Debug)]
pub struct Texture {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
    /// Whether dropping this handle should release the underlying texture.
    /// All constructors visible here set `true`; presumably surface-provided
    /// textures set `false` — confirm against the rest of the file.
    owned: bool,
    /// Copy of the creation descriptor with `label`/`view_formats` cleared.
    descriptor: TextureDescriptor<'static>,
}
static_assertions::assert_impl_all!(Texture: Send, Sync);
/// Handle to a view of a [`Texture`].
#[derive(Debug)]
pub struct TextureView {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(TextureView: Send, Sync);
/// Handle to a sampler, created with [`Device::create_sampler`].
#[derive(Debug)]
pub struct Sampler {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(Sampler: Send, Sync);
impl Drop for Sampler {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context.sampler_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Configuration of a [`Surface`]; alias of [`wgt::SurfaceConfiguration`]
/// with concrete format list.
pub type SurfaceConfiguration = wgt::SurfaceConfiguration<Vec<TextureFormat>>;
static_assertions::assert_impl_all!(SurfaceConfiguration: Send, Sync);
/// Handle to a presentable surface, created with one of the
/// `Instance::create_surface*` methods.
#[derive(Debug)]
pub struct Surface {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
    /// `None` until the surface is configured; NOTE(review): presumably
    /// updated by `Surface::configure` (not visible in this chunk).
    config: Mutex<Option<SurfaceConfiguration>>,
}
static_assertions::assert_impl_all!(Surface: Send, Sync);
impl Drop for Surface {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context.surface_drop(&self.id, self.data.as_ref())
        }
    }
}
/// Handle to a bind group layout, created with
/// [`Device::create_bind_group_layout`] or obtained from a pipeline via
/// `get_bind_group_layout`.
#[derive(Debug)]
pub struct BindGroupLayout {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
impl Drop for BindGroupLayout {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .bind_group_layout_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Handle to a bind group, created with [`Device::create_bind_group`].
#[derive(Debug)]
pub struct BindGroup {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(BindGroup: Send, Sync);
impl Drop for BindGroup {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context.bind_group_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Handle to a compiled shader module, created with one of the
/// `Device::create_shader_module*` methods.
#[derive(Debug)]
pub struct ShaderModule {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
impl Drop for ShaderModule {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .shader_module_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Source for a [`ShaderModule`], in one of several feature-gated languages.
#[cfg_attr(feature = "naga", allow(clippy::large_enum_variant))]
#[derive(Clone)]
#[non_exhaustive]
pub enum ShaderSource<'a> {
    /// SPIR-V module code, as a slice of words.
    #[cfg(feature = "spirv")]
    SpirV(Cow<'a, [u32]>),
    /// GLSL source plus the stage it targets and preprocessor defines.
    #[cfg(feature = "glsl")]
    Glsl {
        /// The GLSL source text.
        shader: Cow<'a, str>,
        /// Pipeline stage the shader is compiled for.
        stage: naga::ShaderStage,
        /// Preprocessor defines handed to the GLSL front end.
        defines: naga::FastHashMap<String, String>,
    },
    /// WGSL source text.
    #[cfg(feature = "wgsl")]
    Wgsl(Cow<'a, str>),
    /// Already-parsed naga IR module.
    #[cfg(feature = "naga")]
    Naga(Cow<'static, naga::Module>),
    /// Keeps the `'a` lifetime parameter used no matter which features are
    /// enabled; never constructed by users.
    #[doc(hidden)]
    Dummy(PhantomData<&'a ()>),
}
static_assertions::assert_impl_all!(ShaderSource: Send, Sync);
/// Descriptor for [`Device::create_shader_module`].
#[derive(Clone)]
pub struct ShaderModuleDescriptor<'a> {
    /// Debug label for the module.
    pub label: Label<'a>,
    /// The shader source.
    pub source: ShaderSource<'a>,
}
static_assertions::assert_impl_all!(ShaderModuleDescriptor: Send, Sync);
/// Descriptor for the unsafe [`Device::create_shader_module_spirv`].
pub struct ShaderModuleDescriptorSpirV<'a> {
    /// Debug label for the module.
    pub label: Label<'a>,
    /// Raw SPIR-V words.
    pub source: Cow<'a, [u32]>,
}
static_assertions::assert_impl_all!(ShaderModuleDescriptorSpirV: Send, Sync);
/// Handle to a pipeline layout, created with [`Device::create_pipeline_layout`].
#[derive(Debug)]
pub struct PipelineLayout {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
impl Drop for PipelineLayout {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .pipeline_layout_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Handle to a render pipeline, created with [`Device::create_render_pipeline`].
#[derive(Debug)]
pub struct RenderPipeline {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
impl Drop for RenderPipeline {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .render_pipeline_drop(&self.id, self.data.as_ref());
        }
    }
}
impl RenderPipeline {
    /// Returns the bind group layout at `index` of this pipeline's layout.
    pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
        let context = Arc::clone(&self.context);
        let (id, data) =
            self.context
                .render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index);
        BindGroupLayout { context, id, data }
    }
}
/// Handle to a compute pipeline, created with [`Device::create_compute_pipeline`].
#[derive(Debug)]
pub struct ComputePipeline {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
impl Drop for ComputePipeline {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .compute_pipeline_drop(&self.id, self.data.as_ref());
        }
    }
}
impl ComputePipeline {
    /// Returns the bind group layout at `index` of this pipeline's layout.
    pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
        let context = Arc::clone(&self.context);
        let (id, data) = self.context.compute_pipeline_get_bind_group_layout(
            &self.id,
            self.data.as_ref(),
            index,
        );
        BindGroupLayout { context, id, data }
    }
}
/// Handle to a finished command buffer, ready for queue submission.
#[derive(Debug)]
pub struct CommandBuffer {
    context: Arc<C>,
    // `Option` so submission can take ownership of the id/data, leaving the
    // handle inert; `Drop` only releases what is still present.
    id: Option<ObjectId>,
    data: Option<Box<Data>>,
}
static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
impl Drop for CommandBuffer {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            // Only release if the buffer was never consumed (id still present).
            if let Some(id) = self.id.take() {
                self.context
                    .command_buffer_drop(&id, self.data.take().unwrap().as_ref());
            }
        }
    }
}
/// Handle to a command encoder, created with [`Device::create_command_encoder`];
/// records commands to be turned into a [`CommandBuffer`].
#[derive(Debug)]
pub struct CommandEncoder {
    context: Arc<C>,
    // `Option` so finishing the encoder can take the id; see `Drop`.
    id: Option<ObjectId>,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
impl Drop for CommandEncoder {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            // Only release if the encoder was never finished (id still present).
            if let Some(id) = self.id.take() {
                self.context.command_encoder_drop(&id, self.data.as_ref());
            }
        }
    }
}
/// In-progress render pass recording on a [`CommandEncoder`]; the mutable
/// borrow of the parent keeps the encoder exclusive for the pass's lifetime.
#[derive(Debug)]
pub struct RenderPass<'a> {
    id: ObjectId,
    data: Box<Data>,
    parent: &'a mut CommandEncoder,
}
/// In-progress compute pass recording on a [`CommandEncoder`].
#[derive(Debug)]
pub struct ComputePass<'a> {
    id: ObjectId,
    data: Box<Data>,
    parent: &'a mut CommandEncoder,
}
/// Encoder for a [`RenderBundle`], created with
/// [`Device::create_render_bundle_encoder`].
#[derive(Debug)]
pub struct RenderBundleEncoder<'a> {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
    parent: &'a Device,
    /// `*const u8` is neither `Send` nor `Sync`, deliberately making this
    /// encoder single-threaded (checked by the assertion below).
    _p: PhantomData<*const u8>,
}
static_assertions::assert_not_impl_any!(RenderBundleEncoder<'_>: Send, Sync);
/// Handle to a pre-recorded bundle of render commands, produced by a
/// [`RenderBundleEncoder`].
#[derive(Debug)]
pub struct RenderBundle {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
impl Drop for RenderBundle {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context
                .render_bundle_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Handle to a query set, created with [`Device::create_query_set`].
// Fixed: derive `Debug` like every other handle type in this file
// (`Buffer`, `Sampler`, `RenderBundle`, …) so `QuerySet` can be logged
// and embedded in other `Debug` types.
#[derive(Debug)]
pub struct QuerySet {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(QuerySet: Send, Sync);
impl Drop for QuerySet {
    fn drop(&mut self) {
        // Don't call into the backend while unwinding.
        if !thread::panicking() {
            self.context.query_set_drop(&self.id, self.data.as_ref());
        }
    }
}
/// Handle to a command queue on a [`Device`]; returned alongside the device
/// by [`Adapter::request_device`].
#[derive(Debug)]
pub struct Queue {
    context: Arc<C>,
    id: ObjectId,
    data: Box<Data>,
}
static_assertions::assert_impl_all!(Queue: Send, Sync);
/// Resource that can be bound to a slot in a [`BindGroup`].
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum BindingResource<'a> {
    /// A region of a buffer.
    Buffer(BufferBinding<'a>),
    /// An array of buffer regions.
    BufferArray(&'a [BufferBinding<'a>]),
    /// A sampler.
    Sampler(&'a Sampler),
    /// An array of samplers.
    SamplerArray(&'a [&'a Sampler]),
    /// A texture view.
    TextureView(&'a TextureView),
    /// An array of texture views.
    TextureViewArray(&'a [&'a TextureView]),
}
static_assertions::assert_impl_all!(BindingResource: Send, Sync);
/// A region of a [`Buffer`] to bind: offset plus optional size
/// (`None` meaning "to the end").
#[derive(Clone, Debug)]
pub struct BufferBinding<'a> {
    /// The buffer to bind.
    pub buffer: &'a Buffer,
    /// Byte offset of the start of the region.
    pub offset: BufferAddress,
    /// Size of the region; `None` extends to the end of the buffer.
    pub size: Option<BufferSize>,
}
static_assertions::assert_impl_all!(BufferBinding: Send, Sync);
/// Operation performed on an attachment at the start of a pass.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum LoadOp<V> {
    /// Clear the attachment to the given value.
    Clear(V),
    /// Keep the attachment's existing contents.
    Load,
}
impl<V: Default> Default for LoadOp<V> {
    /// Defaults to clearing with `V::default()`.
    fn default() -> Self {
        Self::Clear(Default::default())
    }
}
/// Pair of load/store operations for an attachment in a pass.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct Operations<V> {
    /// What to do with the attachment at the start of the pass.
    pub load: LoadOp<V>,
    /// Whether results are written back to the attachment at the end.
    pub store: bool,
}
impl<V: Default> Default for Operations<V> {
    /// Defaults to clear-on-load and store-on-end.
    fn default() -> Self {
        Self {
            load: Default::default(),
            store: true,
        }
    }
}
/// Color attachment of a render pass.
#[derive(Clone, Debug)]
pub struct RenderPassColorAttachment<'tex> {
    /// View the colors are written to.
    pub view: &'tex TextureView,
    /// Optional view the multisampled contents are resolved into.
    pub resolve_target: Option<&'tex TextureView>,
    /// Load/store operations for this attachment.
    pub ops: Operations<Color>,
}
static_assertions::assert_impl_all!(RenderPassColorAttachment: Send, Sync);
/// Depth/stencil attachment of a render pass.
#[derive(Clone, Debug)]
pub struct RenderPassDepthStencilAttachment<'tex> {
    /// View the depth/stencil data is written to.
    pub view: &'tex TextureView,
    /// Operations for the depth aspect; `None` leaves depth untouched.
    pub depth_ops: Option<Operations<f32>>,
    /// Operations for the stencil aspect; `None` leaves stencil untouched.
    pub stencil_ops: Option<Operations<u32>>,
}
static_assertions::assert_impl_all!(RenderPassDepthStencilAttachment: Send, Sync);
/// Optional debug label attached to most objects at creation.
pub type Label<'a> = Option<&'a str>;
pub use wgt::RequestAdapterOptions as RequestAdapterOptionsBase;
/// Options for [`Instance::request_adapter`], with the compatible surface
/// expressed as a [`Surface`] reference.
pub type RequestAdapterOptions<'a> = RequestAdapterOptionsBase<&'a Surface>;
static_assertions::assert_impl_all!(RequestAdapterOptions: Send, Sync);
/// Describes a [`Device`]; used by [`Adapter::request_device`].
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor: Send, Sync);
/// Describes a [`Buffer`]; used by [`Device::create_buffer`].
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor: Send, Sync);
/// Describes a [`CommandEncoder`]; used by [`Device::create_command_encoder`].
pub type CommandEncoderDescriptor<'a> = wgt::CommandEncoderDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(CommandEncoderDescriptor: Send, Sync);
/// Describes a [`RenderBundle`].
pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(RenderBundleDescriptor: Send, Sync);
/// Describes a [`Texture`]; used by [`Device::create_texture`].
pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, &'a [TextureFormat]>;
static_assertions::assert_impl_all!(TextureDescriptor: Send, Sync);
/// Describes a [`QuerySet`]; used by [`Device::create_query_set`].
pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(QuerySetDescriptor: Send, Sync);
pub use wgt::Maintain as MaintainBase;
/// Argument to [`Device::poll`], parameterized over [`SubmissionIndex`].
pub type Maintain = wgt::Maintain<SubmissionIndex>;
static_assertions::assert_impl_all!(Maintain: Send, Sync);
/// Describes a [`TextureView`]; `None`/zero fields fall back to values
/// derived from the texture itself.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct TextureViewDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Format of the view; `None` uses the texture's format.
    pub format: Option<TextureFormat>,
    /// Dimension of the view; `None` derives it from the texture.
    pub dimension: Option<TextureViewDimension>,
    /// Which aspect(s) of the texture are visible through the view.
    pub aspect: TextureAspect,
    /// First mip level included in the view.
    pub base_mip_level: u32,
    /// Number of mip levels; `None` extends to the last level.
    pub mip_level_count: Option<u32>,
    /// First array layer included in the view.
    pub base_array_layer: u32,
    /// Number of array layers; `None` extends to the last layer.
    pub array_layer_count: Option<u32>,
}
static_assertions::assert_impl_all!(TextureViewDescriptor: Send, Sync);
/// Describes a [`PipelineLayout`]; used by [`Device::create_pipeline_layout`].
#[derive(Clone, Debug, Default)]
pub struct PipelineLayoutDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Bind group layouts, indexed by bind group number.
    pub bind_group_layouts: &'a [&'a BindGroupLayout],
    /// Push constant ranges available to the pipeline.
    pub push_constant_ranges: &'a [PushConstantRange],
}
static_assertions::assert_impl_all!(PipelineLayoutDescriptor: Send, Sync);
/// Describes a [`Sampler`]; used by [`Device::create_sampler`].
#[derive(Clone, Debug, PartialEq)]
pub struct SamplerDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Addressing mode for U coordinates outside [0, 1].
    pub address_mode_u: AddressMode,
    /// Addressing mode for V coordinates outside [0, 1].
    pub address_mode_v: AddressMode,
    /// Addressing mode for W coordinates outside [0, 1].
    pub address_mode_w: AddressMode,
    /// Filter used when magnifying.
    pub mag_filter: FilterMode,
    /// Filter used when minifying.
    pub min_filter: FilterMode,
    /// Filter used between mip levels.
    pub mipmap_filter: FilterMode,
    /// Minimum level-of-detail clamp.
    pub lod_min_clamp: f32,
    /// Maximum level-of-detail clamp.
    pub lod_max_clamp: f32,
    /// Comparison function for comparison samplers, if any.
    pub compare: Option<CompareFunction>,
    /// Anisotropy clamp; the default below is 1.
    pub anisotropy_clamp: u16,
    /// Border color used by clamp-to-border addressing, if any.
    pub border_color: Option<SamplerBorderColor>,
}
static_assertions::assert_impl_all!(SamplerDescriptor: Send, Sync);
impl Default for SamplerDescriptor<'_> {
    /// Unlabeled sampler with default modes/filters, LOD clamp `0.0..=32.0`,
    /// no comparison, anisotropy 1 and no border color.
    fn default() -> Self {
        Self {
            label: None,
            address_mode_u: Default::default(),
            address_mode_v: Default::default(),
            address_mode_w: Default::default(),
            mag_filter: Default::default(),
            min_filter: Default::default(),
            mipmap_filter: Default::default(),
            lod_min_clamp: 0.0,
            lod_max_clamp: 32.0,
            compare: None,
            anisotropy_clamp: 1,
            border_color: None,
        }
    }
}
/// One entry of a [`BindGroup`]: a binding slot number plus the resource
/// bound to it.
#[derive(Clone, Debug)]
pub struct BindGroupEntry<'a> {
    /// Slot this entry occupies; matches the layout entry's binding.
    pub binding: u32,
    /// Resource bound to the slot.
    pub resource: BindingResource<'a>,
}
static_assertions::assert_impl_all!(BindGroupEntry: Send, Sync);
/// Describes a [`BindGroup`]; used by [`Device::create_bind_group`].
#[derive(Clone, Debug)]
pub struct BindGroupDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Layout the group must conform to.
    pub layout: &'a BindGroupLayout,
    /// The resources bound in this group.
    pub entries: &'a [BindGroupEntry<'a>],
}
static_assertions::assert_impl_all!(BindGroupDescriptor: Send, Sync);
/// Describes a [`RenderPass`].
#[derive(Clone, Debug, Default)]
pub struct RenderPassDescriptor<'tex, 'desc> {
    /// Debug label.
    pub label: Label<'desc>,
    /// Color attachments; `None` slots are unused.
    pub color_attachments: &'desc [Option<RenderPassColorAttachment<'tex>>],
    /// Optional depth/stencil attachment.
    pub depth_stencil_attachment: Option<RenderPassDepthStencilAttachment<'tex>>,
}
static_assertions::assert_impl_all!(RenderPassDescriptor: Send, Sync);
/// Describes how a vertex buffer is laid out and stepped through.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct VertexBufferLayout<'a> {
    /// Byte stride between consecutive elements.
    pub array_stride: BufferAddress,
    /// Whether elements advance per-vertex or per-instance.
    pub step_mode: VertexStepMode,
    /// Attributes making up a single element.
    pub attributes: &'a [VertexAttribute],
}
static_assertions::assert_impl_all!(VertexBufferLayout: Send, Sync);
/// Vertex stage of a [`RenderPipelineDescriptor`].
#[derive(Clone, Debug)]
pub struct VertexState<'a> {
    /// Compiled shader module containing the entry point.
    pub module: &'a ShaderModule,
    /// Name of the vertex entry point in `module`.
    pub entry_point: &'a str,
    /// Layouts of the vertex buffers consumed by this stage.
    pub buffers: &'a [VertexBufferLayout<'a>],
}
static_assertions::assert_impl_all!(VertexState: Send, Sync);
/// Fragment stage of a [`RenderPipelineDescriptor`].
#[derive(Clone, Debug)]
pub struct FragmentState<'a> {
    /// Compiled shader module containing the entry point.
    pub module: &'a ShaderModule,
    /// Name of the fragment entry point in `module`.
    pub entry_point: &'a str,
    /// Color targets written by this stage; `None` slots are unused.
    pub targets: &'a [Option<ColorTargetState>],
}
static_assertions::assert_impl_all!(FragmentState: Send, Sync);
/// Describes a [`RenderPipeline`]; used by [`Device::create_render_pipeline`].
#[derive(Clone, Debug)]
pub struct RenderPipelineDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Pipeline layout; `None` lets the implementation derive one.
    pub layout: Option<&'a PipelineLayout>,
    /// The (mandatory) vertex stage.
    pub vertex: VertexState<'a>,
    /// Primitive assembly and rasterization state.
    pub primitive: PrimitiveState,
    /// Optional depth/stencil state.
    pub depth_stencil: Option<DepthStencilState>,
    /// Multisampling state.
    pub multisample: MultisampleState,
    /// Optional fragment stage.
    pub fragment: Option<FragmentState<'a>>,
    /// Number of array layers for multiview rendering, if enabled.
    pub multiview: Option<NonZeroU32>,
}
static_assertions::assert_impl_all!(RenderPipelineDescriptor: Send, Sync);
/// Describes a [`ComputePass`].
#[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
}
static_assertions::assert_impl_all!(ComputePassDescriptor: Send, Sync);
/// Describes a [`ComputePipeline`]; used by [`Device::create_compute_pipeline`].
#[derive(Clone, Debug)]
pub struct ComputePipelineDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Pipeline layout; `None` lets the implementation derive one.
    pub layout: Option<&'a PipelineLayout>,
    /// Compiled shader module containing the entry point.
    pub module: &'a ShaderModule,
    /// Name of the compute entry point in `module`.
    pub entry_point: &'a str,
}
static_assertions::assert_impl_all!(ComputePipelineDescriptor: Send, Sync);
pub use wgt::ImageCopyBuffer as ImageCopyBufferBase;
/// [`wgt::ImageCopyBuffer`] specialized to a [`Buffer`] reference.
pub type ImageCopyBuffer<'a> = ImageCopyBufferBase<&'a Buffer>;
static_assertions::assert_impl_all!(ImageCopyBuffer: Send, Sync);
pub use wgt::ImageCopyTexture as ImageCopyTextureBase;
/// [`wgt::ImageCopyTexture`] specialized to a [`Texture`] reference.
pub type ImageCopyTexture<'a> = ImageCopyTextureBase<&'a Texture>;
static_assertions::assert_impl_all!(ImageCopyTexture: Send, Sync);
pub use wgt::ImageCopyTextureTagged as ImageCopyTextureTaggedBase;
/// [`wgt::ImageCopyTextureTagged`] specialized to a [`Texture`] reference.
pub type ImageCopyTextureTagged<'a> = ImageCopyTextureTaggedBase<&'a Texture>;
// Fixed copy-paste bug: this assertion previously re-checked
// `ImageCopyTexture`, leaving `ImageCopyTextureTagged` unverified.
static_assertions::assert_impl_all!(ImageCopyTextureTagged: Send, Sync);
/// Describes a [`BindGroupLayout`]; used by [`Device::create_bind_group_layout`].
#[derive(Clone, Debug)]
pub struct BindGroupLayoutDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// The entries of the layout, one per binding slot.
    pub entries: &'a [BindGroupLayoutEntry],
}
static_assertions::assert_impl_all!(BindGroupLayoutDescriptor: Send, Sync);
/// Describes a [`RenderBundleEncoder`]; used by
/// [`Device::create_render_bundle_encoder`].
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct RenderBundleEncoderDescriptor<'a> {
    /// Debug label.
    pub label: Label<'a>,
    /// Formats of the color targets the bundle renders to; `None` slots unused.
    pub color_formats: &'a [Option<TextureFormat>],
    /// Depth/stencil target information, if any.
    pub depth_stencil: Option<RenderBundleDepthStencil>,
    /// Sample count of the targets.
    pub sample_count: u32,
    /// Number of array layers for multiview rendering, if enabled.
    pub multiview: Option<NonZeroU32>,
}
static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor: Send, Sync);
/// Texture acquired from a [`Surface`] for presentation.
#[derive(Debug)]
pub struct SurfaceTexture {
    /// The texture to render to this frame.
    pub texture: Texture,
    /// `true` if the surface is suboptimal and should be reconfigured.
    pub suboptimal: bool,
    /// Set once the frame has been presented; read on drop/present
    /// (not visible in this chunk).
    presented: bool,
    /// Backend-specific presentation payload.
    detail: Box<dyn Any + Send + Sync>,
}
static_assertions::assert_impl_all!(SurfaceTexture: Send, Sync);
/// Error returned when acquiring the next surface texture fails.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum SurfaceError {
    /// Acquiring timed out.
    Timeout,
    /// The surface changed and the swap chain must be updated.
    Outdated,
    /// The swap chain was lost and must be recreated.
    Lost,
    /// No memory left to allocate a new frame.
    OutOfMemory,
}
static_assertions::assert_impl_all!(SurfaceError: Send, Sync);
impl Display for SurfaceError {
    /// Writes the human-readable description of the acquisition failure.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            Self::Timeout => "A timeout was encountered while trying to acquire the next frame",
            Self::Outdated => "The underlying surface has changed, and therefore the swap chain must be updated",
            Self::Lost => "The swap chain has been lost and needs to be recreated",
            Self::OutOfMemory => "There is no more memory left to allocate a new frame",
        };
        f.write_str(message)
    }
}
impl error::Error for SurfaceError {}
impl Default for Instance {
    /// Equivalent to `Instance::new(InstanceDescriptor::default())`.
    fn default() -> Self {
        Self::new(InstanceDescriptor::default())
    }
}
impl Instance {
    /// Creates a new instance backed by the compiled-in backend context.
    pub fn new(instance_desc: InstanceDescriptor) -> Self {
        Self {
            context: Arc::from(crate::backend::Context::init(instance_desc)),
        }
    }
    /// Wraps an existing raw hal instance of backend `A`.
    ///
    /// # Safety
    /// `hal_instance` must be a valid instance for backend `A`; the hal-level
    /// requirements are not restated here — see the backend's documentation.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn from_hal<A: wgc::hub::HalApi>(hal_instance: A::Instance) -> Self {
        Self {
            context: Arc::new(unsafe {
                crate::backend::Context::from_hal_instance::<A>(hal_instance)
            }),
        }
    }
    /// Returns the inner hal instance for backend `A`, if this instance was
    /// created for that backend.
    ///
    /// # Safety
    /// The caller must not use the returned hal instance in ways that violate
    /// the wrapper's ownership of it.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn as_hal<A: wgc::hub::HalApi>(&self) -> Option<&A::Instance> {
        unsafe {
            self.context
                .as_any()
                // The cfg guarantees the context is the native backend Context.
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .instance_as_hal::<A>()
        }
    }
    /// Wraps an existing wgpu-core instance.
    ///
    /// # Safety
    /// `core_instance` must be a valid wgpu-core instance.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self {
        Self {
            context: Arc::new(unsafe {
                crate::backend::Context::from_core_instance(core_instance)
            }),
        }
    }
    /// Returns all adapters available on the selected `backends`.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub fn enumerate_adapters(&self, backends: Backends) -> impl Iterator<Item = Adapter> {
        let context = Arc::clone(&self.context);
        self.context
            .as_any()
            // The cfg guarantees the context is the native backend Context.
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .enumerate_adapters(backends)
            .into_iter()
            .map(move |id| crate::Adapter {
                context: Arc::clone(&context),
                id: ObjectId::from(id),
                // Enumerated adapters carry no backend payload.
                data: Box::new(()),
            })
    }
    /// Requests an adapter matching `options`; resolves to `None` if no
    /// suitable adapter exists.
    pub fn request_adapter(
        &self,
        options: &RequestAdapterOptions,
    ) -> impl Future<Output = Option<Adapter>> + Send {
        let context = Arc::clone(&self.context);
        // Kick off the request before moving into the returned future.
        let adapter = self.context.instance_request_adapter(options);
        async move {
            adapter
                .await
                .map(|(id, data)| Adapter { context, id, data })
        }
    }
    /// Wraps an already-opened hal adapter of backend `A`.
    ///
    /// # Safety
    /// `hal_adapter` must be a valid exposed adapter for backend `A`.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn create_adapter_from_hal<A: wgc::hub::HalApi>(
        &self,
        hal_adapter: hal::ExposedAdapter<A>,
    ) -> Adapter {
        let context = Arc::clone(&self.context);
        let id = unsafe {
            context
                .as_any()
                // The cfg guarantees the context is the native backend Context.
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .create_adapter_from_hal(hal_adapter)
                .into()
        };
        Adapter {
            context,
            id,
            data: Box::new(()),
        }
    }
    /// Creates a surface from a raw window/display handle pair.
    ///
    /// # Safety
    /// The raw handles obtained from `window` must remain valid for the
    /// lifetime of the returned [`Surface`].
    pub unsafe fn create_surface<
        W: raw_window_handle::HasRawWindowHandle + raw_window_handle::HasRawDisplayHandle,
    >(
        &self,
        window: &W,
    ) -> Result<Surface, CreateSurfaceError> {
        let (id, data) = DynContext::instance_create_surface(
            &*self.context,
            raw_window_handle::HasRawDisplayHandle::raw_display_handle(window),
            raw_window_handle::HasRawWindowHandle::raw_window_handle(window),
        )?;
        Ok(Surface {
            context: Arc::clone(&self.context),
            id,
            data,
            config: Mutex::new(None),
        })
    }
    /// Creates a surface from a `CAMetalLayer` pointer (iOS/macOS only).
    ///
    /// # Safety
    /// `layer` must be a valid `CAMetalLayer` pointer that outlives the surface.
    #[cfg(any(target_os = "ios", target_os = "macos"))]
    pub unsafe fn create_surface_from_core_animation_layer(
        &self,
        layer: *mut std::ffi::c_void,
    ) -> Surface {
        let surface = unsafe {
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .create_surface_from_core_animation_layer(layer)
        };
        Surface {
            context: Arc::clone(&self.context),
            id: ObjectId::from(surface.id()),
            data: Box::new(surface),
            config: Mutex::new(None),
        }
    }
    /// Creates a surface from a DirectComposition `IDCompositionVisual`
    /// pointer (Windows only).
    ///
    /// # Safety
    /// `visual` must be a valid visual pointer that outlives the surface.
    #[cfg(target_os = "windows")]
    pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface {
        let surface = unsafe {
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .create_surface_from_visual(visual)
        };
        Surface {
            context: Arc::clone(&self.context),
            id: ObjectId::from(surface.id()),
            data: Box::new(surface),
            config: Mutex::new(None),
        }
    }
    /// Creates a surface from a raw surface handle (Windows only).
    ///
    /// # Safety
    /// `surface_handle` must be a valid handle that outlives the surface.
    #[cfg(target_os = "windows")]
    pub unsafe fn create_surface_from_surface_handle(
        &self,
        surface_handle: *mut std::ffi::c_void,
    ) -> Surface {
        let surface = unsafe {
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .create_surface_from_surface_handle(surface_handle)
        };
        Surface {
            context: Arc::clone(&self.context),
            id: ObjectId::from(surface.id()),
            data: Box::new(surface),
            config: Mutex::new(None),
        }
    }
    /// Creates a surface targeting an HTML canvas element (web only).
    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    pub fn create_surface_from_canvas(
        &self,
        canvas: web_sys::HtmlCanvasElement,
    ) -> Result<Surface, CreateSurfaceError> {
        let surface = self
            .context
            .as_any()
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .instance_create_surface_from_canvas(canvas)?;
        Ok(Surface {
            context: Arc::clone(&self.context),
            // With the webgl backend the surface carries a real id; with the
            // pure-web backend only the payload half of the pair is stored.
            #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
            id: ObjectId::from(surface.id()),
            #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
            data: Box::new(surface),
            #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
            id: ObjectId::UNUSED,
            #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
            data: Box::new(surface.1),
            config: Mutex::new(None),
        })
    }
    /// Creates a surface targeting an offscreen canvas (web only).
    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    pub fn create_surface_from_offscreen_canvas(
        &self,
        canvas: web_sys::OffscreenCanvas,
    ) -> Result<Surface, CreateSurfaceError> {
        let surface = self
            .context
            .as_any()
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .instance_create_surface_from_offscreen_canvas(canvas)?;
        Ok(Surface {
            context: Arc::clone(&self.context),
            // Same id/data split as `create_surface_from_canvas` above.
            #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
            id: ObjectId::from(surface.id()),
            #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
            data: Box::new(surface),
            #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
            id: ObjectId::UNUSED,
            #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
            data: Box::new(surface.1),
            config: Mutex::new(None),
        })
    }
    /// Polls all devices of this instance; returns the backend's report of
    /// whether the queues are empty.
    pub fn poll_all(&self, force_wait: bool) -> bool {
        self.context.instance_poll_all_devices(force_wait)
    }
    /// Generates a wgpu-core resource report (native backends only).
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub fn generate_report(&self) -> wgc::hub::GlobalReport {
        self.context
            .as_any()
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .generate_report()
    }
}
impl Adapter {
    /// Requests a connection to the physical device, resolving to a
    /// [`Device`] and its [`Queue`] or a [`RequestDeviceError`].
    ///
    /// `trace_path` optionally enables API call tracing into that directory.
    pub fn request_device(
        &self,
        desc: &DeviceDescriptor,
        trace_path: Option<&std::path::Path>,
    ) -> impl Future<Output = Result<(Device, Queue), RequestDeviceError>> + Send {
        let context = Arc::clone(&self.context);
        // Kick off the request before moving into the returned future.
        let device = DynContext::adapter_request_device(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            desc,
            trace_path,
        );
        async move {
            device.await.map(
                |DeviceRequest {
                     device_id,
                     device_data,
                     queue_id,
                     queue_data,
                 }| {
                    (
                        Device {
                            context: Arc::clone(&context),
                            id: device_id,
                            data: device_data,
                        },
                        Queue {
                            context,
                            id: queue_id,
                            data: queue_data,
                        },
                    )
                },
            )
        }
    }
    /// Creates a device from an already-open hal device (native backends only).
    ///
    /// # Safety
    /// `hal_device` must be a valid open device for backend `A` belonging to
    /// this adapter.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn create_device_from_hal<A: wgc::hub::HalApi>(
        &self,
        hal_device: hal::OpenDevice<A>,
        desc: &DeviceDescriptor,
        trace_path: Option<&std::path::Path>,
    ) -> Result<(Device, Queue), RequestDeviceError> {
        let context = Arc::clone(&self.context);
        unsafe {
            self.context
                .as_any()
                // The cfg guarantees the context is the native backend Context.
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .create_device_from_hal(&self.id.into(), hal_device, desc, trace_path)
        }
        .map(|(device, queue)| {
            (
                Device {
                    context: Arc::clone(&context),
                    id: device.id().into(),
                    data: Box::new(device),
                },
                Queue {
                    context,
                    id: queue.id().into(),
                    data: Box::new(queue),
                },
            )
        })
    }
    /// Runs `hal_adapter_callback` with the underlying hal adapter for
    /// backend `A` (or `None` if the backend does not match).
    ///
    /// # Safety
    /// The callback must not use the hal adapter beyond the callback's scope.
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn as_hal<A: wgc::hub::HalApi, F: FnOnce(Option<&A::Adapter>) -> R, R>(
        &self,
        hal_adapter_callback: F,
    ) -> R {
        unsafe {
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .adapter_as_hal::<A, F, R>(self.id.into(), hal_adapter_callback)
        }
    }
    /// Returns whether this adapter can present to the given surface.
    pub fn is_surface_supported(&self, surface: &Surface) -> bool {
        DynContext::adapter_is_surface_supported(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            &surface.id,
            surface.data.as_ref(),
        )
    }
    /// Returns the features this adapter supports.
    pub fn features(&self) -> Features {
        DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref())
    }
    /// Returns the best limits this adapter supports.
    pub fn limits(&self) -> Limits {
        DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref())
    }
    /// Returns identifying information about this adapter.
    pub fn get_info(&self) -> AdapterInfo {
        DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref())
    }
    /// Returns this adapter's downlevel capabilities.
    pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
        DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref())
    }
    /// Returns the features supported for the given texture format on this adapter.
    pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
        DynContext::adapter_get_texture_format_features(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            format,
        )
    }
    /// Returns the adapter's presentation timestamp.
    pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
        DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref())
    }
}
impl Device {
pub fn poll(&self, maintain: Maintain) -> bool {
DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain)
}
pub fn features(&self) -> Features {
DynContext::device_features(&*self.context, &self.id, self.data.as_ref())
}
pub fn limits(&self) -> Limits {
DynContext::device_limits(&*self.context, &self.id, self.data.as_ref())
}
pub fn create_shader_module(&self, desc: ShaderModuleDescriptor) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
wgt::ShaderBoundChecks::new(),
);
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
pub unsafe fn create_shader_module_unchecked(
&self,
desc: ShaderModuleDescriptor,
) -> ShaderModule {
let (id, data) = DynContext::device_create_shader_module(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
unsafe { wgt::ShaderBoundChecks::unchecked() },
);
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
pub unsafe fn create_shader_module_spirv(
&self,
desc: &ShaderModuleDescriptorSpirV,
) -> ShaderModule {
let (id, data) = unsafe {
DynContext::device_create_shader_module_spirv(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
)
};
ShaderModule {
context: Arc::clone(&self.context),
id,
data,
}
}
pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor) -> CommandEncoder {
let (id, data) = DynContext::device_create_command_encoder(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
CommandEncoder {
context: Arc::clone(&self.context),
id: Some(id),
data,
}
}
pub fn create_render_bundle_encoder(
&self,
desc: &RenderBundleEncoderDescriptor,
) -> RenderBundleEncoder {
let (id, data) = DynContext::device_create_render_bundle_encoder(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
RenderBundleEncoder {
context: Arc::clone(&self.context),
id,
data,
parent: self,
_p: Default::default(),
}
}
pub fn create_bind_group(&self, desc: &BindGroupDescriptor) -> BindGroup {
let (id, data) = DynContext::device_create_bind_group(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
BindGroup {
context: Arc::clone(&self.context),
id,
data,
}
}
pub fn create_bind_group_layout(&self, desc: &BindGroupLayoutDescriptor) -> BindGroupLayout {
let (id, data) = DynContext::device_create_bind_group_layout(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
BindGroupLayout {
context: Arc::clone(&self.context),
id,
data,
}
}
pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor) -> PipelineLayout {
let (id, data) = DynContext::device_create_pipeline_layout(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
PipelineLayout {
context: Arc::clone(&self.context),
id,
data,
}
}
pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor) -> RenderPipeline {
let (id, data) = DynContext::device_create_render_pipeline(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
RenderPipeline {
context: Arc::clone(&self.context),
id,
data,
}
}
pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor) -> ComputePipeline {
let (id, data) = DynContext::device_create_compute_pipeline(
&*self.context,
&self.id,
self.data.as_ref(),
desc,
);
ComputePipeline {
context: Arc::clone(&self.context),
id,
data,
}
}
/// Creates a new [`Buffer`].
///
/// If `desc.mapped_at_creation` is set, the whole buffer starts out mapped,
/// which is recorded in the buffer's [`MapContext`] bookkeeping.
pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer {
    let mut map_context = MapContext::new(desc.size);
    if desc.mapped_at_creation {
        // Mark the entire buffer as mapped so get_mapped_range/unmap stay consistent.
        map_context.initial_range = 0..desc.size;
    }
    let (id, data) =
        DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc);
    Buffer {
        context: Arc::clone(&self.context),
        id,
        data,
        map_context: Mutex::new(map_context),
        size: desc.size,
        usage: desc.usage,
    }
}
/// Creates a new [`Texture`].
pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture {
    let (id, data) =
        DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc);
    Texture {
        context: Arc::clone(&self.context),
        id,
        data,
        owned: true,
        // Keep a copy of the descriptor for the size/format/usage getters;
        // the label and borrowed view_formats are dropped so the copy is 'static.
        descriptor: TextureDescriptor {
            label: None,
            view_formats: &[],
            ..desc.clone()
        },
    }
}
/// Wraps a raw `wgpu-hal` texture in a [`Texture`].
///
/// # Safety
///
/// `hal_texture` must be created from this device's internal handle and
/// must match the given `desc`. (NOTE(review): exact hal requirements are
/// enforced by the backend `create_texture_from_hal` — confirm there.)
#[cfg(any(
    not(target_arch = "wasm32"),
    target_os = "emscripten",
    feature = "webgl"
))]
pub unsafe fn create_texture_from_hal<A: wgc::hub::HalApi>(
    &self,
    hal_texture: A::Texture,
    desc: &TextureDescriptor,
) -> Texture {
    let texture = unsafe {
        // This entry point only exists on the wgpu-core backend, so the
        // downcast to the concrete Context cannot fail.
        self.context
            .as_any()
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .create_texture_from_hal::<A>(
                hal_texture,
                self.data.as_ref().downcast_ref().unwrap(),
                desc,
            )
    };
    Texture {
        context: Arc::clone(&self.context),
        id: ObjectId::from(texture.id()),
        data: Box::new(texture),
        owned: true,
        // Label and borrowed view_formats are stripped so the stored copy is 'static.
        descriptor: TextureDescriptor {
            label: None,
            view_formats: &[],
            ..desc.clone()
        },
    }
}
/// Creates a new [`Sampler`] from the given descriptor.
pub fn create_sampler(&self, desc: &SamplerDescriptor) -> Sampler {
    let (id, data) =
        DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc);
    Sampler {
        context: Arc::clone(&self.context),
        id,
        data,
    }
}
/// Creates a new [`QuerySet`] from the given descriptor.
pub fn create_query_set(&self, desc: &QuerySetDescriptor) -> QuerySet {
    let (id, data) =
        DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc);
    QuerySet {
        context: Arc::clone(&self.context),
        id,
        data,
    }
}
/// Registers a handler for errors that are not captured by an error scope.
pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
    self.context
        .device_on_uncaptured_error(&self.id, self.data.as_ref(), handler);
}
/// Pushes a new error scope that captures errors matching `filter`.
pub fn push_error_scope(&self, filter: ErrorFilter) {
    self.context
        .device_push_error_scope(&self.id, self.data.as_ref(), filter);
}
/// Pops the most recent error scope, resolving to its captured error (if any).
pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + Send {
    self.context
        .device_pop_error_scope(&self.id, self.data.as_ref())
}
/// Starts a frame capture in an attached graphics debugger, if one is present.
pub fn start_capture(&self) {
    DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref())
}
/// Stops a frame capture previously started with [`Device::start_capture`].
pub fn stop_capture(&self) {
    DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref())
}
/// Invokes `hal_device_callback` with the underlying `wgpu-hal` device.
///
/// # Safety
///
/// The callback must not use the hal device beyond the callback's scope.
/// (NOTE(review): the precise contract is defined by the backend's
/// `device_as_hal` — confirm there.)
#[cfg(any(
    not(target_arch = "wasm32"),
    target_os = "emscripten",
    feature = "webgl"
))]
pub unsafe fn as_hal<A: wgc::hub::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
    &self,
    hal_device_callback: F,
) -> R {
    unsafe {
        // Only the wgpu-core backend supports this, so the downcast cannot fail.
        self.context
            .as_any()
            .downcast_ref::<crate::backend::Context>()
            .unwrap()
            .device_as_hal::<A, F, R>(
                self.data.as_ref().downcast_ref().unwrap(),
                hal_device_callback,
            )
    }
}
}
impl Drop for Device {
    /// Releases the backend device. Skipped while unwinding so a panic
    /// elsewhere cannot cascade into backend teardown errors.
    fn drop(&mut self) {
        if thread::panicking() {
            return;
        }
        self.context.device_drop(&self.id, self.data.as_ref());
    }
}
/// Error returned when [`Adapter::request_device`] fails.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RequestDeviceError;
static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);
impl Display for RequestDeviceError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Requesting a device failed")
    }
}
impl error::Error for RequestDeviceError {}
/// Error returned when creating a [`Surface`] fails.
///
/// `#[non_exhaustive]` so fields describing the cause can be added later
/// without a breaking change.
#[derive(Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
pub struct CreateSurfaceError {
}
static_assertions::assert_impl_all!(CreateSurfaceError: Send, Sync);
impl Display for CreateSurfaceError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Creating a surface failed")
    }
}
impl error::Error for CreateSurfaceError {}
/// Error delivered to a [`BufferSlice::map_async`] callback when mapping fails.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
impl Display for BufferAsyncError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}
impl error::Error for BufferAsyncError {}
/// How a mapped buffer range will be accessed.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map for reading by the host.
    Read,
    /// Map for writing by the host.
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);
/// Converts a generic range over buffer addresses into an `(offset, size)` pair,
/// where `size == None` means "to the end of the buffer".
///
/// # Panics
///
/// Panics if the resulting slice would be empty, if the end bound precedes the
/// start bound, or if a bound overflows `BufferAddress`. Checked arithmetic is
/// used so these misuses panic deterministically in release builds too, instead
/// of silently wrapping.
fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
) -> (BufferAddress, Option<BufferSize>) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        // An excluded start bound starts one past the bound.
        Bound::Excluded(&bound) => bound
            .checked_add(1)
            .expect("slice start bound overflows BufferAddress"),
        Bound::Unbounded => 0,
    };
    let size = match bounds.end_bound() {
        Bound::Included(&bound) => Some(
            bound
                .checked_add(1)
                .and_then(|end| end.checked_sub(offset))
                .expect("slice end bound precedes its start bound"),
        ),
        Bound::Excluded(&bound) => Some(
            bound
                .checked_sub(offset)
                .expect("slice end bound precedes its start bound"),
        ),
        Bound::Unbounded => None,
    }
    .map(|size| BufferSize::new(size).expect("Buffer slices can not be empty"));
    (offset, size)
}
#[cfg(test)]
mod tests {
    use crate::BufferSize;
    // Pins the offset/size mapping for all four bound combinations.
    #[test]
    fn range_to_offset_size_works() {
        assert_eq!(crate::range_to_offset_size(0..2), (0, BufferSize::new(2)));
        assert_eq!(crate::range_to_offset_size(2..5), (2, BufferSize::new(3)));
        assert_eq!(crate::range_to_offset_size(..), (0, None));
        assert_eq!(crate::range_to_offset_size(21..), (21, None));
        assert_eq!(crate::range_to_offset_size(0..), (0, None));
        assert_eq!(crate::range_to_offset_size(..21), (0, BufferSize::new(21)));
    }
    // Empty slices are rejected with a panic rather than producing a zero size.
    #[test]
    #[should_panic]
    fn range_to_offset_size_panics_for_empty_range() {
        crate::range_to_offset_size(123..123);
    }
    #[test]
    #[should_panic]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        crate::range_to_offset_size(..0);
    }
}
/// Read-only view of a mapped buffer range; created by [`BufferSlice::get_mapped_range`].
/// Dropping it releases the range from the buffer's map bookkeeping.
#[derive(Debug)]
pub struct BufferView<'a> {
    slice: BufferSlice<'a>,
    data: Box<dyn crate::context::BufferMappedRange>,
}
/// Writable view of a mapped buffer range; created by [`BufferSlice::get_mapped_range_mut`].
#[derive(Debug)]
pub struct BufferViewMut<'a> {
    slice: BufferSlice<'a>,
    data: Box<dyn crate::context::BufferMappedRange>,
    // True when the buffer also has MAP_READ usage; reads are warned about otherwise.
    readable: bool,
}
impl std::ops::Deref for BufferView<'_> {
    type Target = [u8];
    #[inline]
    fn deref(&self) -> &[u8] {
        self.data.slice()
    }
}
impl AsRef<[u8]> for BufferView<'_> {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.data.slice()
    }
}
impl AsMut<[u8]> for BufferViewMut<'_> {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.data.slice_mut()
    }
}
impl Deref for BufferViewMut<'_> {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        // Reading through a write-only mapping still works, but warn since the
        // buffer lacks MAP_READ usage.
        if !self.readable {
            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        }
        self.data.slice()
    }
}
impl DerefMut for BufferViewMut<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.data.slice_mut()
    }
}
impl Drop for BufferView<'_> {
    // Return the mapped sub-range to the buffer's map bookkeeping.
    fn drop(&mut self) {
        self.slice
            .buffer
            .map_context
            .lock()
            .remove(self.slice.offset, self.slice.size);
    }
}
impl Drop for BufferViewMut<'_> {
    // Same bookkeeping release as the read-only view.
    fn drop(&mut self) {
        self.slice
            .buffer
            .map_context
            .lock()
            .remove(self.slice.offset, self.slice.size);
    }
}
impl Buffer {
    /// Returns a [`BindingResource`] covering the entire buffer.
    pub fn as_entire_binding(&self) -> BindingResource {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }
    /// Returns a [`BufferBinding`] covering the entire buffer (offset 0, no size limit).
    pub fn as_entire_buffer_binding(&self) -> BufferBinding {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }
    /// Returns a [`BufferSlice`] for the given address range.
    ///
    /// Panics (via `range_to_offset_size`) if the range is empty or inverted.
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice {
        let (offset, size) = range_to_offset_size(bounds);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }
    /// Unmaps the buffer and clears its mapped-range bookkeeping.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref());
    }
    /// Destroys the underlying buffer resource immediately.
    pub fn destroy(&self) {
        DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref());
    }
    /// Returns the size of the buffer in bytes, as given at creation.
    pub fn size(&self) -> BufferAddress {
        self.size
    }
    /// Returns the usage flags the buffer was created with.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }
}
impl<'a> BufferSlice<'a> {
    /// Asynchronously maps this slice; `callback` fires when mapping completes.
    ///
    /// # Panics
    ///
    /// Panics if any part of the buffer is already mapped.
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + Send + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        // An empty initial_range means "nothing mapped"; anything else is a double-map.
        assert_eq!(
            mc.initial_range,
            0..0,
            "Buffer {:?} is already mapped",
            self.buffer.id
        );
        // A sliced size of None means "to the end of the buffer".
        let end = match self.size {
            Some(s) => self.offset + s.get(),
            None => mc.total_size,
        };
        mc.initial_range = self.offset..end;
        DynContext::buffer_map_async(
            &*self.buffer.context,
            &self.buffer.id,
            self.buffer.data.as_ref(),
            mode,
            self.offset..end,
            Box::new(callback),
        )
    }
    /// Returns a read-only view of the mapped slice contents.
    pub fn get_mapped_range(&self) -> BufferView<'a> {
        // `add` registers this sub-range in the map bookkeeping and yields its end.
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let data = DynContext::buffer_get_mapped_range(
            &*self.buffer.context,
            &self.buffer.id,
            self.buffer.data.as_ref(),
            self.offset..end,
        );
        BufferView { slice: *self, data }
    }
    /// Returns a writable view of the mapped slice contents.
    pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let data = DynContext::buffer_get_mapped_range(
            &*self.buffer.context,
            &self.buffer.id,
            self.buffer.data.as_ref(),
            self.offset..end,
        );
        BufferViewMut {
            slice: *self,
            data,
            // Reads through the view are only "free" if the buffer has MAP_READ.
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }
}
impl Drop for Buffer {
    /// Releases the backend buffer. Skipped while unwinding to avoid
    /// compounding an in-flight panic.
    fn drop(&mut self) {
        if thread::panicking() {
            return;
        }
        self.context.buffer_drop(&self.id, self.data.as_ref());
    }
}
impl Texture {
    /// Invokes `hal_texture_callback` with the underlying `wgpu-hal` texture.
    ///
    /// # Safety
    ///
    /// The callback must not use the hal texture beyond the callback's scope.
    /// (NOTE(review): the precise contract is defined by the backend's
    /// `texture_as_hal` — confirm there.)
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn as_hal<A: wgc::hub::HalApi, F: FnOnce(Option<&A::Texture>)>(
        &self,
        hal_texture_callback: F,
    ) {
        let texture = self.data.as_ref().downcast_ref().unwrap();
        unsafe {
            // Only the wgpu-core backend supports this, so the downcast cannot fail.
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .texture_as_hal::<A, F>(texture, hal_texture_callback)
        }
    }
    /// Creates a [`TextureView`] of this texture.
    pub fn create_view(&self, desc: &TextureViewDescriptor) -> TextureView {
        let (id, data) =
            DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc);
        TextureView {
            context: Arc::clone(&self.context),
            id,
            data,
        }
    }
    /// Destroys the underlying texture resource immediately.
    pub fn destroy(&self) {
        DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref());
    }
    /// Returns an [`ImageCopyTexture`] targeting mip 0, origin (0,0,0), all aspects.
    pub fn as_image_copy(&self) -> ImageCopyTexture {
        ImageCopyTexture {
            texture: self,
            mip_level: 0,
            origin: Origin3d::ZERO,
            aspect: TextureAspect::All,
        }
    }
    /// Returns the size given at creation.
    pub fn size(&self) -> Extent3d {
        self.descriptor.size
    }
    /// Returns the width given at creation.
    pub fn width(&self) -> u32 {
        self.descriptor.size.width
    }
    /// Returns the height given at creation.
    pub fn height(&self) -> u32 {
        self.descriptor.size.height
    }
    /// Returns the depth or array-layer count given at creation.
    pub fn depth_or_array_layers(&self) -> u32 {
        self.descriptor.size.depth_or_array_layers
    }
    /// Returns the mip level count given at creation.
    pub fn mip_level_count(&self) -> u32 {
        self.descriptor.mip_level_count
    }
    /// Returns the sample count given at creation.
    pub fn sample_count(&self) -> u32 {
        self.descriptor.sample_count
    }
    /// Returns the dimension (1D/2D/3D) given at creation.
    pub fn dimension(&self) -> TextureDimension {
        self.descriptor.dimension
    }
    /// Returns the format given at creation.
    pub fn format(&self) -> TextureFormat {
        self.descriptor.format
    }
    /// Returns the usage flags given at creation.
    pub fn usage(&self) -> TextureUsages {
        self.descriptor.usage
    }
}
impl Drop for Texture {
    fn drop(&mut self) {
        // Surface textures are not owned and must not be dropped here.
        if self.owned && !thread::panicking() {
            self.context.texture_drop(&self.id, self.data.as_ref());
        }
    }
}
impl Drop for TextureView {
    fn drop(&mut self) {
        if !thread::panicking() {
            self.context.texture_view_drop(&self.id, self.data.as_ref());
        }
    }
}
impl CommandEncoder {
    /// Finishes recording, consuming the encoder and returning a [`CommandBuffer`].
    pub fn finish(mut self) -> CommandBuffer {
        // `take` empties self.id so any later teardown sees the encoder as consumed.
        let (id, data) = DynContext::command_encoder_finish(
            &*self.context,
            self.id.take().unwrap(),
            self.data.as_mut(),
        );
        CommandBuffer {
            context: Arc::clone(&self.context),
            id: Some(id),
            data: Some(data),
        }
    }
    /// Begins a render pass; the pass borrows the encoder until it is dropped.
    pub fn begin_render_pass<'pass>(
        &'pass mut self,
        desc: &RenderPassDescriptor<'pass, '_>,
    ) -> RenderPass<'pass> {
        let id = self.id.as_ref().unwrap();
        let (id, data) = DynContext::command_encoder_begin_render_pass(
            &*self.context,
            id,
            self.data.as_ref(),
            desc,
        );
        RenderPass {
            id,
            data,
            parent: self,
        }
    }
    /// Begins a compute pass; the pass borrows the encoder until it is dropped.
    pub fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) -> ComputePass {
        let id = self.id.as_ref().unwrap();
        let (id, data) = DynContext::command_encoder_begin_compute_pass(
            &*self.context,
            id,
            self.data.as_ref(),
            desc,
        );
        ComputePass {
            id,
            data,
            parent: self,
        }
    }
    /// Records a buffer-to-buffer copy of `copy_size` bytes.
    pub fn copy_buffer_to_buffer(
        &mut self,
        source: &Buffer,
        source_offset: BufferAddress,
        destination: &Buffer,
        destination_offset: BufferAddress,
        copy_size: BufferAddress,
    ) {
        DynContext::command_encoder_copy_buffer_to_buffer(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            &source.id,
            source.data.as_ref(),
            source_offset,
            &destination.id,
            destination.data.as_ref(),
            destination_offset,
            copy_size,
        );
    }
    /// Records a buffer-to-texture copy.
    pub fn copy_buffer_to_texture(
        &mut self,
        source: ImageCopyBuffer,
        destination: ImageCopyTexture,
        copy_size: Extent3d,
    ) {
        DynContext::command_encoder_copy_buffer_to_texture(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            source,
            destination,
            copy_size,
        );
    }
    /// Records a texture-to-buffer copy.
    pub fn copy_texture_to_buffer(
        &mut self,
        source: ImageCopyTexture,
        destination: ImageCopyBuffer,
        copy_size: Extent3d,
    ) {
        DynContext::command_encoder_copy_texture_to_buffer(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            source,
            destination,
            copy_size,
        );
    }
    /// Records a texture-to-texture copy.
    pub fn copy_texture_to_texture(
        &mut self,
        source: ImageCopyTexture,
        destination: ImageCopyTexture,
        copy_size: Extent3d,
    ) {
        DynContext::command_encoder_copy_texture_to_texture(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            source,
            destination,
            copy_size,
        );
    }
    /// Records a clear of the given subresource range of `texture`.
    pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
        DynContext::command_encoder_clear_texture(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            texture,
            subresource_range,
        );
    }
    /// Records a fill-with-zero of the given buffer region (`size == None` means to the end).
    pub fn clear_buffer(
        &mut self,
        buffer: &Buffer,
        offset: BufferAddress,
        size: Option<BufferSize>,
    ) {
        DynContext::command_encoder_clear_buffer(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            buffer,
            offset,
            size,
        );
    }
    /// Inserts a debug marker label at this point in the command stream.
    pub fn insert_debug_marker(&mut self, label: &str) {
        let id = self.id.as_ref().unwrap();
        DynContext::command_encoder_insert_debug_marker(
            &*self.context,
            id,
            self.data.as_ref(),
            label,
        );
    }
    /// Opens a named debug group; pair with [`CommandEncoder::pop_debug_group`].
    pub fn push_debug_group(&mut self, label: &str) {
        let id = self.id.as_ref().unwrap();
        DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label);
    }
    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        let id = self.id.as_ref().unwrap();
        DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref());
    }
}
impl CommandEncoder {
    /// Records a timestamp write into slot `query_index` of `query_set`.
    pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
        DynContext::command_encoder_write_timestamp(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_mut(),
            &query_set.id,
            query_set.data.as_ref(),
            query_index,
        )
    }
}
impl CommandEncoder {
    /// Resolves the queries in `query_range` of `query_set` into `destination`
    /// starting at `destination_offset`.
    pub fn resolve_query_set(
        &mut self,
        query_set: &QuerySet,
        query_range: Range<u32>,
        destination: &Buffer,
        destination_offset: BufferAddress,
    ) {
        DynContext::command_encoder_resolve_query_set(
            &*self.context,
            self.id.as_ref().unwrap(),
            self.data.as_ref(),
            &query_set.id,
            query_set.data.as_ref(),
            // The backend takes (first query, query count), not a range.
            query_range.start,
            query_range.end - query_range.start,
            &destination.id,
            destination.data.as_ref(),
            destination_offset,
        )
    }
}
impl<'a> RenderPass<'a> {
    /// Binds `bind_group` to slot `index` with the given dynamic offsets.
    pub fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: &'a BindGroup,
        offsets: &[DynamicOffset],
    ) {
        DynContext::render_pass_set_bind_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            index,
            &bind_group.id,
            bind_group.data.as_ref(),
            offsets,
        )
    }
    /// Sets the active render pipeline for subsequent draws.
    pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
        DynContext::render_pass_set_pipeline(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &pipeline.id,
            pipeline.data.as_ref(),
        )
    }
    /// Sets the blend constant color.
    pub fn set_blend_constant(&mut self, color: Color) {
        DynContext::render_pass_set_blend_constant(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            color,
        )
    }
    /// Sets the index buffer for subsequent indexed draws.
    pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
        DynContext::render_pass_set_index_buffer(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &buffer_slice.buffer.id,
            buffer_slice.buffer.data.as_ref(),
            index_format,
            buffer_slice.offset,
            buffer_slice.size,
        )
    }
    /// Sets the vertex buffer for vertex input slot `slot`.
    pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
        DynContext::render_pass_set_vertex_buffer(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            slot,
            &buffer_slice.buffer.id,
            buffer_slice.buffer.data.as_ref(),
            buffer_slice.offset,
            buffer_slice.size,
        )
    }
    /// Sets the scissor rectangle (in pixels).
    pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
        DynContext::render_pass_set_scissor_rect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            x,
            y,
            width,
            height,
        );
    }
    /// Sets the viewport rectangle and depth range.
    pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
        DynContext::render_pass_set_viewport(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            x,
            y,
            w,
            h,
            min_depth,
            max_depth,
        );
    }
    /// Sets the stencil reference value used by stencil compare operations.
    pub fn set_stencil_reference(&mut self, reference: u32) {
        DynContext::render_pass_set_stencil_reference(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            reference,
        );
    }
    /// Draws `vertices` for each instance in `instances`.
    pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
        DynContext::render_pass_draw(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            vertices,
            instances,
        )
    }
    /// Inserts a debug marker label at this point in the pass.
    pub fn insert_debug_marker(&mut self, label: &str) {
        DynContext::render_pass_insert_debug_marker(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            label,
        );
    }
    /// Opens a named debug group; pair with [`RenderPass::pop_debug_group`].
    pub fn push_debug_group(&mut self, label: &str) {
        DynContext::render_pass_push_debug_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            label,
        );
    }
    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        DynContext::render_pass_pop_debug_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
        );
    }
    /// Draws using the bound index buffer.
    pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
        DynContext::render_pass_draw_indexed(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            indices,
            base_vertex,
            instances,
        );
    }
    /// Draws with parameters read from `indirect_buffer` at `indirect_offset`.
    pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        DynContext::render_pass_draw_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
        );
    }
    /// Indexed draw with parameters read from `indirect_buffer` at `indirect_offset`.
    pub fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        DynContext::render_pass_draw_indexed_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
        );
    }
    /// Executes pre-recorded [`RenderBundle`]s within this pass.
    pub fn execute_bundles<I: IntoIterator<Item = &'a RenderBundle> + 'a>(
        &mut self,
        render_bundles: I,
    ) {
        DynContext::render_pass_execute_bundles(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            Box::new(
                render_bundles
                    .into_iter()
                    .map(|rb| (&rb.id, rb.data.as_ref())),
            ),
        )
    }
}
impl<'a> RenderPass<'a> {
    /// Issues `count` indirect draws, reading consecutive argument structs
    /// from `indirect_buffer` starting at `indirect_offset`.
    pub fn multi_draw_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
        count: u32,
    ) {
        DynContext::render_pass_multi_draw_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
            count,
        );
    }
    /// Indexed variant of [`RenderPass::multi_draw_indirect`].
    pub fn multi_draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
        count: u32,
    ) {
        DynContext::render_pass_multi_draw_indexed_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
            count,
        );
    }
}
impl<'a> RenderPass<'a> {
    /// Like [`RenderPass::multi_draw_indirect`], but the draw count is read
    /// from `count_buffer` at `count_offset`, clamped to `max_count`.
    pub fn multi_draw_indirect_count(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
        count_buffer: &'a Buffer,
        count_offset: BufferAddress,
        max_count: u32,
    ) {
        DynContext::render_pass_multi_draw_indirect_count(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
            &count_buffer.id,
            count_buffer.data.as_ref(),
            count_offset,
            max_count,
        );
    }
    /// Indexed variant of [`RenderPass::multi_draw_indirect_count`].
    pub fn multi_draw_indexed_indirect_count(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
        count_buffer: &'a Buffer,
        count_offset: BufferAddress,
        max_count: u32,
    ) {
        DynContext::render_pass_multi_draw_indexed_indirect_count(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
            &count_buffer.id,
            count_buffer.data.as_ref(),
            count_offset,
            max_count,
        );
    }
}
impl<'a> RenderPass<'a> {
    /// Uploads `data` into the push-constant range at `offset` for `stages`.
    pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
        DynContext::render_pass_set_push_constants(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            stages,
            offset,
            data,
        );
    }
}
impl<'a> RenderPass<'a> {
    /// Records a timestamp write into slot `query_index` of `query_set`.
    pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
        DynContext::render_pass_write_timestamp(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &query_set.id,
            query_set.data.as_ref(),
            query_index,
        )
    }
}
impl<'a> RenderPass<'a> {
    /// Begins collecting pipeline statistics into slot `query_index` of `query_set`.
    pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
        DynContext::render_pass_begin_pipeline_statistics_query(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &query_set.id,
            query_set.data.as_ref(),
            query_index,
        );
    }
    /// Ends the pipeline-statistics query started by
    /// [`RenderPass::begin_pipeline_statistics_query`].
    pub fn end_pipeline_statistics_query(&mut self) {
        DynContext::render_pass_end_pipeline_statistics_query(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
        );
    }
}
impl<'a> Drop for RenderPass<'a> {
    // Dropping the pass ends it on the parent encoder; skipped while unwinding.
    fn drop(&mut self) {
        if !thread::panicking() {
            let parent_id = self.parent.id.as_ref().unwrap();
            self.parent.context.command_encoder_end_render_pass(
                parent_id,
                self.parent.data.as_ref(),
                &mut self.id,
                self.data.as_mut(),
            );
        }
    }
}
impl<'a> ComputePass<'a> {
    /// Binds `bind_group` to slot `index` with the given dynamic offsets.
    pub fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: &'a BindGroup,
        offsets: &[DynamicOffset],
    ) {
        DynContext::compute_pass_set_bind_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            index,
            &bind_group.id,
            bind_group.data.as_ref(),
            offsets,
        );
    }
    /// Sets the active compute pipeline for subsequent dispatches.
    pub fn set_pipeline(&mut self, pipeline: &'a ComputePipeline) {
        DynContext::compute_pass_set_pipeline(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &pipeline.id,
            pipeline.data.as_ref(),
        );
    }
    /// Inserts a debug marker label at this point in the pass.
    pub fn insert_debug_marker(&mut self, label: &str) {
        DynContext::compute_pass_insert_debug_marker(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            label,
        );
    }
    /// Opens a named debug group; pair with [`ComputePass::pop_debug_group`].
    pub fn push_debug_group(&mut self, label: &str) {
        DynContext::compute_pass_push_debug_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            label,
        );
    }
    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        DynContext::compute_pass_pop_debug_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
        );
    }
    /// Dispatches `x * y * z` workgroups.
    pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
        DynContext::compute_pass_dispatch_workgroups(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            x,
            y,
            z,
        );
    }
    /// Dispatches with workgroup counts read from `indirect_buffer` at `indirect_offset`.
    pub fn dispatch_workgroups_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        DynContext::compute_pass_dispatch_workgroups_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
        );
    }
}
impl<'a> ComputePass<'a> {
    /// Uploads `data` into the compute push-constant range at `offset`.
    pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
        DynContext::compute_pass_set_push_constants(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            offset,
            data,
        );
    }
}
impl<'a> ComputePass<'a> {
    /// Records a timestamp write into slot `query_index` of `query_set`.
    pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
        DynContext::compute_pass_write_timestamp(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &query_set.id,
            query_set.data.as_ref(),
            query_index,
        )
    }
}
impl<'a> ComputePass<'a> {
    /// Begins collecting pipeline statistics into slot `query_index` of `query_set`.
    pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
        DynContext::compute_pass_begin_pipeline_statistics_query(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &query_set.id,
            query_set.data.as_ref(),
            query_index,
        );
    }
    /// Ends the pipeline-statistics query started by
    /// [`ComputePass::begin_pipeline_statistics_query`].
    pub fn end_pipeline_statistics_query(&mut self) {
        DynContext::compute_pass_end_pipeline_statistics_query(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
        );
    }
}
impl<'a> Drop for ComputePass<'a> {
    // Dropping the pass ends it on the parent encoder; skipped while unwinding.
    fn drop(&mut self) {
        if !thread::panicking() {
            let parent_id = self.parent.id.as_ref().unwrap();
            self.parent.context.command_encoder_end_compute_pass(
                parent_id,
                self.parent.data.as_ref(),
                &mut self.id,
                self.data.as_mut(),
            );
        }
    }
}
impl<'a> RenderBundleEncoder<'a> {
    /// Finishes recording, consuming the encoder and returning a reusable [`RenderBundle`].
    pub fn finish(self, desc: &RenderBundleDescriptor) -> RenderBundle {
        let (id, data) =
            DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc);
        RenderBundle {
            context: Arc::clone(&self.context),
            id,
            data,
        }
    }
    /// Binds `bind_group` to slot `index` with the given dynamic offsets.
    pub fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: &'a BindGroup,
        offsets: &[DynamicOffset],
    ) {
        DynContext::render_bundle_encoder_set_bind_group(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            index,
            &bind_group.id,
            bind_group.data.as_ref(),
            offsets,
        )
    }
    /// Sets the active render pipeline for subsequent draws.
    pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
        DynContext::render_bundle_encoder_set_pipeline(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &pipeline.id,
            pipeline.data.as_ref(),
        )
    }
    /// Sets the index buffer for subsequent indexed draws.
    pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
        DynContext::render_bundle_encoder_set_index_buffer(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &buffer_slice.buffer.id,
            buffer_slice.buffer.data.as_ref(),
            index_format,
            buffer_slice.offset,
            buffer_slice.size,
        )
    }
    /// Sets the vertex buffer for vertex input slot `slot`.
    pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
        DynContext::render_bundle_encoder_set_vertex_buffer(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            slot,
            &buffer_slice.buffer.id,
            buffer_slice.buffer.data.as_ref(),
            buffer_slice.offset,
            buffer_slice.size,
        )
    }
    /// Draws `vertices` for each instance in `instances`.
    pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
        DynContext::render_bundle_encoder_draw(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            vertices,
            instances,
        )
    }
    /// Draws using the bound index buffer.
    pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
        DynContext::render_bundle_encoder_draw_indexed(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            indices,
            base_vertex,
            instances,
        );
    }
    /// Draws with parameters read from `indirect_buffer` at `indirect_offset`.
    pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        DynContext::render_bundle_encoder_draw_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
        );
    }
    /// Indexed draw with parameters read from `indirect_buffer` at `indirect_offset`.
    pub fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        DynContext::render_bundle_encoder_draw_indexed_indirect(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            &indirect_buffer.id,
            indirect_buffer.data.as_ref(),
            indirect_offset,
        );
    }
}
impl<'a> RenderBundleEncoder<'a> {
    /// Uploads `data` into the push-constant range at `offset` for `stages`.
    pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
        DynContext::render_bundle_encoder_set_push_constants(
            &*self.parent.context,
            &mut self.id,
            self.data.as_mut(),
            stages,
            offset,
            data,
        );
    }
}
/// Write-only staging view returned by [`Queue::write_buffer_with`];
/// the staged bytes are submitted to the buffer when the view is dropped.
pub struct QueueWriteBufferView<'a> {
    queue: &'a Queue,
    buffer: &'a Buffer,
    offset: BufferAddress,
    inner: Box<dyn context::QueueWriteBuffer>,
}
static_assertions::assert_impl_all!(QueueWriteBufferView: Send, Sync);
impl Deref for QueueWriteBufferView<'_> {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        // The view is a staging area, not the buffer itself; reading it is misleading.
        log::warn!("Reading from a QueueWriteBufferView won't yield the contents of the buffer and may be slow.");
        self.inner.slice()
    }
}
impl DerefMut for QueueWriteBufferView<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}
impl<'a> AsMut<[u8]> for QueueWriteBufferView<'a> {
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}
impl<'a> Drop for QueueWriteBufferView<'a> {
    // Submits the staged bytes to the destination buffer on drop.
    fn drop(&mut self) {
        DynContext::queue_write_staging_buffer(
            &*self.queue.context,
            &self.queue.id,
            self.queue.data.as_ref(),
            &self.buffer.id,
            self.buffer.data.as_ref(),
            self.offset,
            &*self.inner,
        );
    }
}
impl Queue {
    /// Schedules a write of `data` into `buffer` starting at `offset`.
    pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
        DynContext::queue_write_buffer(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            &buffer.id,
            buffer.data.as_ref(),
            offset,
            data,
        )
    }
    /// Returns a staging view of `size` bytes to fill in; the write is
    /// submitted when the view is dropped. Returns `None` if the write
    /// fails validation or a staging buffer cannot be created.
    #[must_use]
    pub fn write_buffer_with<'a>(
        &'a self,
        buffer: &'a Buffer,
        offset: BufferAddress,
        size: BufferSize,
    ) -> Option<QueueWriteBufferView<'a>> {
        profiling::scope!("Queue::write_buffer_with");
        // Validate up front so an invalid write never allocates a staging buffer.
        DynContext::queue_validate_write_buffer(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            &buffer.id,
            buffer.data.as_ref(),
            offset,
            size,
        )?;
        let staging_buffer = DynContext::queue_create_staging_buffer(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            size,
        )?;
        Some(QueueWriteBufferView {
            queue: self,
            buffer,
            offset,
            inner: staging_buffer,
        })
    }
    /// Schedules a write of `data` into a texture region.
    pub fn write_texture(
        &self,
        texture: ImageCopyTexture,
        data: &[u8],
        data_layout: ImageDataLayout,
        size: Extent3d,
    ) {
        DynContext::queue_write_texture(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            texture,
            data,
            data_layout,
            size,
        )
    }
    /// Copies an external (browser) image source into a texture. Web only.
    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::ImageCopyExternalImage,
        dest: ImageCopyTextureTagged,
        size: Extent3d,
    ) {
        DynContext::queue_copy_external_image_to_texture(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            source,
            dest,
            size,
        )
    }
    /// Submits the given command buffers for execution, consuming them.
    pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
        &self,
        command_buffers: I,
    ) -> SubmissionIndex {
        let (raw, data) = DynContext::queue_submit(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            Box::new(
                command_buffers
                    .into_iter()
                    // Taking id/data marks each buffer as consumed before it is dropped.
                    .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())),
            ),
        );
        SubmissionIndex(raw, data)
    }
    /// Returns the duration, in nanoseconds, of one timestamp-query tick.
    pub fn get_timestamp_period(&self) -> f32 {
        DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref())
    }
    /// Registers `callback` to run once all currently submitted work completes.
    pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
        DynContext::queue_on_submitted_work_done(
            &*self.context,
            &self.id,
            self.data.as_ref(),
            Box::new(callback),
        )
    }
}
impl SurfaceTexture {
    /// Presents this texture to the surface, consuming it.
    pub fn present(mut self) {
        // Setting the flag first prevents Drop from discarding the texture.
        self.presented = true;
        DynContext::surface_present(
            &*self.texture.context,
            &self.texture.id,
            self.detail.as_ref(),
        );
    }
}
impl Drop for SurfaceTexture {
    /// Discards the acquired texture if it was never presented.
    /// Skipped while unwinding so a panic cannot cascade.
    fn drop(&mut self) {
        if self.presented || thread::panicking() {
            return;
        }
        DynContext::surface_texture_discard(
            &*self.texture.context,
            &self.texture.id,
            self.detail.as_ref(),
        );
    }
}
impl Surface {
pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
DynContext::surface_get_capabilities(
&*self.context,
&self.id,
self.data.as_ref(),
&adapter.id,
adapter.data.as_ref(),
)
}
pub fn get_default_config(
&self,
adapter: &Adapter,
width: u32,
height: u32,
) -> Option<SurfaceConfiguration> {
let caps = self.get_capabilities(adapter);
Some(SurfaceConfiguration {
usage: wgt::TextureUsages::RENDER_ATTACHMENT,
format: *caps.formats.get(0)?,
width,
height,
present_mode: *caps.present_modes.get(0)?,
alpha_mode: wgt::CompositeAlphaMode::Auto,
view_formats: vec![],
})
}
pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
DynContext::surface_configure(
&*self.context,
&self.id,
self.data.as_ref(),
&device.id,
device.data.as_ref(),
config,
);
let mut conf = self.config.lock();
*conf = Some(config.clone());
}
pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
let (texture_id, texture_data, status, detail) =
DynContext::surface_get_current_texture(&*self.context, &self.id, self.data.as_ref());
let suboptimal = match status {
SurfaceStatus::Good => false,
SurfaceStatus::Suboptimal => true,
SurfaceStatus::Timeout => return Err(SurfaceError::Timeout),
SurfaceStatus::Outdated => return Err(SurfaceError::Outdated),
SurfaceStatus::Lost => return Err(SurfaceError::Lost),
};
let guard = self.config.lock();
let config = guard
.as_ref()
.expect("This surface has not been configured yet.");
let descriptor = TextureDescriptor {
label: None,
size: Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
format: config.format,
usage: config.usage,
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
view_formats: &[],
};
texture_id
.zip(texture_data)
.map(|(id, data)| SurfaceTexture {
texture: Texture {
context: Arc::clone(&self.context),
id,
data,
owned: false,
descriptor,
},
suboptimal,
presented: false,
detail,
})
.ok_or(SurfaceError::Lost)
}
#[cfg(any(
not(target_arch = "wasm32"),
target_os = "emscripten",
feature = "webgl"
))]
pub unsafe fn as_hal_mut<A: wgc::hub::HalApi, F: FnOnce(Option<&mut A::Surface>) -> R, R>(
&mut self,
hal_surface_callback: F,
) -> R {
unsafe {
self.context
.as_any()
.downcast_ref::<crate::backend::Context>()
.unwrap()
.surface_as_hal_mut::<A, F, R>(
self.data.downcast_ref().unwrap(),
hal_surface_callback,
)
}
}
}
/// Opaque globally-unique identifier for a wgpu object.
///
/// The `PhantomData<*mut T>` ties the id to its object type without storing
/// a pointer (the only data is the `NonZeroU64`).
#[cfg(feature = "expose-ids")]
#[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
#[repr(transparent)]
pub struct Id<T>(core::num::NonZeroU64, std::marker::PhantomData<*mut T>);
// SAFETY: Id only contains a NonZeroU64; the raw-pointer PhantomData exists
// purely for type-level association and no pointer is ever stored or
// dereferenced, so sending/sharing across threads is sound.
#[cfg(feature = "expose-ids")]
unsafe impl<T> Send for Id<T> {}
#[cfg(feature = "expose-ids")]
unsafe impl<T> Sync for Id<T> {}
#[cfg(feature = "expose-ids")]
impl<T> Clone for Id<T> {
fn clone(&self) -> Self {
*self
}
}
#[cfg(feature = "expose-ids")]
impl<T> Copy for Id<T> {}
#[cfg(feature = "expose-ids")]
impl<T> Debug for Id<T> {
    // Manual impl avoids a derived `T: Debug` bound; formats as `Id(<n>)`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_tuple("Id").field(&self.0).finish()
    }
}
// Equality compares only the numeric id; manual impls avoid `T: PartialEq`/`T: Eq`
// bounds on the phantom type parameter.
#[cfg(feature = "expose-ids")]
impl<T> PartialEq for Id<T> {
    fn eq(&self, other: &Id<T>) -> bool {
        self.0 == other.0
    }
}
#[cfg(feature = "expose-ids")]
impl<T> Eq for Id<T> {}
#[cfg(feature = "expose-ids")]
impl<T> std::hash::Hash for Id<T> {
    // Hashes only the numeric id, consistent with the `PartialEq` impl.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}
#[cfg(feature = "expose-ids")]
impl Adapter {
    /// Returns a globally-unique identifier for this `Adapter`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Adapter> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Device {
    /// Returns a globally-unique identifier for this `Device`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Device> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Queue {
    /// Returns a globally-unique identifier for this `Queue`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Queue> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl ShaderModule {
    /// Returns a globally-unique identifier for this `ShaderModule`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<ShaderModule> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl BindGroupLayout {
    /// Returns a globally-unique identifier for this `BindGroupLayout`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<BindGroupLayout> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl BindGroup {
    /// Returns a globally-unique identifier for this `BindGroup`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<BindGroup> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl TextureView {
    /// Returns a globally-unique identifier for this `TextureView`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<TextureView> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Sampler {
    /// Returns a globally-unique identifier for this `Sampler`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Sampler> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Buffer {
    /// Returns a globally-unique identifier for this `Buffer`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Buffer> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Texture {
    /// Returns a globally-unique identifier for this `Texture`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Texture> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl QuerySet {
    /// Returns a globally-unique identifier for this `QuerySet`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<QuerySet> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl PipelineLayout {
    /// Returns a globally-unique identifier for this `PipelineLayout`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<PipelineLayout> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl RenderPipeline {
    /// Returns a globally-unique identifier for this `RenderPipeline`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<RenderPipeline> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl ComputePipeline {
    /// Returns a globally-unique identifier for this `ComputePipeline`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<ComputePipeline> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl RenderBundle {
    /// Returns a globally-unique identifier for this `RenderBundle`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<RenderBundle> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
#[cfg(feature = "expose-ids")]
impl Surface {
    /// Returns a globally-unique identifier for this `Surface`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Surface> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}
/// Handler for uncaptured errors: any `Send + 'static` closure taking an [`Error`].
pub trait UncapturedErrorHandler: Fn(Error) + Send + 'static {}
// Blanket impl: every qualifying closure automatically implements the trait.
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + 'static {}
/// Error type produced by wgpu operations.
#[derive(Debug)]
pub enum Error {
    /// Out of memory error.
    OutOfMemory {
        /// Lower-level source of the error.
        source: Box<dyn error::Error + Send + 'static>,
    },
    /// Validation error.
    Validation {
        /// Lower-level source of the error.
        source: Box<dyn error::Error + Send + 'static>,
        /// Human-readable description of the validation failure (used as the
        /// `Display` output).
        description: String,
    },
}
// Compile-time guarantee that `Error` is `Send` (required for it to be passed
// to `UncapturedErrorHandler`, which is itself `Send`).
static_assertions::assert_impl_all!(Error: Send);
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Error::OutOfMemory { source } => Some(source.as_ref()),
Error::Validation { source, .. } => Some(source.as_ref()),
}
}
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
Error::Validation { description, .. } => f.write_str(description),
}
}
}