// rooms-launcher/iced_video_player/src/pipeline.rs

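//! wgpu render pipeline for the video widget: uploads decoded NV12-style
//! frames (a full-resolution Y plane plus a half-resolution interleaved UV
//! plane) into textures and draws each video as a textured quad.
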
use crate::video::Frame;
use iced::{
    wgpu::{self, PipelineCompilationOptions},
    widget::shader::{Primitive, Storage, Viewport},
};
use std::{
    collections::{btree_map::Entry, BTreeMap},
    num::NonZero,
    sync::{
        atomic::{AtomicBool, AtomicUsize, Ordering},
        Arc, Mutex,
    },
};

/// Per-instance uniform data for one video quad.
#[repr(C)]
struct Uniforms {
    rect: [f32; 4],
    // Padded out to 256 bytes so consecutive instances satisfy wgpu's
    // `min_uniform_buffer_offset_alignment` when bound with a dynamic offset.
    _pad: [u8; 240],
}
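
/// GPU resources held for one video: the NV12 plane textures, the
/// per-instance uniform buffer, and the bind group tying them together.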
struct VideoEntry {
    texture_y: wgpu::Texture,
    texture_uv: wgpu::Texture,
    instances: wgpu::Buffer,
    bg0: wgpu::BindGroup,
    alive: Arc<AtomicBool>,
    prepare_index: AtomicUsize,
    render_index: AtomicUsize,
}
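
/// The shared render pipeline plus every currently registered video, keyed by id.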
struct VideoPipeline {
    pipeline: wgpu::RenderPipeline,
    bg0_layout: wgpu::BindGroupLayout,
    sampler: wgpu::Sampler,
    videos: BTreeMap<u64, VideoEntry>,
}

impl VideoPipeline {
    /// Compiles the shader and builds the pipeline, bind group layout, and
    /// sampler shared by all videos.
    fn new(device: &wgpu::Device, format: wgpu::TextureFormat) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("iced_video_player shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
        });

        let bg0_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("iced_video_player bind group 0 layout"),
            entries: &[
                // Binding 0: Y plane texture.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                // Binding 1: interleaved UV plane texture.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                // Binding 2: shared sampler.
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
                // Binding 3: per-instance uniforms, selected via dynamic offset.
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: true,
                        min_binding_size: None,
                    },
                    count: None,
                },
            ],
        });

        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("iced_video_player pipeline layout"),
            bind_group_layouts: &[&bg0_layout],
            push_constant_ranges: &[],
        });

        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("iced_video_player pipeline"),
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[],
                compilation_options: PipelineCompilationOptions::default(),
            },
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format,
                    blend: None,
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: PipelineCompilationOptions::default(),
            }),
            multiview: None,
            cache: None,
        });

        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("iced_video_player sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            lod_min_clamp: 0.0,
            lod_max_clamp: 1.0,
            compare: None,
            anisotropy_clamp: 1,
            border_color: None,
        });

        VideoPipeline {
            pipeline,
            bg0_layout,
            sampler,
            videos: BTreeMap::new(),
        }
    }
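
    /// Uploads one decoded frame for `video_id`, lazily creating that video's
    /// GPU resources on first use. `frame` is expected to contain the tightly
    /// packed Y plane followed by the interleaved UV plane.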
    fn upload(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        video_id: u64,
        alive: &Arc<AtomicBool>,
        (width, height): (u32, u32),
        frame: &[u8],
    ) {
        if let Entry::Vacant(entry) = self.videos.entry(video_id) {
            // Full-resolution luma plane.
            let texture_y = device.create_texture(&wgpu::TextureDescriptor {
                label: Some("iced_video_player texture"),
                size: wgpu::Extent3d {
                    width,
                    height,
                    depth_or_array_layers: 1,
                },
                mip_level_count: 1,
                sample_count: 1,
                dimension: wgpu::TextureDimension::D2,
                format: wgpu::TextureFormat::R8Unorm,
                usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
                view_formats: &[],
            });

            // Half-resolution interleaved chroma plane.
            let texture_uv = device.create_texture(&wgpu::TextureDescriptor {
                label: Some("iced_video_player texture"),
                size: wgpu::Extent3d {
                    width: width / 2,
                    height: height / 2,
                    depth_or_array_layers: 1,
                },
                mip_level_count: 1,
                sample_count: 1,
                dimension: wgpu::TextureDimension::D2,
                format: wgpu::TextureFormat::Rg8Unorm,
                usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
                view_formats: &[],
            });

            let view_y = texture_y.create_view(&wgpu::TextureViewDescriptor {
                label: Some("iced_video_player texture view"),
                format: None,
                dimension: None,
                aspect: wgpu::TextureAspect::All,
                base_mip_level: 0,
                mip_level_count: None,
                base_array_layer: 0,
                array_layer_count: None,
                usage: None,
            });

            let view_uv = texture_uv.create_view(&wgpu::TextureViewDescriptor {
                label: Some("iced_video_player texture view"),
                format: None,
                dimension: None,
                aspect: wgpu::TextureAspect::All,
                base_mip_level: 0,
                mip_level_count: None,
                base_array_layer: 0,
                array_layer_count: None,
                usage: None,
            });

            let instances = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("iced_video_player uniform buffer"),
                size: 256 * std::mem::size_of::<Uniforms>() as u64, // max 256 video players per frame
                usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::UNIFORM,
                mapped_at_creation: false,
            });

            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced_video_player bind group"),
                layout: &self.bg0_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&view_y),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::TextureView(&view_uv),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: wgpu::BindingResource::Sampler(&self.sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 3,
                        resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                            buffer: &instances,
                            offset: 0,
                            // Bind a single `Uniforms` slot; `draw` selects the
                            // instance with a dynamic offset.
                            size: Some(NonZero::new(std::mem::size_of::<Uniforms>() as _).unwrap()),
                        }),
                    },
                ],
            });

            entry.insert(VideoEntry {
                texture_y,
                texture_uv,
                instances,
                bg0: bind_group,
                alive: Arc::clone(alive),
                prepare_index: AtomicUsize::new(0),
                render_index: AtomicUsize::new(0),
            });
        }

        let VideoEntry {
            texture_y,
            texture_uv,
            ..
        } = self.videos.get(&video_id).unwrap();

        // Y plane: one byte per texel.
        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: texture_y,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &frame[..(width * height) as usize],
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(width),
                rows_per_image: Some(height),
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );

        // UV plane: half-width rows of two-byte texels, so still `width` bytes per row.
        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: texture_uv,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &frame[(width * height) as usize..],
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(width),
                rows_per_image: Some(height / 2),
            },
            wgpu::Extent3d {
                width: width / 2,
                height: height / 2,
                depth_or_array_layers: 1,
            },
        );
    }
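
    /// Frees GPU resources for every video whose `alive` flag has been cleared.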
    fn cleanup(&mut self) {
        let ids: Vec<_> = self
            .videos
            .iter()
            .filter_map(|(id, entry)| (!entry.alive.load(Ordering::SeqCst)).then_some(*id))
            .collect();
        for id in ids {
            if let Some(video) = self.videos.remove(&id) {
                video.texture_y.destroy();
                video.texture_uv.destroy();
                video.instances.destroy();
            }
        }
    }
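
    /// Writes the widget's rect into this video's next uniform slot, then
    /// sweeps any dead videos.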
    fn prepare(&mut self, queue: &wgpu::Queue, video_id: u64, bounds: &iced::Rectangle) {
        if let Some(video) = self.videos.get_mut(&video_id) {
            let uniforms = Uniforms {
                rect: [
                    bounds.x,
                    bounds.y,
                    bounds.x + bounds.width,
                    bounds.y + bounds.height,
                ],
                _pad: [0; 240],
            };
            queue.write_buffer(
                &video.instances,
                (video.prepare_index.load(Ordering::Relaxed) * std::mem::size_of::<Uniforms>())
                    as u64,
                // Safety: `Uniforms` is `#[repr(C)]` and holds only plain
                // data, so viewing it as a byte slice is sound.
                unsafe {
                    std::slice::from_raw_parts(
                        &uniforms as *const _ as *const u8,
                        std::mem::size_of::<Uniforms>(),
                    )
                },
            );
            video.prepare_index.fetch_add(1, Ordering::Relaxed);
            video.render_index.store(0, Ordering::Relaxed);
        }

        self.cleanup();
    }
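
    /// Encodes a render pass that draws one video quad into `target`,
    /// scissored to `clip`.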
    fn draw(
        &self,
        target: &wgpu::TextureView,
        encoder: &mut wgpu::CommandEncoder,
        clip: &iced::Rectangle<u32>,
        video_id: u64,
    ) {
        if let Some(video) = self.videos.get(&video_id) {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("iced_video_player render pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });

            pass.set_pipeline(&self.pipeline);
            // Select this instance's uniform slot via the dynamic offset.
            pass.set_bind_group(
                0,
                &video.bg0,
                &[
                    (video.render_index.load(Ordering::Relaxed) * std::mem::size_of::<Uniforms>())
                        as u32,
                ],
            );
            pass.set_scissor_rect(clip.x as _, clip.y as _, clip.width as _, clip.height as _);
            // Six vertices for the quad's two triangles; there are no vertex
            // buffers, so positions come from the vertex shader.
            pass.draw(0..6, 0..1);

            video.prepare_index.store(0, Ordering::Relaxed);
            video.render_index.fetch_add(1, Ordering::Relaxed);
        }
    }
}
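
/// The custom primitive handed to iced's shader widget: everything needed to
/// upload and draw a single video frame.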
#[derive(Debug, Clone)]
pub(crate) struct VideoPrimitive {
    video_id: u64,
    alive: Arc<AtomicBool>,
    frame: Arc<Mutex<Frame>>,
    size: (u32, u32),
    upload_frame: bool,
}

impl VideoPrimitive {
    pub fn new(
        video_id: u64,
        alive: Arc<AtomicBool>,
        frame: Arc<Mutex<Frame>>,
        size: (u32, u32),
        upload_frame: bool,
    ) -> Self {
        VideoPrimitive {
            video_id,
            alive,
            frame,
            size,
            upload_frame,
        }
    }
}
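
// iced drives the primitive in two phases: `prepare` (pipeline creation,
// frame upload, uniform writes) runs before `render` records the draw.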
impl Primitive for VideoPrimitive {
    fn prepare(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        format: wgpu::TextureFormat,
        storage: &mut Storage,
        bounds: &iced::Rectangle,
        viewport: &Viewport,
    ) {
        if !storage.has::<VideoPipeline>() {
            storage.store(VideoPipeline::new(device, format));
        }

        let pipeline = storage.get_mut::<VideoPipeline>().unwrap();

        if self.upload_frame {
            if let Some(readable) = self.frame.lock().expect("lock frame mutex").readable() {
                pipeline.upload(
                    device,
                    queue,
                    self.video_id,
                    &self.alive,
                    self.size,
                    readable.as_slice(),
                );
            }
        }

        // Project the widget bounds through an orthographic transform of the
        // logical viewport so the vertex shader receives normalized coordinates.
        pipeline.prepare(
            queue,
            self.video_id,
            &(*bounds
                * iced::Transformation::orthographic(
                    viewport.logical_size().width as _,
                    viewport.logical_size().height as _,
                )),
        );
    }

    fn render(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        storage: &Storage,
        target: &wgpu::TextureView,
        clip_bounds: &iced::Rectangle<u32>,
    ) {
        let pipeline = storage.get::<VideoPipeline>().unwrap();
        pipeline.draw(target, encoder, clip_bounds, self.video_id);
    }
}