Add multithreaded_compute test
rukai committed May 8, 2019
1 parent 455f045 commit 37fed81
Showing 3 changed files with 201 additions and 0 deletions.
101 changes: 101 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 4 additions & 0 deletions wgpu-rs/Cargo.toml
@@ -25,3 +25,7 @@ vulkan = ["wgn/gfx-backend-vulkan"]
[dependencies]
wgn = { package = "wgpu-native", version = "0.2.5", path = "../wgpu-native", features = ["local", "window-winit"] }
arrayvec = "0.4"

[dev-dependencies]
rayon = "1"
rusty-fork = "0.2"
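
The new [dev-dependencies] above pull in rusty-fork, which runs each test body in its own forked process with a timeout, and rayon, which provides the parallel iterator used inside the test. A minimal, standalone sketch of the rusty_fork_test! pattern (assuming only the rusty-fork 0.2 crate; the test name and timeout here are illustrative, not part of this commit):

#[macro_use] extern crate rusty_fork;

rusty_fork_test! {
    // Abort the forked child process if it runs longer than 5 seconds.
    #![rusty_fork(timeout_ms = 5000)]

    #[test]
    fn runs_isolated_in_a_child_process() {
        // A hang or crash here kills only the child, not the whole test harness.
        assert_eq!(2 + 2, 4);
    }
}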
96 changes: 96 additions & 0 deletions wgpu-rs/tests/multithreaded_compute.rs
@@ -0,0 +1,96 @@
#[macro_use] extern crate rusty_fork;

rusty_fork_test! {
    #![rusty_fork(timeout_ms = 10000)]
    #[test]
    #[cfg(any(feature = "vulkan", feature = "metal", feature = "dx12"))]
    fn multithreaded_compute() {
        use rayon::prelude::*;

        let foo: Vec<i32> = (0..1000).collect();
        foo.par_iter().for_each(|i| {
            println!("{}", i);
            let numbers = vec!(100, 100, 100);

            let size = (numbers.len() * std::mem::size_of::<u32>()) as u32;

            let instance = wgpu::Instance::new();
            let adapter = instance.get_adapter(&wgpu::AdapterDescriptor {
                power_preference: wgpu::PowerPreference::Default,
            });
            let mut device = adapter.create_device(&wgpu::DeviceDescriptor {
                extensions: wgpu::Extensions {
                    anisotropic_filtering: false,
                },
            });

            let cs_bytes = include_bytes!("./../../examples/data/collatz.comp.spv");
            let cs_module = device.create_shader_module(cs_bytes);

            let staging_buffer = device
                .create_buffer_mapped(
                    numbers.len(),
                    wgpu::BufferUsageFlags::MAP_READ
                        | wgpu::BufferUsageFlags::TRANSFER_DST
                        | wgpu::BufferUsageFlags::TRANSFER_SRC,
                )
                .fill_from_slice(&numbers);

            let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                size,
                usage: wgpu::BufferUsageFlags::STORAGE
                    | wgpu::BufferUsageFlags::TRANSFER_DST
                    | wgpu::BufferUsageFlags::TRANSFER_SRC,
            });

            let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                bindings: &[wgpu::BindGroupLayoutBinding {
                    binding: 0,
                    visibility: wgpu::ShaderStageFlags::COMPUTE,
                    ty: wgpu::BindingType::StorageBuffer,
                }],
            });

            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout: &bind_group_layout,
                bindings: &[wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer {
                        buffer: &storage_buffer,
                        range: 0..size,
                    },
                }],
            });

            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                bind_group_layouts: &[&bind_group_layout],
            });

            let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                layout: &pipeline_layout,
                compute_stage: wgpu::PipelineStageDescriptor {
                    module: &cs_module,
                    entry_point: "main",
                },
            });

            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
            encoder.copy_buffer_to_buffer(&staging_buffer, 0, &storage_buffer, 0, size);
            {
                let mut cpass = encoder.begin_compute_pass();
                cpass.set_pipeline(&compute_pipeline);
                cpass.set_bind_group(0, &bind_group, &[]);
                cpass.dispatch(numbers.len() as u32, 1, 1);
            }
            encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);

            device.get_queue().submit(&[encoder.finish()]);

            staging_buffer.map_read_async(0, size, |result: wgpu::BufferMapAsyncResult<&[u32]>| {
                if let Ok(mapping) = result {
                    println!("Times: {:?}", mapping.data);
                }
            });
        });
    }
}
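
For reference, the concurrency in this test comes from rayon's par_iter, which fans the closure out across a thread pool so many device setups run at once. A minimal sketch of that pattern on its own (assuming rayon 1.x; no wgpu involved, and the item count is arbitrary):

use rayon::prelude::*;

fn main() {
    let inputs: Vec<i32> = (0..8).collect();
    // for_each may run these closures concurrently, in any order,
    // on rayon's global thread pool.
    inputs.par_iter().for_each(|i| {
        println!("processed {} on {:?}", i, std::thread::current().id());
    });
}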
