diff --git a/functional-samples/sample.webgpu/README.md b/functional-samples/sample.webgpu/README.md
new file mode 100644
index 00000000..492e0a2e
--- /dev/null
+++ b/functional-samples/sample.webgpu/README.md
@@ -0,0 +1,17 @@
+# WebGPU extension sample
+
+This sample demonstrates how to use the [WebGPU API](https://webgpu.dev/) to generate a red triangle using an extension service worker.
+
+> [!WARNING]
+> Service worker support in WebGPU is available in Chrome 123 with the "Experimental Web Platform Features" flag.
+
+## Overview
+
+In this sample, clicking the action button opens a red triangle image in a new tab.
+
+## Running this extension
+
+1. Clone this repository.
+2. Load this directory in Chrome as an [unpacked extension](https://developer.chrome.com/docs/extensions/mv3/getstarted/development-basics/#load-unpacked).
+3. Pin the extension from the extension menu.
+4. Click the extension's action icon to open the red triangle in a new tab.
diff --git a/functional-samples/sample.webgpu/manifest.json b/functional-samples/sample.webgpu/manifest.json
new file mode 100644
index 00000000..c075e175
--- /dev/null
+++ b/functional-samples/sample.webgpu/manifest.json
@@ -0,0 +1,12 @@
+{
+  "manifest_version": 3,
+  "name": "WebGPU Extension",
+  "description": "Generate a red triangle with WebGPU in an extension service worker.",
+  "version": "1.0",
+  "action": {
+    "default_title": "Click to see a red triangle"
+  },
+  "background": {
+    "service_worker": "service-worker.js"
+  }
+}
diff --git a/functional-samples/sample.webgpu/service-worker.js b/functional-samples/sample.webgpu/service-worker.js
new file mode 100644
index 00000000..052b86e8
--- /dev/null
+++ b/functional-samples/sample.webgpu/service-worker.js
@@ -0,0 +1,58 @@
+// Renders a red triangle with WebGPU inside the extension service worker and
+// opens the result as an image in a new tab when the action icon is clicked.
+chrome.action.onClicked.addListener(async () => {
+  // WebGPU may be unavailable (see README: Chrome 123 behind a flag). Fail
+  // loudly instead of throwing a cryptic TypeError on `navigator.gpu`.
+  if (!navigator.gpu) {
+    console.error('WebGPU is not supported in this service worker.');
+    return;
+  }
+  const adapter = await navigator.gpu.requestAdapter();
+  // requestAdapter() resolves to null when no suitable GPU adapter exists.
+  if (!adapter) {
+    console.error('Failed to acquire a WebGPU adapter.');
+    return;
+  }
+  const device = await adapter.requestDevice();
+
+  // Render offscreen: service workers have no DOM, so use OffscreenCanvas.
+  const canvas = new OffscreenCanvas(256, 256);
+  const context = canvas.getContext('webgpu');
+  const format = navigator.gpu.getPreferredCanvasFormat();
+  context.configure({ device, format });
+
+  // WGSL: one hard-coded triangle, filled solid red by the fragment stage.
+  const code = `
+    @vertex fn vertexMain(@builtin(vertex_index) i : u32) ->
+      @builtin(position) vec4f {
+      const pos = array(vec2f(0, 1), vec2f(-1, -1), vec2f(1, -1));
+      return vec4f(pos[i], 0, 1);
+    }
+    @fragment fn fragmentMain() -> @location(0) vec4f {
+      return vec4f(1, 0, 0, 1);
+    }`;
+  const module = device.createShaderModule({ code });
+  const pipeline = await device.createRenderPipelineAsync({
+    layout: 'auto',
+    vertex: { module },
+    fragment: { module, targets: [{ format }] }
+  });
+  const commandEncoder = device.createCommandEncoder();
+  const colorAttachments = [
+    {
+      view: context.getCurrentTexture().createView(),
+      loadOp: 'clear',
+      storeOp: 'store'
+    }
+  ];
+  const passEncoder = commandEncoder.beginRenderPass({ colorAttachments });
+  passEncoder.setPipeline(pipeline);
+  passEncoder.draw(3);
+  passEncoder.end();
+  device.queue.submit([commandEncoder.finish()]);
+
+  // Open the canvas as a data-URL image in a new tab. FileReader is used
+  // because URL.createObjectURL is not exposed in service workers.
+  const blob = await canvas.convertToBlob();
+  const reader = new FileReader();
+  reader.onload = () => chrome.tabs.create({ url: reader.result });
+  reader.readAsDataURL(blob);
+});