diff --git a/README.md b/README.md index 25002db..0433092 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,49 @@ -WebGL Deferred Shading -====================== +# University of Pennsylvania, CIS 565: GPU Programming and Architecture. +Project 5: WebGL Deferred Shading +=============== -**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 5** +## User resources +- **Name:** David Grosman. +- **Tested on:** Microsoft Windows 7 Professional, i7-5600U @ 2.6GHz, 256GB, GeForce 840M (Personal laptop). -* (TODO) YOUR NAME HERE -* Tested on: (TODO) **Google Chrome 222.2** on - Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +### Demo Video/GIF -### Live Online +![](img/Video.gif) -[![](img/thumb.png)](http://TODO.github.io/Project5B-WebGL-Deferred-Shading) +## Project description +This project's purpose was to gain some experience with the basics of deferred shading and WebGL. I used GLSL and WebGL to implement a deferred shading pipeline and various lighting and visual effects. +In this project, I have implemented the following features: -### Demo Video/GIF +* Implemented deferred Blinn-Phong shading (diffuse + specular) for point lights + * With normal mapping +* Implemented a Bloom Shader using post-process Gaussian blur. +* Scissor test optimization: I only accumulate shading from each point light source within a rectangle around the light. +* Optimized g-buffer format - reduced the number and size of g-buffers by: + * Packing values together into vec4s + * Using 2-component normals + * Reducing the number of properties passed via g-buffer by applying the normal map in the `copy` shader pass instead of copying both geometry normals and normal maps. + +### Performance Analysis +![](img/ScissorTestPerf.JPG) -[![](img/video.png)](TODO) +From the graph above, we notice that: + 1. Scissoring is a very effective optimization and scales well as the number of lights in the scene increases. + 2. 
The sphere scissoring is faster with fewer lights. This is mostly because while sphere proxies reduce the shaded area, they are overall more expensive to render than simple quads. -### (TODO: Your README) +I have also noticed that applying the normal map in the `copy` shader pass instead of copying both geometry normals and normal maps is much faster (around 150%, because of the reduced memory bandwidth needed to perform the operation). Using 2-component normals, however, is slower because of the mathematical operations necessary to compress the normal and to retrieve its Z component from its X and Y ones. -*DO NOT* leave the README to the last minute! It is a crucial part of the -project, and we will not be able to grade you without a good README. +### Running the code +If you have Python, you should be able to run `server.py` to start a server. +Then, open [`http://localhost:10565/`](http://localhost:10565/) in your browser. -This assignment has a considerable amount of performance analysis compared -to implementation work. Complete the implementation early to leave time! +This project requires a WebGL-capable web browser with support for +`WEBGL_draw_buffers`. You can check for support on +[WebGL Report](http://webglreport.com/). +Google Chrome seems to work best on all platforms. If you have problems running +the starter code, use Chrome or Chromium, and make sure you have updated your +browser and video drivers. Firefox's shader editor may require that you disable +WebGL debugging in `framework.js` (see below). 
### Credits diff --git a/glsl/copy.frag.glsl b/glsl/copy.frag.glsl index 823ebcd..b91a1d4 100644 --- a/glsl/copy.frag.glsl +++ b/glsl/copy.frag.glsl @@ -10,11 +10,24 @@ varying vec3 v_position; varying vec3 v_normal; varying vec2 v_uv; -void main() { +vec3 applyNormalMap(vec3 geomnor, vec3 normap) { + normap = normap * 2.0 - 1.0; + vec3 up = normalize(vec3(0.001, 1, 0.001)); + vec3 surftan = normalize(cross(geomnor, up)); + vec3 surfbinor = cross(geomnor, surftan); + return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; +} + +void main() +{ // TODO: copy values into gl_FragData[0], [1], etc. // You can use the GLSL texture2D function to access the textures using // the UV in v_uv. + vec3 normalMap = texture2D(u_normap, v_uv).rgb; + vec3 objNormal = applyNormalMap (v_normal, normalMap); // this gives you the idea - // gl_FragData[0] = vec4( v_position, 1.0 ); + gl_FragData[0] = vec4(v_position, 1.0); + gl_FragData[1] = vec4(objNormal, 0.0); + gl_FragData[2] = texture2D(u_colmap, v_uv); } diff --git a/glsl/deferred/ambient.frag.glsl b/glsl/deferred/ambient.frag.glsl index 1fd4647..e7ea353 100644 --- a/glsl/deferred/ambient.frag.glsl +++ b/glsl/deferred/ambient.frag.glsl @@ -10,18 +10,18 @@ uniform sampler2D u_depth; varying vec2 v_uv; +const vec3 AMBIENT_COLOR = vec3(0.36, 0.36, 0.36); + void main() { - vec4 gb0 = texture2D(u_gbufs[0], v_uv); - vec4 gb1 = texture2D(u_gbufs[1], v_uv); vec4 gb2 = texture2D(u_gbufs[2], v_uv); - vec4 gb3 = texture2D(u_gbufs[3], v_uv); float depth = texture2D(u_depth, v_uv).x; - // TODO: Extract needed properties from the g-buffers into local variables if (depth == 1.0) { gl_FragColor = vec4(0, 0, 0, 0); // set alpha to 0 return; } - gl_FragColor = vec4(0.1, 0.1, 0.1, 1); // TODO: replace this + vec3 mtrlClr = gb2.rgb; // The color map - unlit "albedo" (surface color) + + gl_FragColor = vec4(AMBIENT_COLOR * mtrlClr, 1.0); } diff --git a/glsl/deferred/blinnphong-pointlight.frag.glsl 
b/glsl/deferred/blinnphong-pointlight.frag.glsl index b24a54a..5e5e401 100644 --- a/glsl/deferred/blinnphong-pointlight.frag.glsl +++ b/glsl/deferred/blinnphong-pointlight.frag.glsl @@ -4,29 +4,18 @@ precision highp int; #define NUM_GBUFFERS 4 -uniform vec3 u_lightCol; +uniform vec3 u_cameraPos; uniform vec3 u_lightPos; +uniform vec3 u_lightCol; uniform float u_lightRad; + uniform sampler2D u_gbufs[NUM_GBUFFERS]; uniform sampler2D u_depth; varying vec2 v_uv; -vec3 applyNormalMap(vec3 geomnor, vec3 normap) { - normap = normap * 2.0 - 1.0; - vec3 up = normalize(vec3(0.001, 1, 0.001)); - vec3 surftan = normalize(cross(geomnor, up)); - vec3 surfbinor = cross(geomnor, surftan); - return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; -} - void main() { - vec4 gb0 = texture2D(u_gbufs[0], v_uv); - vec4 gb1 = texture2D(u_gbufs[1], v_uv); - vec4 gb2 = texture2D(u_gbufs[2], v_uv); - vec4 gb3 = texture2D(u_gbufs[3], v_uv); - float depth = texture2D(u_depth, v_uv).x; - // TODO: Extract needed properties from the g-buffers into local variables + float depth = texture2D(u_depth, v_uv).x; // If nothing was rendered to this pixel, set alpha to 0 so that the // postprocessing step can render the sky color. 
@@ -35,5 +24,30 @@ void main() { return; } - gl_FragColor = vec4(0, 0, 1, 1); // TODO: perform lighting calculations + vec4 gb0 = texture2D(u_gbufs[0], v_uv); + vec4 gb1 = texture2D(u_gbufs[1], v_uv); + vec4 gb2 = texture2D(u_gbufs[2], v_uv); + vec4 gb3 = texture2D(u_gbufs[3], v_uv); + + vec3 objPos = gb0.xyz; // World-space position + vec3 objClr = gb2.rgb; // The color map - unlit "albedo" (surface color) + vec3 objNormal = gb1.xyz; // The true normals as we want to light them - with the normal map applied to the geometry normals (applyNormalMap above) + + vec3 lightDir = normalize(objPos - u_lightPos); + vec3 viewDir = normalize(objPos - u_cameraPos); + vec3 lightReflDir = normalize(reflect(lightDir, objNormal)); + + // Calculate Diffuse Term: + float Idiff = max(-dot(objNormal,lightDir), 0.0); + Idiff = clamp(Idiff, 0.0, 1.0); + + // Calculate Specular Term: + float Ispec = pow( max( dot(lightReflDir,-viewDir), 0.0), 15.0 ); + Ispec = clamp(Ispec, 0.0, 1.0); + + float distLightToObj = distance(u_lightPos, objPos); + float attenuation = 1.0 - clamp( pow( distLightToObj / u_lightRad, 2.0), 0.0, 1.0 ); + + // write Total Color: + gl_FragColor = vec4( attenuation * u_lightCol * (objClr*Idiff+vec3(1,1,1)*Ispec), 1); } diff --git a/glsl/deferred/debug.frag.glsl b/glsl/deferred/debug.frag.glsl index 007466f..ab6e4fa 100644 --- a/glsl/deferred/debug.frag.glsl +++ b/glsl/deferred/debug.frag.glsl @@ -12,14 +12,6 @@ varying vec2 v_uv; const vec4 SKY_COLOR = vec4(0.66, 0.73, 1.0, 1.0); -vec3 applyNormalMap(vec3 geomnor, vec3 normap) { - normap = normap * 2.0 - 1.0; - vec3 up = normalize(vec3(0.001, 1, 0.001)); - vec3 surftan = normalize(cross(geomnor, up)); - vec3 surfbinor = cross(geomnor, surftan); - return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; -} - void main() { vec4 gb0 = texture2D(u_gbufs[0], v_uv); vec4 gb1 = texture2D(u_gbufs[1], v_uv); @@ -29,25 +21,25 @@ void main() { // TODO: Extract needed properties from the g-buffers into local 
variables // These definitions are suggested for starting out, but you will probably want to change them. vec3 pos = gb0.xyz; // World-space position - vec3 geomnor = gb1.xyz; // Normals of the geometry as defined, without normal mapping + vec3 nor = gb1.xyz; // The true normals as we want to light them - with the normal map applied to the geometry normals (applyNormalMap above) vec3 colmap = gb2.rgb; // The color map - unlit "albedo" (surface color) vec3 normap = gb3.xyz; // The raw normal map (normals relative to the surface they're on) - vec3 nor = applyNormalMap (geomnor, normap); // The true normals as we want to light them - with the normal map applied to the geometry normals (applyNormalMap above) + // TODO: uncomment if (u_debug == 0) { gl_FragColor = vec4(vec3(depth), 1.0); } else if (u_debug == 1) { - // gl_FragColor = vec4(abs(pos) * 0.1, 1.0); - } else if (u_debug == 2) { - // gl_FragColor = vec4(abs(geomnor), 1.0); - } else if (u_debug == 3) { - // gl_FragColor = vec4(colmap, 1.0); + gl_FragColor = vec4(abs(pos) * 0.1, 1.0); + } /*else if (u_debug == 2) { + gl_FragColor = vec4(abs(geomnor), 1.0); + } */else if (u_debug == 3) { + gl_FragColor = vec4(colmap, 1.0); } else if (u_debug == 4) { - // gl_FragColor = vec4(normap, 1.0); + gl_FragColor = vec4(normap, 1.0); } else if (u_debug == 5) { - // gl_FragColor = vec4(abs(nor), 1.0); + gl_FragColor = vec4(abs(nor), 1.0); } else { - gl_FragColor = vec4(1, 0, 1, 1); + gl_FragColor = vec4(0, 0, 1, 1); } } diff --git a/glsl/post/brightnessExtractor.frag.glsl b/glsl/post/brightnessExtractor.frag.glsl new file mode 100644 index 0000000..620566a --- /dev/null +++ b/glsl/post/brightnessExtractor.frag.glsl @@ -0,0 +1,21 @@ +#version 100 +precision highp float; +precision highp int; + +uniform sampler2D u_color; + +varying vec2 v_uv; + +const vec4 SKY_COLOR = vec4(0.01, 0.14, 0.42, 1.0); + +void main() { + vec4 color = texture2D(u_color, v_uv); + + float brightness = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722)); + 
if(brightness > 1.0) { + gl_FragColor = color; + } + else{ + gl_FragColor = vec4(0, 0, 0, 1); + } +} diff --git a/glsl/post/one.frag.glsl b/glsl/post/final.frag.glsl similarity index 100% rename from glsl/post/one.frag.glsl rename to glsl/post/final.frag.glsl diff --git a/glsl/post/gaussBlur.frag.glsl b/glsl/post/gaussBlur.frag.glsl new file mode 100644 index 0000000..06c12df --- /dev/null +++ b/glsl/post/gaussBlur.frag.glsl @@ -0,0 +1,37 @@ +#version 100 +precision highp float; +precision highp int; + +uniform sampler2D u_color; +uniform bool u_horizontal; +uniform vec2 u_texelSize; +uniform float u_kernelWeights[5]; + +varying vec2 v_uv; + +void main() +{ + vec2 tex_offset = 1.0 / u_texelSize; // gets size of single texel + vec3 result = texture2D(u_color, v_uv).rgb; // current fragment's contribution + if(u_horizontal) + { + for(int i = 1; i < 5; ++i) + { + float fI = float(i); + + result += texture2D(u_color, v_uv + vec2(tex_offset.x * fI, 0.0)).rgb * u_kernelWeights[i]; + result += texture2D(u_color, v_uv - vec2(tex_offset.x * fI, 0.0)).rgb * u_kernelWeights[i]; + } + } + else + { + for(int i = 1; i < 5; ++i) + { + float fI = float(i); + + result += texture2D(u_color, v_uv + vec2(0.0, tex_offset.y * fI)).rgb * u_kernelWeights[i]; + result += texture2D(u_color, v_uv - vec2(0.0, tex_offset.y * fI)).rgb * u_kernelWeights[i]; + } + } + gl_FragColor = vec4(result, 1.0); +} diff --git a/glsl/red.frag.glsl b/glsl/red.frag.glsl index f8ef1ec..e170381 100644 --- a/glsl/red.frag.glsl +++ b/glsl/red.frag.glsl @@ -3,5 +3,5 @@ precision highp float; precision highp int; void main() { - gl_FragColor = vec4(1, 0, 0, 1); + gl_FragColor = vec4(1, 0, 0, 0.001); } diff --git a/glsl/sphere.vert.glsl b/glsl/sphere.vert.glsl new file mode 100644 index 0000000..8a77dab --- /dev/null +++ b/glsl/sphere.vert.glsl @@ -0,0 +1,13 @@ +#version 100 +precision highp float; +precision highp int; + +uniform mat4 u_cameraMtx; +uniform mat4 u_worldMtx; + +attribute vec3 a_position; +varying vec2 
v_uv; + +void main() { + gl_Position = u_cameraMtx * u_worldMtx * vec4(a_position, 1.0); +} diff --git a/img/PerfSheet.xlsx b/img/PerfSheet.xlsx new file mode 100644 index 0000000..413c553 Binary files /dev/null and b/img/PerfSheet.xlsx differ diff --git a/img/ScissorTestPerf.JPG b/img/ScissorTestPerf.JPG new file mode 100644 index 0000000..095dc19 Binary files /dev/null and b/img/ScissorTestPerf.JPG differ diff --git a/img/Video.gif b/img/Video.gif new file mode 100644 index 0000000..251d943 Binary files /dev/null and b/img/Video.gif differ diff --git a/js/deferredRender.js b/js/deferredRender.js index bb3edd4..3a2ea78 100644 --- a/js/deferredRender.js +++ b/js/deferredRender.js @@ -5,12 +5,12 @@ R.deferredRender = function(state) { if (!aborted && ( !R.progCopy || - !R.progRed || + !R.progRedQuad || + !R.progRedSphere || !R.progClear || !R.prog_Ambient || !R.prog_BlinnPhong_PointLight || - !R.prog_Debug || - !R.progPost1)) { + !R.prog_Debug)) { console.log('waiting for programs to load...'); return; } @@ -24,30 +24,20 @@ } // Execute deferred shading pipeline - - // CHECKITOUT: START HERE! 
You can even uncomment this: - //debugger; - - { // TODO: this block should be removed after testing renderFullScreenQuad - gl.bindFramebuffer(gl.FRAMEBUFFER, null); - // TODO: Implement/test renderFullScreenQuad first - renderFullScreenQuad(R.progRed); - return; - } - R.pass_copy.render(state); - if (cfg && cfg.debugView >= 0) { + if (cfg && cfg.debugView >= 0 && cfg.debugView != 6) { // Do a debug render instead of a regular render // Don't do any post-processing in debug mode R.pass_debug.render(state); } else { + // * Deferred pass and postprocessing pass(es) - // TODO: uncomment these - // R.pass_deferred.render(state); - // R.pass_post1.render(state); - - // OPTIONAL TODO: call more postprocessing passes, if any + var colorTex = R.pass_deferred.render(state); + if (cfg && cfg.enableBloom == true) { + colorTex = R.pass_bloom.render(R.pass_deferred.colorTex); + } + R.pass_final.render(colorTex); } }; @@ -56,34 +46,27 @@ */ R.pass_copy.render = function(state) { // * Bind the framebuffer R.pass_copy.fbo - // TODO: uncomment - // gl.bindFramebuffer(gl.FRAMEBUFFER,R.pass_copy.fbo); - + gl.bindFramebuffer(gl.FRAMEBUFFER,R.pass_copy.fbo); // * Clear screen using R.progClear - // TODO: uncomment - // renderFullScreenQuad(R.progClear); + renderFullScreenQuad(R.progClear); // * Clear depth buffer to value 1.0 using gl.clearDepth and gl.clear - // TODO: uncomment - // gl.clearDepth(1.0); - // gl.clear(gl.DEPTH_BUFFER_BIT); + gl.clearDepth(1.0); + gl.clear(gl.DEPTH_BUFFER_BIT); // * "Use" the program R.progCopy.prog - // TODO: uncomment - // gl.useProgram(R.progCopy.prog); + gl.useProgram(R.progCopy.prog); // TODO: Go write code in glsl/copy.frag.glsl var m = state.cameraMat.elements; // * Upload the camera matrix m to the uniform R.progCopy.u_cameraMat // using gl.uniformMatrix4fv - // TODO: uncomment - // gl.uniformMatrix4fv(R.progCopy.u_cameraMat, false, m); + gl.uniformMatrix4fv(R.progCopy.u_cameraMat, false, m); // * Draw the scene - // TODO: uncomment - // 
drawScene(state); + drawScene(state); }; var drawScene = function(state) { @@ -101,17 +84,15 @@ R.pass_debug.render = function(state) { // * Unbind any framebuffer, so we can write to the screen // TODO: uncomment - // gl.bindFramebuffer(gl.FRAMEBUFFER, null); + gl.bindFramebuffer(gl.FRAMEBUFFER, null); // * Bind/setup the debug "lighting" pass // * Tell shader which debug view to use - // TODO: uncomment - // bindTexturesForLightPass(R.prog_Debug); - // gl.uniform1i(R.prog_Debug.u_debug, cfg.debugView); + bindTexturesForLightPass(R.prog_Debug); + gl.uniform1i(R.prog_Debug.u_debug, cfg.debugView); // * Render a fullscreen quad to perform shading on - // TODO: uncomment - // renderFullScreenQuad(R.prog_Debug); + renderFullScreenQuad(R.prog_Debug); }; /** @@ -125,17 +106,16 @@ gl.clearColor(0.0, 0.0, 0.0, 0.0); gl.clearDepth(1.0); gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT); - + // * _ADD_ together the result of each lighting pass // Enable blending and use gl.blendFunc to blend with: // color = 1 * src_color + 1 * dst_color // Here is a wonderful demo of showing how blend function works: // http://mrdoob.github.io/webgl-blendfunctions/blendfunc.html - // TODO: uncomment - // gl.enable(gl.BLEND); - // gl.blendEquation( gl.FUNC_ADD ); - // gl.blendFunc(gl.ONE,gl.ONE); + gl.enable(gl.BLEND); + gl.blendEquation( gl.FUNC_ADD ); + gl.blendFunc(gl.ONE,gl.ONE); // * Bind/setup the ambient pass, and render using fullscreen quad bindTexturesForLightPass(R.prog_Ambient); @@ -144,22 +124,144 @@ // * Bind/setup the Blinn-Phong pass, and render using fullscreen quad bindTexturesForLightPass(R.prog_BlinnPhong_PointLight); - // TODO: add a loop here, over the values in R.lights, which sets the - // uniforms R.prog_BlinnPhong_PointLight.u_lightPos/Col/Rad etc., - // then does renderFullScreenQuad(R.prog_BlinnPhong_PointLight). 
+ var shaderProg = R.prog_BlinnPhong_PointLight; + if (cfg && cfg.debugView == 6) { + shaderProg = R.progRedQuad; + if( cfg.scissorMode == MyScissorEnum.ScissorSphere ) + shaderProg = R.progRedSphere; + } + + if (cfg.scissorMode == MyScissorEnum.ScissorQuad) { + gl.enable(gl.SCISSOR_TEST); + } + + var camPos = vec3.create(); + camPos =[state.cameraPos.x, state.cameraPos.y, state.cameraPos.z]; + gl.uniform3fv(R.prog_BlinnPhong_PointLight.u_cameraPos, camPos); + + for (var i = 0; i < R.lights.length; i++) { + // Set the uniforms of R.prog_BlinnPhong_PointLight, + + gl.uniform3fv(shaderProg.u_lightPos, R.lights[i].pos); + gl.uniform3fv(shaderProg.u_lightCol, R.lights[i].col); + gl.uniform1f(shaderProg.u_lightRad, R.lights[i].rad); + if (cfg.scissorMode == MyScissorEnum.ScissorQuad) { + var sc = getScissorForLight(state.viewMat, state.projMat, R.lights[i]); + if (sc) { // sc is an array [xmin, ymin, width, height] + gl.scissor(sc[0], sc[1], sc[2], sc[3]); + + } + else { // getScissorForLight returns null if the scissor is off the screen. + continue; + } + } + // then does renderFullScreenQuad(R.prog_BlinnPhong_PointLight). + if (cfg.scissorMode == MyScissorEnum.ScissorSphere) { + renderSphere(state, shaderProg, R.lights[i], R.sphereModel); + } + else{ + renderFullScreenQuad(shaderProg); + } + } + + if (cfg.scissorMode == MyScissorEnum.ScissorQuad) { + gl.disable(gl.SCISSOR_TEST); + } + + // Disable blending so that it doesn't affect other code + gl.disable(gl.BLEND); + return R.pass_deferred.colorTex; + }; + + /** + * 'Post-Bloom' pass: Perform (first) pass of post-processing + */ + R.pass_bloom.render = function(colorTex) { + + // * Disable the framebuffer depth + gl.disable(gl.DEPTH_TEST); + + // * Bind the postprocessing shader program + gl.useProgram(R.prog_brightner.prog); + + // * Bind brightFbo. 
+ gl.bindFramebuffer(gl.FRAMEBUFFER, R.pass_bloom.bloomFbo[0]); + + // * Bind the deferred pass's color output as a texture input + // Set gl.TEXTURE0 as the gl.activeTexture unit + gl.activeTexture(gl.TEXTURE0); + + // Bind the TEXTURE_2D, R.pass_deferred.colorTex to the active texture unit + gl.bindTexture(gl.TEXTURE_2D, colorTex); + + // Configure the R.progPost1.u_color uniform to point at texture unit 0 + gl.uniform1i(R.prog_brightner.u_color, 0); - // TODO: In the lighting loop, use the scissor test optimization - // Enable gl.SCISSOR_TEST, render all lights, then disable it. - // - // getScissorForLight returns null if the scissor is off the screen. - // Otherwise, it returns an array [xmin, ymin, width, height]. - // - // var sc = getScissorForLight(state.viewMat, state.projMat, light); + // * Render a fullscreen quad to perform shading on + renderFullScreenQuad(R.prog_brightner); + // * Bind the postprocessing shader program + gl.useProgram(R.prog_gaussBlur.prog); + + gl.uniform1i(R.prog_gaussBlur.u_color, 0); + gl.uniform1fv(R.prog_gaussBlur.u_kernel, + [1.0, 0.1945946, 0.1216216, 0.054054, 0.016216]); + gl.uniform2f(R.prog_gaussBlur.u_texSize, width, height); + + var horizontalBlur = 1; + for (var i = 0; i < 2 * cfg.bloomIterations; i++) { + + // * Bind blurFbo. 
+ gl.bindFramebuffer(gl.FRAMEBUFFER, R.pass_bloom.bloomFbo[horizontalBlur]); + gl.activeTexture(gl.TEXTURE0); + gl.bindTexture(gl.TEXTURE_2D, R.pass_bloom.bloomTex[1-horizontalBlur]); + gl.uniform1i(R.prog_gaussBlur.horizontal, horizontalBlur); + + renderFullScreenQuad(R.prog_gaussBlur); + horizontalBlur = 1 - horizontalBlur; + } + + gl.bindFramebuffer(gl.FRAMEBUFFER, R.pass_bloom.bloomFbo[1-horizontalBlur]); + // Enable blending and use gl.blendFunc to blend with: + gl.enable(gl.BLEND); + gl.blendEquation( gl.FUNC_ADD ); + gl.blendFunc(gl.ONE,gl.ONE); + + gl.useProgram(R.prog_final.prog); + + gl.bindTexture(gl.TEXTURE_2D, colorTex); + gl.activeTexture(gl.TEXTURE0); + + gl.uniform1i(R.prog_final.u_color, 0); + renderFullScreenQuad(R.prog_final); + // Disable blending so that it doesn't affect other code gl.disable(gl.BLEND); + + gl.enable(gl.DEPTH_TEST); + return R.pass_bloom.bloomTex[0]; }; + R.pass_final.render = function(colorTex) { + + // * Disable the framebuffer depth + gl.disable(gl.DEPTH_TEST); + + gl.useProgram(R.prog_final.prog); + + // * Unbind any framebuffer, so we can write to the screen + gl.bindFramebuffer(gl.FRAMEBUFFER, null); + + gl.activeTexture(gl.TEXTURE0); + gl.bindTexture(gl.TEXTURE_2D, colorTex); + gl.uniform1i(R.prog_final.u_color, 0); + + // * Render a fullscreen quad to perform shading on + renderFullScreenQuad(R.prog_final); + + gl.enable(gl.DEPTH_TEST); + } + var bindTexturesForLightPass = function(prog) { gl.useProgram(prog.prog); @@ -175,34 +277,45 @@ gl.uniform1i(prog.u_depth, R.NUM_GBUFFERS); }; - /** - * 'post1' pass: Perform (first) pass of post-processing - */ - R.pass_post1.render = function(state) { - // * Unbind any existing framebuffer (if there are no more passes) - gl.bindFramebuffer(gl.FRAMEBUFFER, null); + var renderSphere = function(state, prog, light, model) { - // * Clear the framebuffer depth to 1.0 - gl.clearDepth(1.0); - gl.clear(gl.DEPTH_BUFFER_BIT); + var worldMtx = new THREE.Matrix4(); + { + var scaleMtx = new 
THREE.Matrix4(); + var transMtx = new THREE.Matrix4(); - // * Bind the postprocessing shader program - gl.useProgram(R.progPost1.prog); + scaleMtx.makeScale(light.rad, light.rad, light.rad); + transMtx.makeTranslation(light.pos[0], light.pos[1], light.pos[2]); + + worldMtx.multiplyMatrices(transMtx, scaleMtx); + } + + // Bind the program to use to draw the quad + gl.useProgram(prog.prog); - // * Bind the deferred pass's color output as a texture input - // Set gl.TEXTURE0 as the gl.activeTexture unit - // TODO: uncomment - // gl.activeTexture(gl.TEXTURE0); + // * Upload the camera matrix m to the uniform R.progCopy.u_cameraMat + // using gl.uniformMatrix4fv + gl.uniformMatrix4fv(prog.u_cameraMtx, false, state.cameraMat.elements); + gl.uniformMatrix4fv(prog.u_worldMtx,false, worldMtx.elements); - // Bind the TEXTURE_2D, R.pass_deferred.colorTex to the active texture unit - // TODO: uncomment - // gl.bindTexture(gl.TEXTURE_2D, R.pass_deferred.colorTex); + // Bind the VBO as the gl.ARRAY_BUFFER + gl.bindBuffer(gl.ARRAY_BUFFER, model.position); - // Configure the R.progPost1.u_color uniform to point at texture unit 0 - gl.uniform1i(R.progPost1.u_color, 0); + // Enable the bound buffer as the vertex attrib array for + // prog.a_position, using gl.enableVertexAttribArray + gl.enableVertexAttribArray(prog.a_position); - // * Render a fullscreen quad to perform shading on - renderFullScreenQuad(R.progPost1); + // Use gl.vertexAttribPointer to tell WebGL the type/layout for + // prog.a_position's access pattern. + gl.vertexAttribPointer(prog.a_position, 3, gl.FLOAT, gl.FALSE, 0, 0); + + gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, model.idx); + + // Use gl.drawArrays (or gl.drawElements) to draw your quad. + gl.drawElements(gl.TRIANGLES, model.elemCount, gl.UNSIGNED_INT, 0); + + // Unbind the array buffer. 
+ gl.bindBuffer(gl.ARRAY_BUFFER, null); }; var renderFullScreenQuad = (function() { @@ -230,12 +343,12 @@ // Bind the VBO as the gl.ARRAY_BUFFER // TODO: uncomment - // gl.bindBuffer(gl.ARRAY_BUFFER,vbo); + gl.bindBuffer(gl.ARRAY_BUFFER,vbo); // Upload the positions array to the currently-bound array buffer // using gl.bufferData in static draw mode. // TODO: uncomment - // gl.bufferData(gl.ARRAY_BUFFER,positions,gl.STATIC_DRAW); + gl.bufferData(gl.ARRAY_BUFFER,positions,gl.STATIC_DRAW); }; return function(prog) { @@ -249,21 +362,21 @@ // Bind the VBO as the gl.ARRAY_BUFFER // TODO: uncomment - // gl.bindBuffer(gl.ARRAY_BUFFER, vbo); + gl.bindBuffer(gl.ARRAY_BUFFER, vbo); // Enable the bound buffer as the vertex attrib array for // prog.a_position, using gl.enableVertexAttribArray // TODO: uncomment - // gl.enableVertexAttribArray(prog.a_position); + gl.enableVertexAttribArray(prog.a_position); // Use gl.vertexAttribPointer to tell WebGL the type/layout for // prog.a_position's access pattern. // TODO: uncomment - // gl.vertexAttribPointer(prog.a_position, 3, gl.FLOAT, gl.FALSE, 0, 0); + gl.vertexAttribPointer(prog.a_position, 3, gl.FLOAT, gl.FALSE, 0, 0); // Use gl.drawArrays (or gl.drawElements) to draw your quad. // TODO: uncomment - // gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); + gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); // Unbind the array buffer. 
gl.bindBuffer(gl.ARRAY_BUFFER, null); diff --git a/js/deferredSetup.js b/js/deferredSetup.js index 65136e0..d90f1dd 100644 --- a/js/deferredSetup.js +++ b/js/deferredSetup.js @@ -5,7 +5,8 @@ R.pass_copy = {}; R.pass_debug = {}; R.pass_deferred = {}; - R.pass_post1 = {}; + R.pass_bloom = {}; + R.pass_final = {}; R.lights = []; R.NUM_GBUFFERS = 4; @@ -18,6 +19,7 @@ loadAllShaderPrograms(); R.pass_copy.setup(); R.pass_deferred.setup(); + R.pass_bloom.setup(); }; // TODO: Edit if you want to change the light initial positions @@ -25,7 +27,7 @@ R.light_max = [14, 18, 6]; R.light_dt = -0.03; R.LIGHT_RADIUS = 4.0; - R.NUM_LIGHTS = 20; // TODO: test with MORE lights! + R.NUM_LIGHTS = 10; // TODO: test with MORE lights! var setupLights = function() { Math.seedrandom(0); @@ -98,6 +100,27 @@ gl.bindFramebuffer(gl.FRAMEBUFFER, null); }; + /** + * Create/configure framebuffer of post bloom stage. + */ + R.pass_bloom.setup = function() { + + R.pass_bloom.bloomFbo = [gl.createFramebuffer(), gl.createFramebuffer()]; + R.pass_bloom.bloomTex = [ + createAndBindColorTargetTexture( + R.pass_bloom.bloomFbo[0], gl_draw_buffers.COLOR_ATTACHMENT0_WEBGL), + createAndBindColorTargetTexture( + R.pass_bloom.bloomFbo[1], gl_draw_buffers.COLOR_ATTACHMENT0_WEBGL)]; + abortIfFramebufferIncomplete(R.pass_bloom.bloomFbo[0]); + abortIfFramebufferIncomplete(R.pass_bloom.bloomFbo[1]); + + // * Tell the WEBGL_draw_buffers extension which FBO attachments are + // being used. (This extension allows for multiple render targets.) + gl_draw_buffers.drawBuffersWEBGL([gl_draw_buffers.COLOR_ATTACHMENT0_WEBGL]); + + gl.bindFramebuffer(gl.FRAMEBUFFER, null); + }; + /** * Loads all of the shader programs used in the pipeline. 
*/ @@ -119,10 +142,22 @@ R.progCopy = p; }); + loadShaderProgram(gl, 'glsl/sphere.vert.glsl', 'glsl/red.frag.glsl', + function(prog) { + // Create an object to hold info about this shader program + var p = { prog: prog }; + p.u_cameraMtx = gl.getUniformLocation(prog, 'u_cameraMtx'); + p.u_worldMtx = gl.getUniformLocation(prog, 'u_worldMtx'); + + R.progRedSphere = p; + }); + loadShaderProgram(gl, 'glsl/quad.vert.glsl', 'glsl/red.frag.glsl', function(prog) { // Create an object to hold info about this shader program - R.progRed = { prog: prog }; + var p = { prog: prog }; + + R.progRedQuad = p; }); loadShaderProgram(gl, 'glsl/quad.vert.glsl', 'glsl/clear.frag.glsl', @@ -138,9 +173,10 @@ loadDeferredProgram('blinnphong-pointlight', function(p) { // Save the object into this variable for access later - p.u_lightPos = gl.getUniformLocation(p.prog, 'u_lightPos'); - p.u_lightCol = gl.getUniformLocation(p.prog, 'u_lightCol'); - p.u_lightRad = gl.getUniformLocation(p.prog, 'u_lightRad'); + p.u_cameraPos = gl.getUniformLocation(p.prog, 'u_cameraPos'); + p.u_lightPos = gl.getUniformLocation(p.prog, 'u_lightPos'); + p.u_lightCol = gl.getUniformLocation(p.prog, 'u_lightCol'); + p.u_lightRad = gl.getUniformLocation(p.prog, 'u_lightRad'); R.prog_BlinnPhong_PointLight = p; }); @@ -150,10 +186,25 @@ R.prog_Debug = p; }); - loadPostProgram('one', function(p) { + loadPostProgram('brightnessExtractor', function(p) { + p.u_color = gl.getUniformLocation(p.prog, 'u_color'); + // Save the object into this variable for access later + R.prog_brightner = p; + }); + + loadPostProgram('gaussBlur', function(p) { + p.u_color = gl.getUniformLocation(p.prog, 'u_color'); + p.u_horizontal = gl.getUniformLocation(p.prog, 'u_horizontal'); + p.u_texelSize = gl.getUniformLocation(p.prog, 'u_texelSize'); + p.u_kernelWeights = gl.getUniformLocation(p.prog, 'u_kernelWeights'); + // Save the object into this variable for access later + R.prog_gaussBlur = p; + }); + + loadPostProgram('final', function(p) { 
p.u_color = gl.getUniformLocation(p.prog, 'u_color'); // Save the object into this variable for access later - R.progPost1 = p; + R.prog_final = p; }); // TODO: If you add more passes, load and set up their shader programs. diff --git a/js/framework.js b/js/framework.js index 435bd43..968c0b7 100644 --- a/js/framework.js +++ b/js/framework.js @@ -115,7 +115,7 @@ var width, height; controls.panSpeed = 2.0; // Add sphere geometry to the scene so it gets initialized - var sph = new THREE.Mesh(new THREE.SphereGeometry(1, 8, 6)); + var sph = new THREE.Mesh(new THREE.SphereGeometry(1, 16, 12)); scene.add(sph); renderer.render(scene, camera); uploadModel(sph, function(m) { @@ -123,7 +123,7 @@ var width, height; }); // var glTFURL = 'models/glTF-duck/duck.gltf'; - var glTFURL = 'models/glTF-sponza-kai-fix/sponza.gltf'; + var glTFURL = 'models/gltf-sponza-kai-fix/sponza.gltf'; var glTFLoader = new MinimalGLTFLoader.glTFLoader(gl); glTFLoader.loadGLTF(glTFURL, function (glTF) { var curScene = glTF.scenes[glTF.defaultScene]; @@ -249,6 +249,8 @@ var width, height; idx: indicesBuffer, + interleaved: true, + attributes: vertexBuffer, posInfo: {size: posInfo.size, type: posInfo.type, stride: posInfo.stride, offset: posInfo.offset}, norInfo: {size: norInfo.size, type: norInfo.type, stride: norInfo.stride, offset: norInfo.offset}, @@ -276,7 +278,7 @@ var width, height; gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT); R.deferredSetup(); - + //renderer.render(R.sphereModel, camera); requestAnimationFrame(update); }; @@ -325,9 +327,21 @@ var width, height; gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, gidx); gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, idx, gl.STATIC_DRAW); + // var m = { + // idx: gidx, + // elemCount: idx.length, + // position: gposition, + // normal: gnormal, + // uv: guv + // }; + + // adapt to new readyModelForDraw and drawReadyModel (glTF version) var m = { idx: gidx, elemCount: idx.length, + + interleaved: false, + position: gposition, normal: gnormal, uv: guv @@ -340,4 
+354,4 @@ var width, height; }; window.handle_load.push(init); -})(); +})(); \ No newline at end of file diff --git a/js/ui.js b/js/ui.js index abd6119..1e9ab65 100644 --- a/js/ui.js +++ b/js/ui.js @@ -1,13 +1,20 @@ var cfg; +MyScissorEnum = { + None : 0, + ScissorQuad : 1, + ScissorSphere : 2, +}; + (function() { 'use strict'; var Cfg = function() { // TODO: Define config fields and defaults here - this.debugView = -1; - this.debugScissor = false; - this.enableEffect0 = false; + this.debugView = 0; + this.scissorMode = MyScissorEnum.None; + this.enableBloom = false; + this.bloomIterations = 5; }; var init = function() { @@ -22,13 +29,20 @@ var cfg; '2 Geometry normal': 2, '3 Color map': 3, '4 Normal map': 4, - '5 Surface normal': 5 + '5 Surface normal': 5, + '6 Scissor map': 6, + }); + + gui.add(cfg, 'scissorMode', { + '0 None': 0, + '1 Quad': 1, + '2 Sphere': 2, }); - gui.add(cfg, 'debugScissor'); - var eff0 = gui.addFolder('EFFECT NAME HERE'); - eff0.open(); - eff0.add(cfg, 'enableEffect0'); + var effect_bloom = gui.addFolder('Bloom'); + effect_bloom.open(); + effect_bloom.add(cfg, 'enableBloom'); + effect_bloom.add(cfg, 'bloomIterations', 0, 25); // TODO: add more effects toggles and parameters here }; diff --git a/js/util.js b/js/util.js index 8f43d38..c218f10 100644 --- a/js/util.js +++ b/js/util.js @@ -92,16 +92,35 @@ window.readyModelForDraw = function(prog, m) { gl.uniform1i(prog.u_normap, 1); } - gl.bindBuffer(gl.ARRAY_BUFFER, m.attributes); + if (m.interleaved) { + gl.bindBuffer(gl.ARRAY_BUFFER, m.attributes); - gl.enableVertexAttribArray(prog.a_position); - gl.vertexAttribPointer(prog.a_position, m.posInfo.size, m.posInfo.type, false, m.posInfo.stride, m.posInfo.offset); - - gl.enableVertexAttribArray(prog.a_normal); - gl.vertexAttribPointer(prog.a_normal, m.norInfo.size, m.norInfo.type, false, m.norInfo.stride, m.norInfo.offset); + gl.enableVertexAttribArray(prog.a_position); + gl.vertexAttribPointer(prog.a_position, m.posInfo.size, m.posInfo.type, 
false, m.posInfo.stride, m.posInfo.offset); + + gl.enableVertexAttribArray(prog.a_normal); + gl.vertexAttribPointer(prog.a_normal, m.norInfo.size, m.norInfo.type, false, m.norInfo.stride, m.norInfo.offset); + + gl.enableVertexAttribArray(prog.a_uv); + gl.vertexAttribPointer(prog.a_uv, m.uvInfo.size, m.uvInfo.type, false, m.uvInfo.stride, m.uvInfo.offset); + } else { + gl.enableVertexAttribArray(prog.a_position); + gl.bindBuffer(gl.ARRAY_BUFFER, m.position); + gl.vertexAttribPointer(prog.a_position, 3, gl.FLOAT, false, 0, 0); + + if (prog.a_normal >= 0 && m.normal) { + gl.enableVertexAttribArray(prog.a_normal); + gl.bindBuffer(gl.ARRAY_BUFFER, m.normal); + gl.vertexAttribPointer(prog.a_normal, 3, gl.FLOAT, false, 0, 0); + } - gl.enableVertexAttribArray(prog.a_uv); - gl.vertexAttribPointer(prog.a_uv, m.uvInfo.size, m.uvInfo.type, false, m.uvInfo.stride, m.uvInfo.offset); + if (prog.a_uv >= 0 && m.uv) { + gl.enableVertexAttribArray(prog.a_uv); + gl.bindBuffer(gl.ARRAY_BUFFER, m.uv); + gl.vertexAttribPointer(prog.a_uv, 2, gl.FLOAT, false, 0, 0); + } + } + gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, m.idx); }; @@ -110,40 +129,55 @@ window.drawReadyModel = function(m) { // TODO for TA in future: matrix transform for multiple hierachy gltf models // reference: https://github.com/CIS565-Fall-2016/Project5A-WebGL-Forward-Plus-Shading-with-glTF/blob/master/js/forwardPlusRenderer/forwardPlusRenderer.js#L201 - gl.drawElements(m.gltf.mode, m.gltf.indices.length, m.gltf.indicesComponentType, 0); + if (m.gltf) { + gl.drawElements(m.gltf.mode, m.gltf.indices.length, m.gltf.indicesComponentType, 0); + } else { + gl.drawElements(gl.TRIANGLES, m.elemCount, gl.UNSIGNED_INT, 0); + } }; window.getScissorForLight = (function() { // Pre-allocate for performance - avoids additional allocation var a = new THREE.Vector4(0, 0, 0, 0); var b = new THREE.Vector4(0, 0, 0, 0); - var minpt = new THREE.Vector2(0, 0); - var maxpt = new THREE.Vector2(0, 0); + const neg_one = new THREE.Vector2(-1, -1); + 
const pos_one = new THREE.Vector2(1, 1); var ret = [0, 0, 0, 0]; - return function(view, proj, l) { - // front bottom-left corner of sphere's bounding cube - a.fromArray(l.pos); - a.w = 1; - a.applyMatrix4(view); - a.x -= l.rad; - a.y -= l.rad; - a.z += l.rad; - a.applyMatrix4(proj); - a.divideScalar(a.w); + const n_cube_vertices = 8; + var offsets = Array(n_cube_vertices); + var pos = new THREE.Vector4(0, 0, 0, 0); + return function(view, proj, light) { // front bottom-left corner of sphere's bounding cube - b.fromArray(l.pos); - b.w = 1; - b.applyMatrix4(view); - b.x += l.rad; - b.y += l.rad; - b.z += l.rad; - b.applyMatrix4(proj); - b.divideScalar(b.w); - - minpt.set(Math.max(-1, a.x), Math.max(-1, a.y)); - maxpt.set(Math.min( 1, b.x), Math.min( 1, b.y)); + + // The following results in: + // [ -1, -1, -1, 0 ] <== back upper left corner of cube + // [ -1, -1, 1, 0 ] <== front upper left corner ... + // [ -1, 1, -1, 0 ] + // ... + var index = 0; + for (var i = -1; i <= 1; i += 2) { + for (var j = -1; j <= 1; j += 2) { + for (var k = -1; k <= 1; k += 2) { + offsets[index] = new THREE.Vector4(i, j, k, 0); + index += 1; + } + } + } + + var minpt = new THREE.Vector2(1, 1); + var maxpt = new THREE.Vector2(-1, -1); + for (var i in offsets) { + pos.fromArray(light.pos); + pos.w = 1; + pos.applyMatrix4(view); // project light into view space + pos.addScaledVector(offsets[i], light.rad); // offset from light + pos.applyMatrix4(proj); // project onto screen + pos.divideScalar(pos.w); + minpt.clamp(neg_one, pos); // update min point with pos + maxpt.clamp(pos, pos_one); // update max point with pos + } if (maxpt.x < -1 || 1 < minpt.x || maxpt.y < -1 || 1 < minpt.y) { @@ -185,4 +219,4 @@ window.downloadCanvas = (function() { var img = canvas.toDataURL('image/png'); downloadURI(img, 'deferred-' + time + '.png'); }; -})(); +})(); \ No newline at end of file