Post Processing Effects in OpenGL
Post Processing Effects in OpenGL
Table Of Contents:
Post processing effects are effects which are applied after the rendering of the scene. We achieve them by taking the final image from our frame buffer and manipulating its pixels using custom fragment shaders.
Initialization
Frame Buffer Creation
In order to use these effects in OpenGL we need a frame buffer, so we can render the scene to an image.
The way we are generating our frame buffer is very basic. Because we are not going to read any data back from the depth/stencil buffers — we only render into them — we simply create a render buffer which provides the depth and stencil storage for us.
// Framebuffer object: the offscreen render target the scene is drawn into.
uint32_t fbo;
glGenFramebuffers(1, &fbo);
// Renderbuffer object: write-only storage that will back the depth/stencil attachment.
uint32_t rbo;
glGenRenderbuffers(1, &rbo);
We also need a texture to write our final image to.
// Color attachment texture: the scene is rendered into this image and
// later sampled by the post-processing shader.
uint32_t texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// Allocate storage only (data pointer is nullptr); sized to the render target.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
SCREEN_WIDTH, SCREEN_HEIGHT, 0,
GL_RGB, GL_UNSIGNED_BYTE,
nullptr);
// Linear filtering keeps the image smooth if it is ever drawn at another size.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
If you want to learn more about these functions you can use docs.gl, a website which contains signature and description of OpenGL functions.
Now we can bind the texture into our frame buffer and render buffer.
// Bind the texture and the framebuffer so we can connect them.
glBindTexture(GL_TEXTURE_2D, texture);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
// Attach the texture as color attachment 0 (mip level 0).
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
// Allocate combined 24-bit depth / 8-bit stencil storage for the renderbuffer.
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, SCREEN_WIDTH, SCREEN_HEIGHT);
// Attach the renderbuffer as the framebuffer's depth+stencil attachment.
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_RENDERBUFFER, rbo);
// unbind buffers
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
The variables SCREEN_WIDTH
and SCREEN_HEIGHT
are not necessarily the screen size: you could have multiple
frame buffers covering different parts of your screen, or a viewport window for the
editor of your application, in which case they would be the size of that region.
And that's all for creating the frame buffer; now you can render your content by simply placing your rendering code between the bind and unbind calls of the frame buffer.
For the context we are going to draw a simple triangle.
Triangle Resource Creation
Shader code for our triangle:
// Vertex shader: positions and colors are hardcoded arrays indexed with
// gl_VertexID, so no vertex/index buffers are required for the triangle.
const char* triangle_vertex_source = R"(
#version 450
layout (location = 0) out vec3 v_frag_color;
vec2 positions[3] = vec2[](
vec2(-0.5, -0.5),
vec2(0.0, 0.5),
vec2(0.5, -0.5)
);
vec3 colors[3] = vec3[](
vec3(1.0, 0.0, 0.0),
vec3(0.0, 1.0, 0.0),
vec3(0.0, 0.0, 1.0)
);
void main() {
gl_Position = vec4(positions[gl_VertexID], 0.0, 1.0);
v_frag_color = colors[gl_VertexID];
})";
// Fragment shader: outputs the interpolated per-vertex color.
const char* triangle_fragment_source = R"(
#version 450
layout(location = 0) in vec3 v_frag_color;
layout(location = 0) out vec4 o_color;
void main() {
o_color = vec4(v_frag_color, 1.0);
})";
We don't particularly need a vertex/index buffer for the context of this tutorial; that's why I hardcoded the vertex information into the shader.
// create vertex array object
// (core profile requires a VAO to be bound even with no vertex buffers)
uint32_t triangle_vao;
glGenVertexArrays(1, &triangle_vao);
glBindVertexArray(triangle_vao);
// create shader program for our triangle
uint32_t triangle_program = glCreateProgram();
// compile shader sources
const uint32_t triangle_vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(triangle_vertex_shader, 1, &triangle_vertex_source, nullptr);
glCompileShader(triangle_vertex_shader);
const uint32_t triangle_fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(triangle_fragment_shader, 1, &triangle_fragment_source, nullptr);
glCompileShader(triangle_fragment_shader);
// you may want to check if compile is successful or not.
// https://www.khronos.org/opengl/wiki/Example/GLSL_Shader_Compile_Error_Testing
// attach shaders to our program
glAttachShader(triangle_program, triangle_vertex_shader);
glAttachShader(triangle_program, triangle_fragment_shader);
// link shaders to our program
glLinkProgram(triangle_program);
// now we can delete the shaders because we are not going to use them anymore
// (the driver defers actual deletion while they stay attached to the program)
glDeleteShader(triangle_vertex_shader);
glDeleteShader(triangle_fragment_shader);
// Main loop: draw the scene into the offscreen framebuffer every frame.
while (is_running) {
// bind our frame buffer
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
// we are not doing anything with depth but its good practice
// because we will disable it later when we draw our screen quad.
glEnable(GL_DEPTH_TEST);
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// render our triangle
glBindVertexArray(triangle_vao);
glUseProgram(triangle_program);
// 3 vertices, no buffers: the vertex shader reads them from its hardcoded arrays
glDrawArrays(GL_TRIANGLES, 0, 3);
// unbind our frame buffer
// (subsequent rendering targets the default framebuffer / window again)
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
Try running with and without the frame buffer binding: without the binding you render straight to the window and should see the triangle, while with the frame buffer bound the triangle goes into our texture and nothing appears on screen yet. Now that we have our triangle rendered to a texture we can manipulate it to look just how we want!
For that we need couple of things:
- A vertex array object which, together with a shader, will present our new texture to the screen.
- A shader to manipulate our frame buffer texture.
Let's start by creating our screen quad to render the manipulated texture.
Screen Quad Resource Creation
// Vertex shader: two hardcoded triangles covering the whole screen in NDC,
// with matching texture coordinates passed on to the fragment stage.
const char* screen_vertex_source = R"(
#version 450 core
layout(location = 0) out vec2 v_tex_coord;
vec2 VERTICES[6] = vec2[](
vec2(-1.0, -1.0),
vec2(-1.0, 1.0),
vec2(1.0, 1.0),
vec2(1.0, 1.0),
vec2(1.0, -1.0),
vec2(-1.0, -1.0));
vec2 TEX_COORDS[6] = vec2[](
vec2(0.0, 0.0),
vec2(0.0, 1.0),
vec2(1.0, 1.0),
vec2(1.0, 1.0),
vec2(1.0, 0.0),
vec2(0.0, 0.0));
void main() {
v_tex_coord = TEX_COORDS[gl_VertexID];
gl_Position = vec4(VERTICES[gl_VertexID], 0.0, 1.0);
})";
// Fragment shader: samples the offscreen color texture. Post-processing
// effects are implemented by replacing this shader's source.
const char* screen_fragment_source = R"(
#version 450 core
layout(location = 0) in vec2 v_tex_coord;
layout(location = 0) out vec4 o_color;
uniform sampler2D u_screen_texture;
void main() {
o_color = texture(u_screen_texture, v_tex_coord);
})";
// create vertex array object
uint32_t screen_vao;
glGenVertexArrays(1, &screen_vao);
glBindVertexArray(screen_vao);
// create shader program for our screen
uint32_t screen_program = glCreateProgram();
// compile shader sources
const uint32_t screen_vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(screen_vertex_shader, 1, &screen_vertex_source, nullptr);
glCompileShader(screen_vertex_shader);
const uint32_t screen_fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(screen_fragment_shader, 1, &screen_fragment_source, nullptr);
glCompileShader(screen_fragment_shader);
// attach shaders to our program
glAttachShader(screen_program, screen_vertex_shader);
glAttachShader(screen_program, screen_fragment_shader);
// link shaders to our program
glLinkProgram(screen_program);
// now we can delete the shaders because we are not going to use them anymore
glDeleteShader(screen_vertex_shader);
glDeleteShader(screen_fragment_shader);
// point the sampler uniform at texture unit 0 (set once; the program keeps it)
glUseProgram(screen_program);
glUniform1i(glGetUniformLocation(screen_program, "u_screen_texture"), 0);
Let's render the quad to the screen.
...
// depth testing is not needed when drawing a fullscreen quad
glDisable(GL_DEPTH_TEST);
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glBindVertexArray(screen_vao);
glUseProgram(screen_program);
// bind our screen texture
// (texture unit 0 is active by default, matching the sampler uniform above)
glBindTexture(GL_TEXTURE_2D, texture);
// draw our quad to the screen
glDrawArrays(GL_TRIANGLES, 0, 6);
Now try to run the program: you should see the same triangle, but now we are drawing it to a texture and then presenting a quad with that texture bound to it.
NOTE: after you are done, make sure the objects you've created are destroyed.
// destroy triangle resources
glDeleteVertexArrays(1, &triangle_vao);
glDeleteProgram(triangle_program);
// destroy screen-quad and frame buffer resources
glDeleteVertexArrays(1, &screen_vao);
glDeleteProgram(screen_program);
glDeleteFramebuffers(1, &fbo);
glDeleteRenderbuffers(1, &rbo);
glDeleteTextures(1, &texture);
That was all of the initialization needed for post-processing; now we can start creating some effects! You can simply change screen_fragment_source
to achieve these effects.
Simple Effects
Now let's get into manipulating our screen image. Below are some basic examples of effects we can achieve.
Gray Scale
In order to turn our image into a gray scale image we need to get the intensity
of each pixel.
#version 450 core
layout(location = 0) in vec2 v_tex_coord;
layout(location = 0) out vec4 o_color;
uniform sampler2D u_screen_texture;
void main() {
o_color = texture(u_screen_texture, v_tex_coord);
// intensity = plain average of the three color channels
float average = (o_color.r + o_color.g + o_color.b) / 3.0;
// write the same intensity to r, g and b; keep the original alpha
o_color = vec4(average, average, average, o_color.a);
}
Here we are finding the average of every color component of the pixel (rgb) and then using that value as our final color. This gives us a basic gray scale image.
But this is not a very sophisticated way to achieve this; we can play with the average value a little more. The human eye is more sensitive to green light, so a simple average like this might not accurately represent perceived brightness.
// Rec. 709 luma weights: green contributes most to perceived brightness
float average = 0.2126 * o_color.r + 0.7152 * o_color.g + 0.0722 * o_color.b;
The values we are multiplying by are called weights, and they determine how much each channel contributes to the result — the channel with the bigger weight contributes the most.
Chromatic Aberration
Chromatic aberration is another frequently used post-processing effect. Essentially, it involves the adjustment of color channels to create a desired visual effect
#version 450 core
layout(location = 0) in vec2 v_tex_coord;
layout(location = 0) out vec4 o_color;
uniform sampler2D u_screen_texture;
// per-channel UV offset: r/g/b components shift the respective color channel
uniform vec3 u_offset;
void main() {
o_color = texture(u_screen_texture, v_tex_coord);
// re-sample each channel at a slightly shifted coordinate
// (adding a scalar to a vec2 offsets both u and v by that amount)
o_color.r = texture(u_screen_texture, v_tex_coord + u_offset.r).r;
o_color.g = texture(u_screen_texture, v_tex_coord + u_offset.g).g;
o_color.b = texture(u_screen_texture, v_tex_coord + u_offset.b).b;
}
This is a very basic implementation of chromatic aberration and it does not have any correction applied
on top of it. The u_offset uniform here controls how much each channel is offset. You can try using
r = 0.009f
, g = 0.006f
and b = -0.006f
value for testing. You can set the offsets after the binding of the
screen shader like following.
...
glUseProgram(screen_program);
// upload the per-channel offsets (r, g, b) used by the chromatic aberration shader
glUniform3f(glGetUniformLocation(screen_program, "u_offset"), 0.009f, 0.006f, -0.006f);
...
Kernel Effects
Kernel effects are effects which combine multiple surrounding pixels according to weights. Usually we define those kernels as 3x3 matrices. A basic kernel effect can be achieved as below.
#version 450 core
layout(location = 0) in vec2 v_tex_coord;
layout(location = 0) out vec4 o_color;
uniform sampler2D u_screen_texture;
void main() {
// size of one texel in UV space, used to step to the 8 neighbouring pixels
vec2 tex_size = textureSize(u_screen_texture, 0).xy;
vec2 offset = 1.0 / tex_size;
vec2 offsets[9] = vec2[](
vec2(-offset.x, offset.y), // top-left
vec2( 0.0, offset.y), // top-center
vec2( offset.x, offset.y), // top-right
vec2(-offset.x, 0.0), // center-left
vec2(0.0, 0.0), // center-center
vec2(offset.x, 0.0), // center-right
vec2(-offset.x, -offset.y), // bottom-left
vec2(0.0, -offset.y), // bottom-center
vec2(offset.x, -offset.y) // bottom-right
);
// sharpen kernel (weights sum to 1, preserving overall brightness)
float kernel[9] = float[](
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
);
// NOTE: do not name this array `texture` -- that would shadow the built-in
// texture() function called below and fails to compile on strict GLSL compilers.
vec3 samples[9];
for(int i = 0; i < 9; i++) {
samples[i] = vec3(texture(u_screen_texture, v_tex_coord.st + offsets[i]));
}
// weighted sum of the 3x3 neighbourhood
vec3 color = vec3(0.0);
for(int i = 0; i < 9; i++) {
color += samples[i] * kernel[i];
}
o_color = vec4(color, 1.0);
}
NOTE: the values of the kernel matrix should sum up to 1; if the sum is less than one the image will become darker, if it is greater than 1 the image will become brighter, and if it is zero only the edges will be shown.
Sharpen
The kernel effect above was doing some simple sharpen effect with kernel below.
// sharpen: strong positive center, negative neighbours; weights sum to 1
float kernel[9] = float[](
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
);
Though it is very negligible with our triangle you can see interpolation of colors on the edges became sharper.
Blur
Blur might be one of the most popular effects used in the game and film industry. There are many ways to achieve blur, and ours is one of the easiest.
// 3x3 blur: each weight divided by the total (16) so they sum to 1
float kernel[9] = float[](
1.0 / 16, 2.0 / 16, 1.0 / 16,
2.0 / 16, 4.0 / 16, 2.0 / 16,
1.0 / 16, 2.0 / 16, 1.0 / 16
);
In this implementation, we're creating a 3x3 kernel. Since the sum of all the kernel values equals 16, we normalize each element of the matrix by dividing it by the total sum of the matrix. This normalization process ensures that each value represents the average contribution of its neighboring pixels to the final result, effectively distributing the blur across the image.
Changes of the effects could be negligible with our model but it would show itself more in a bigger scene.
You can achieve a lot with this setup, and of course you can use multiple effects at once. My recommendation is to check the 3d-game-shaders-for-beginners repo from lettier; it covers many other post-processing effects and how they are implemented.