From ca2835943ca4327ad08b54af480e0c6333df201f Mon Sep 17 00:00:00 2001
From: talha
Date: Mon, 22 Apr 2024 03:38:29 +0500
Subject: Completed main levels to progress to text-rendering and 2d development.

- Only lessons left are geometry shaders and anti-aliasing
- will get to those later on soon
- need to do text rendering now
---
 source/lessons/uniform buffer objects/main.cpp     | 1405 ++++++++++++++++++++
 source/lessons/uniform buffer objects/math.h       |  409 ++++++
 .../shaders/blend_test.fs.glsl                     |   14 +
 .../uniform buffer objects/shaders/cubemap.fs.glsl |    9 +
 .../uniform buffer objects/shaders/cubemap.vs.glsl |   14 +
 .../shaders/depth_test.fs.glsl                     |   30 +
 .../shaders/depth_test.vs.glsl                     |   21 +
 .../uniform buffer objects/shaders/fbo.fs.glsl     |   72 +
 .../uniform buffer objects/shaders/fbo.vs.glsl     |   10 +
 .../uniform buffer objects/shaders/refl.fs.glsl    |   14 +
 .../uniform buffer objects/shaders/refl.vs.glsl    |   16 +
 .../uniform buffer objects/shaders/refr.fs.glsl    |   15 +
 .../uniform buffer objects/shaders/refr.vs.glsl    |   20 +
 13 files changed, 2049 insertions(+)
 create mode 100644 source/lessons/uniform buffer objects/main.cpp
 create mode 100644 source/lessons/uniform buffer objects/math.h
 create mode 100644 source/lessons/uniform buffer objects/shaders/blend_test.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/cubemap.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/cubemap.vs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/depth_test.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/depth_test.vs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/fbo.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/fbo.vs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/refl.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/refl.vs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/refr.fs.glsl
 create mode 100644 source/lessons/uniform buffer objects/shaders/refr.vs.glsl

diff --git a/source/lessons/uniform buffer objects/main.cpp b/source/lessons/uniform buffer objects/main.cpp
new file mode 100644
index 0000000..889536f
--- /dev/null
+++ b/source/lessons/uniform buffer objects/main.cpp
@@ -0,0 +1,1405 @@
+// (include targets reconstructed: these are the headers the code below uses,
+// with the GL function loader assumed to be glad)
+#include <glad/glad.h>
+#include <assimp/Importer.hpp>
+#include <assimp/scene.h>
+#include <assimp/postprocess.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+
+/* @lookup:
+* - understand kernels, how they work and how they affect post processing
+* - Check to see why it is necessary to do glBindTexture()
+* - understand the difference between binding textures and activating a texture unit
+* - I do not understand how floating point numbers work, so I should probably look into that.
+* - The normal matrix calculation in the fragment shader for the object affected by light has been mainly copied.
+*   I have tried to understand the formula, and while it makes some sense, it is not fully clear to me and I cannot picture it yet.
+*   Revisit the derivation of the normal matrix some time in the future.
+* - Look up the derivation of the formula for reflecting a vector about a normal. I am doing that for specular lighting, but the
+*   learnopengl tutorial just uses the glsl reflect function, and at the time of writing it is also very late, so I am not in the
+*   mood or position to look into it at present.
+* - One of the things I have observed with specular lights is that the circle/specular highlight follows the camera (me) when I move.
+*   I would like to figure out a way by which this does not happen and it remains fixed on the object, at the angle at which it hits.
+*   All of this will be made complicated by the fact that, of course, everything is actually happening from the camera's perspective.
+*   I would still love to figure this out.
+*/
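On the reflection question in the notes above: the identity glsl's reflect() implements is r = i - 2(i.n)n for a unit normal n, i.e. subtract the projection of the incident vector onto the normal twice, which flips the component of i along n. A minimal sketch using the Vec3 helpers from math.h in this commit (the function name reflect3v is illustrative, not something the patch defines):

    #include "math.h"  // Vec3, dot_multiply3v, scaler_multiply3v, subtract3v from this commit

    // r = i - 2*dot(i, n)*n, with n assumed to be unit length
    Vec3 reflect3v(Vec3 i, Vec3 n)
    {
        r32 twice_projection = 2.0f * dot_multiply3v(i, n);
        return subtract3v(i, scaler_multiply3v(n, twice_projection));
    }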
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef float r32;
+typedef double r64;
+
+typedef u8 b8;
+
+#include "math.h"
+
+// =========== Shader Loading =============
+
+unsigned int gl_create_vertex_shader(char* vertex_shader_source)
+{
+    unsigned int vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+    glShaderSource(vertex_shader, 1, &vertex_shader_source, NULL);
+    glCompileShader(vertex_shader);
+
+    int success;
+    char info_log[512];
+    glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
+    if (!success)
+    {
+        glGetShaderInfoLog(vertex_shader, 512, NULL, info_log);
+        printf("================================\n");
+        printf("vertex shader compilation failed:\n%s\n", info_log);
+    }
+
+    return vertex_shader;
+}
+
+unsigned int gl_create_fragment_shader(char* fragment_shader_source)
+{
+    unsigned int fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+    glShaderSource(fragment_shader, 1, &fragment_shader_source, NULL);
+    glCompileShader(fragment_shader);
+
+    int success;
+    char info_log[512];
+    glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
+    if (!success)
+    {
+        glGetShaderInfoLog(fragment_shader, 512, NULL, info_log);
+        printf("================================\n");
+        printf("fragment shader compilation failed:\n%s\n", info_log);
+    }
+
+    return fragment_shader;
+}
+
+unsigned int gl_create_shader_program(unsigned int vertex_shader, unsigned int fragment_shader)
+{
+    unsigned int shader_program = glCreateProgram();
+
+    glAttachShader(shader_program, vertex_shader);
+    glAttachShader(shader_program, fragment_shader);
+    glLinkProgram(shader_program);
+
+    int success;
+    char info_log[512];
+    glGetProgramiv(shader_program, GL_LINK_STATUS, &success);
+    if (!success)
+    {
+        glGetProgramInfoLog(shader_program, 512, NULL, info_log);
+        printf("================================\n");
+        printf("shader program linking failed:\n%s\n", info_log);
+    }
+
+    glDeleteShader(vertex_shader);
+    glDeleteShader(fragment_shader);
+
+    return shader_program;
+}
+
+unsigned int gl_shader_program(char* vertex_shader_source, char* fragment_shader_source)
+{
+    unsigned int vertex_shader = gl_create_vertex_shader(vertex_shader_source);
+    unsigned int fragment_shader = gl_create_fragment_shader(fragment_shader_source);
+    unsigned int shader_program = gl_create_shader_program(vertex_shader, fragment_shader);
+
+    return shader_program;
+}
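The helpers above take in-memory source strings; a small sketch of how they might be driven from files on disk (read_entire_file is an illustrative helper, not part of this commit, and the shader paths just reuse files added by this patch):

    #include <stdio.h>
    #include <stdlib.h>

    // read a whole text file into a NUL-terminated heap buffer (illustrative helper)
    char* read_entire_file(const char* path)
    {
        FILE* f = fopen(path, "rb");
        if (!f) { printf("failed to open %s\n", path); return NULL; }
        fseek(f, 0, SEEK_END);
        long size = ftell(f);
        fseek(f, 0, SEEK_SET);
        char* buf = (char*)malloc(size + 1);
        fread(buf, 1, size, f);
        buf[size] = '\0';
        fclose(f);
        return buf;
    }

    // usage: compile and link a program from two source files
    // unsigned int program = gl_shader_program(
    //     read_entire_file("shaders/depth_test.vs.glsl"),
    //     read_entire_file("shaders/depth_test.fs.glsl"));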
+
+Mat4 camera_create4m(Vec3 camera_pos, Vec3 camera_look, Vec3 camera_up)
+{
+    // @note: We compute forward as (camera_pos - camera_look) so that the axis the camera
+    // looks down becomes the camera's +z axis. If we subtracted the other way around, the
+    // axis the camera looks down would be the negative one.
+    // I am still learning from learnopengl.com, but I imagine this was done for
+    // convenience's sake.
+    Vec3 camera_forward_dir = normalize3v(subtract3v(camera_pos, camera_look));
+    Vec3 camera_right_dir = normalize3v(cross_multiply3v(camera_up, camera_forward_dir));
+    Vec3 camera_up_dir = normalize3v(cross_multiply3v(camera_forward_dir, camera_right_dir));
+
+    Mat4 res = lookat4m(camera_up_dir, camera_forward_dir, camera_right_dir, camera_pos);
+
+    return res;
+}
+
+Vec3 camera_look_around(r32 angle_pitch, r32 angle_yaw)
+{
+    // spherical coordinates: yaw spins around the y axis, pitch tilts off the xz plane
+    Vec3 camera_look = {0.0};
+    camera_look.x = cosf(angle_yaw) * cosf(angle_pitch);
+    camera_look.y = sinf(angle_pitch);
+    camera_look.z = sinf(angle_yaw) * cosf(angle_pitch);
+    camera_look = normalize3v(camera_look);
+
+    return camera_look;
+}
+
+s32 gl_load_texture(u32 texture_id, const char* path)
+{
+    s32 width, height, nrChannels;
+    unsigned char *data = stbi_load(path, &width, &height, &nrChannels, 0);
+    if (data)
+    {
+        GLenum format = GL_RGB; // fallback in case the channel count is unexpected
+        if (nrChannels == 1)
+            format = GL_RED;
+        else if (nrChannels == 3)
+            format = GL_RGB;
+        else if (nrChannels == 4)
+            format = GL_RGBA;
+
+        glBindTexture(GL_TEXTURE_2D, texture_id);
+        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+        glGenerateMipmap(GL_TEXTURE_2D);
+
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        stbi_image_free(data);
+    }
+    else
+    {
+        printf("failed to load image texture at path: %s\n", path);
+        stbi_image_free(data);
+    }
+
+    return texture_id;
+}
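Note that gl_load_texture expects the caller to have generated the texture handle already; it only binds, uploads, and parameterizes it. A usage sketch (the image path is illustrative):

    u32 texture;
    glGenTextures(1, &texture);                        // create the handle first
    gl_load_texture(texture, "assets/container.png");  // then upload pixels into it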
+
+// =================== Model Loading ========================
+// This section contains a whole host of things:
+// 1. classes
+// 2. std::vectors
+// 3. std::strings
+// that I have only used as glue, since I did not know if I had the model loading set up properly.
+// @todo: replace these things eventually. For now the goal is to complete learnopengl
+
+s32 TextureFromFile(const char* filepath, std::string directory)
+{
+    // @note: this function is stupid, as it became outdated the moment I needed to tweak the
+    // wrapping parameters. Either those become function parameters (which makes sense, I guess)
+    // or I look at exactly which steps I keep reusing and pull those out into a function, so this
+    // one is called fewer times.
+    //
+    // I am guessing this won't look good from a design point of view for all those jobs and
+    // postings, even if it may be the simpler and faster thing to do, albeit at the cost of typing.
+    std::string filename = std::string(filepath);
+    filename = directory + '/' + filename;
+
+    u32 texid;
+    glGenTextures(1, &texid);
+
+    s32 width, height, nrChannels;
+    unsigned char *data = stbi_load(filename.c_str(), &width, &height, &nrChannels, 0);
+    if (data)
+    {
+        GLenum format = GL_RGB; // fallback in case the channel count is unexpected
+        if (nrChannels == 1)
+            format = GL_RED;
+        else if (nrChannels == 3)
+            format = GL_RGB;
+        else if (nrChannels == 4)
+            format = GL_RGBA;
+
+        glBindTexture(GL_TEXTURE_2D, texid);
+        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+        glGenerateMipmap(GL_TEXTURE_2D);
+
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+
+        stbi_image_free(data);
+    }
+    else
+    {
+        printf("failed to load image texture at path: %s\n", filepath);
+        stbi_image_free(data);
+    }
+
+    return texid;
+}
+
+enum TextureType { TextureDiffuse=0, TextureSpecular };
+
+struct Vertex {
+    Vec3 position;
+    Vec3 normal;
+    Vec2 texture;
+};
+
+struct Texture {
+    u32 id;
+    enum TextureType type;
+    std::string fname;
+};
+
+class Mesh {
+    public:
+        std::vector<Vertex> vertices;
+        std::vector<u32> indices;
+        std::vector<Texture> textures;
+
+        u32 vao;
+        u32 vbo;
+        u32 ebo;
+
+        Mesh(std::vector<Vertex> vertices, std::vector<u32> indices, std::vector<Texture> textures)
+        {
+            this->vertices = vertices;
+            this->indices = indices;
+            this->textures = textures;
+
+            // setup mesh shader stuff
+            glGenVertexArrays(1, &vao);
+            glGenBuffers(1, &vbo);
+            glGenBuffers(1, &ebo);
+
+            glBindVertexArray(vao);
+
+            glBindBuffer(GL_ARRAY_BUFFER, vbo);
+            glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(struct Vertex), &(this->vertices[0]), GL_STATIC_DRAW);
+
+            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
+            glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->indices.size() * sizeof(u32), &(this->indices[0]), GL_STATIC_DRAW);
+
+            // position
+            glEnableVertexAttribArray(0);
+            glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)0);
+            // normal
+            glEnableVertexAttribArray(1);
+            glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal));
+            // texture
+            glEnableVertexAttribArray(2);
+            glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texture));
+
+            glBindVertexArray(0);
+        }
+
+        void draw(u32 shader_program)
+        {
+            glUseProgram(shader_program);
+
+            u32 diffuse_num = 1;
+            u32 specular_num = 1;
+            char tex_unit_name[64];
+            // set shininess
+            s32 mat_shine_loc = glGetUniformLocation(shader_program, "material.shininess");
+            glUniform1f(mat_shine_loc, 32.0f);
+
+            // (loop body reconstructed: the usual learnopengl-style texture-binding
+            // loop implied by the counters and buffer declared above)
+            for (u32 i = 0; i < textures.size(); i++)
+            {
+                // activate a texture unit, bind this texture to it, and point the
+                // matching numbered sampler uniform at that unit
+                glActiveTexture(GL_TEXTURE0 + i);
+                if (textures[i].type == TextureDiffuse)
+                    sprintf(tex_unit_name, "material.diffuse%d", diffuse_num++);
+                else if (textures[i].type == TextureSpecular)
+                    sprintf(tex_unit_name, "material.specular%d", specular_num++);
+                glUniform1i(glGetUniformLocation(shader_program, tex_unit_name), i);
+                glBindTexture(GL_TEXTURE_2D, textures[i].id);
+            }
+            glActiveTexture(GL_TEXTURE0);
+
+            glBindVertexArray(vao);
+            glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
+            glBindVertexArray(0);
+        }
+};
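The draw loop above also answers the earlier @lookup question about binding versus activating: glActiveTexture selects which texture unit subsequent texture calls target, glBindTexture attaches a texture object to the active unit, and glUniform1i tells a sampler uniform which unit number to sample from. Spelled out with illustrative names:

    glActiveTexture(GL_TEXTURE3);              // unit 3 becomes the active unit
    glBindTexture(GL_TEXTURE_2D, my_texture);  // my_texture now sits on unit 3's 2D target
    s32 loc = glGetUniformLocation(prog, "TexId");
    glUniform1i(loc, 3);                       // sampler TexId reads from unit 3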
+
+class Model {
+    // (the members lost above are reconstructed; the constructor is the usual
+    // learnopengl entry point that simply forwards to load_model)
+    public:
+        Model(std::string path)
+        {
+            load_model(path);
+        }
+        void draw(u32 shader_program);
+
+    private:
+        std::vector<Texture> loaded_textures;
+        std::vector<Mesh> meshes;
+        std::string directory;
+
+        void load_model(std::string path);
+        void process_node(aiNode *node, const aiScene *scene);
+        Mesh process_mesh(aiMesh *mesh, const aiScene *scene);
+        std::vector<Texture> load_material_textures(aiMaterial *mat, aiTextureType type, TextureType type_name);
+};
+
+void Model::draw(u32 shader_program)
+{
+    for (int i=0; i < meshes.size(); i++)
+    {
+        meshes[i].draw(shader_program);
+    }
+}
+
+void Model::load_model(std::string path)
+{
+    Assimp::Importer import;
+    const aiScene *scene = import.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs);
+
+    if (!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode)
+    {
+        printf("error loading model: %s\n", import.GetErrorString());
+        return;
+    }
+
+    directory = path.substr(0, path.find_last_of('/'));
+    process_node(scene->mRootNode, scene);
+}
+
+void Model::process_node(aiNode *node, const aiScene *scene)
+{
+    for (int i=0; i < node->mNumMeshes; i++)
+    {
+        aiMesh *mesh = scene->mMeshes[node->mMeshes[i]];
+        meshes.push_back(process_mesh(mesh, scene));
+    }
+
+    for (int i=0; i < node->mNumChildren; i++)
+    {
+        process_node(node->mChildren[i], scene);
+    }
+}
+
+Mesh Model::process_mesh(aiMesh *mesh, const aiScene *scene)
+{
+    std::vector<Vertex> vertices;
+    std::vector<u32> indices;
+    std::vector<Texture> textures;
+
+    for (u32 i=0; i < mesh->mNumVertices; i++)
+    {
+        Vec3 position;
+        position.x = mesh->mVertices[i].x;
+        position.y = mesh->mVertices[i].y;
+        position.z = mesh->mVertices[i].z;
+
+        Vec3 normal;
+        normal.x = mesh->mNormals[i].x;
+        normal.y = mesh->mNormals[i].y;
+        normal.z = mesh->mNormals[i].z;
+
+        Vec2 texture = {0, 0};
+        if (mesh->mTextureCoords[0])
+        {
+            texture.x = mesh->mTextureCoords[0][i].x;
+            texture.y = mesh->mTextureCoords[0][i].y;
+        }
+
+        struct Vertex vertex;
+        vertex.position = position;
+        vertex.normal = normal;
+        vertex.texture = texture;
+
+        vertices.push_back(vertex);
+    }
+    // process indices
+    for (u32 i = 0; i < mesh->mNumFaces; i++)
+    {
+        aiFace face = mesh->mFaces[i];
+        for (u32 j = 0; j < face.mNumIndices; j++)
+        {
+            indices.push_back(face.mIndices[j]);
+        }
+    }
+    // process material
+    if (mesh->mMaterialIndex >= 0)
+    {
+        aiMaterial *material = scene->mMaterials[mesh->mMaterialIndex];
+        std::vector<Texture> diffuse_maps = load_material_textures(material, aiTextureType_DIFFUSE, TextureDiffuse);
+        textures.insert(textures.end(), diffuse_maps.begin(), diffuse_maps.end());
+        std::vector<Texture> specular_maps = load_material_textures(material, aiTextureType_SPECULAR, TextureSpecular);
+        textures.insert(textures.end(), specular_maps.begin(), specular_maps.end());
+    }
+
+    return Mesh(vertices, indices, textures);
+}
+
+std::vector<Texture> Model::load_material_textures(aiMaterial *mat, aiTextureType type, TextureType tex_type)
+{
+    std::vector<Texture> textures;
+    for (u32 i=0; i < mat->GetTextureCount(type); i++)
+    {
+        bool load_texture = true;
+        aiString str;
+        mat->GetTexture(type, i, &str);
+        const char* fname = str.C_Str();
+
+        // (loop reconstructed: the dedup check implied by the load_texture flag
+        // and the loaded_textures cache)
+        for (s32 j=0; j < loaded_textures.size(); j++)
+        {
+            if (loaded_textures[j].fname == std::string(fname))
+            {
+                // already uploaded once; reuse it instead of reading the file again
+                textures.push_back(loaded_textures[j]);
+                load_texture = false;
+                break;
+            }
+        }
+
+        if (load_texture)
+        {
+            Texture texture;
+            texture.id = TextureFromFile(fname, directory);
+            texture.type = tex_type;
+            texture.fname = std::string(fname);
+            textures.push_back(texture);
+            loaded_textures.push_back(texture);
+        }
+    }
+
+    return textures;
+}
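A usage sketch for the classes above, assuming the constructor as reconstructed there (the model path and shader program handle are illustrative):

    // load once at startup: Assimp parses the file and each mesh is uploaded to the GPU
    Model backpack("assets/backpack/backpack.obj");

    // each frame, after binding the program and setting its uniforms:
    backpack.draw(shader_program);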
diff --git a/source/lessons/uniform buffer objects/math.h b/source/lessons/uniform buffer objects/math.h
new file mode 100644
--- /dev/null
+++ b/source/lessons/uniform buffer objects/math.h
@@ -0,0 +1,409 @@
+// (the top of this file was lost: the include guard, the Square helper used by
+// magnitude3v below, and the clamp signature are reconstructed)
+#ifndef MATH_H
+#define MATH_H
+
+#include <math.h>
+
+#define Square(x) ((x)*(x))
+
+r32 clampf(r32 x, r32 bottom, r32 top)
+{
+    if (x < bottom)
+    {
+        x = bottom;
+    }
+    if (x > top)
+    {
+        x = top;
+    }
+
+    return x;
+}
+
+// ==== Vector Math ====
+
+union Vec2 {
+    struct {
+        r32 x;
+        r32 y;
+    };
+    r32 data[2];
+};
+
+union Vec3 {
+    struct {
+        r32 x;
+        r32 y;
+        r32 z;
+    };
+    r32 data[3];
+};
+
+union Vec4 {
+    struct {
+        r32 x;
+        r32 y;
+        r32 z;
+        r32 w;
+    };
+    r32 data[4];
+};
+
+union Mat4 {
+    Vec4 xyzw[4];
+    r32 data[4][4];
+    r32 buffer[16];
+};
+
+// ========================================================== Vec3 ==========================================================
+
+Vec3 init3v(r32 x, r32 y, r32 z)
+{
+    Vec3 res;
+    res.x = x;
+    res.y = y;
+    res.z = z;
+
+    return res;
+}
+
+Vec3 scaler_add3v(Vec3 vec, r32 scaler)
+{
+    Vec3 res;
+    res.x = vec.x + scaler;
+    res.y = vec.y + scaler;
+    res.z = vec.z + scaler;
+
+    return res;
+}
+
+Vec3 scaler_multiply3v(Vec3 vec, r32 scaler)
+{
+    Vec3 res;
+    res.x = vec.x * scaler;
+    res.y = vec.y * scaler;
+    res.z = vec.z * scaler;
+
+    return res;
+}
+
+Vec3 scaler_divide3v(Vec3 vec, r32 scaler)
+{
+    Vec3 res;
+    res.x = vec.x / scaler;
+    res.y = vec.y / scaler;
+    res.z = vec.z / scaler;
+
+    return res;
+}
+
+Vec3 add3v(Vec3 a, Vec3 b)
+{
+    Vec3 res;
+    res.x = a.x + b.x;
+    res.y = a.y + b.y;
+    res.z = a.z + b.z;
+
+    return res;
+}
+
+Vec3 subtract3v(Vec3 a, Vec3 b)
+{
+    Vec3 res;
+    res.x = a.x - b.x;
+    res.y = a.y - b.y;
+    res.z = a.z - b.z;
+
+    return res;
+}
+
+r32 dot_multiply3v(Vec3 a, Vec3 b)
+{
+    r32 x = a.x * b.x;
+    r32 y = a.y * b.y;
+    r32 z = a.z * b.z;
+
+    r32 res = x + y + z;
+
+    return res;
+}
+
+r32 magnitude3v(Vec3 vec)
+{
+    r32 res = sqrtf(Square(vec.x) + Square(vec.y) + Square(vec.z));
+    return res;
+}
+
+Vec3 normalize3v(Vec3 vec)
+{
+    r32 magnitude = magnitude3v(vec);
+    Vec3 res = scaler_divide3v(vec, magnitude);
+    return res;
+}
+
+#ifndef FUN_CALCS
+r32 angle3v(Vec3 a, Vec3 b)
+{
+    Vec3 a_norm = normalize3v(a);
+    Vec3 b_norm = normalize3v(b);
+
+    r32 dot_product = dot_multiply3v(a_norm, b_norm);
+    r32 res = acosf(dot_product);
+
+    return res;
+}
+#endif
+
+Vec3 cross_multiply3v(Vec3 a, Vec3 b)
+{
+    Vec3 res;
+    res.x = (a.y * b.z) - (a.z * b.y);
+    res.y = (a.z * b.x) - (a.x * b.z);
+    res.z = (a.x * b.y) - (a.y * b.x);
+
+    return res;
+}
+
+// ============================================== Vec4, Mat4 ==============================================
+
+Vec4 init4v(r32 x, r32 y, r32 z, r32 w)
+{
+    Vec4 res;
+    res.x = x;
+    res.y = y;
+    res.z = z;
+    res.w = w;
+
+    return res;
+}
+
+Mat4 init_value4m(r32 value)
+{
+    Mat4 res = {0};
+    res.data[0][0] = value;
+    res.data[1][1] = value;
+    res.data[2][2] = value;
+    res.data[3][3] = value;
+
+    return res;
+}
+
+// @note: These operations are only declared, not defined. They are kept here for
+// completeness' sake, BUT since I have not had to do anything related to them,
+// I have not implemented them.
+Vec4 scaler_add4v(Vec4 vec, r32 scaler);
+Vec4 scaler_subtract4v(Vec4 vec, r32 scaler);
+Vec4 scaler_multiply4v(Vec4 vec, r32 scaler);
+Vec4 scaler_divide4v(Vec4 vec, r32 scaler);
+Vec4 add4v(Vec4 a, Vec4 b);
+Vec4 subtract4v(Vec4 a, Vec4 b);
+Vec4 dot_multiply4v(Vec4 a, Vec4 b);
+
+Mat4 add4m(Mat4 a, Mat4 b)
+{
+    Mat4 res;
+    // row 0
+    res.data[0][0] = a.data[0][0] + b.data[0][0];
+    res.data[0][1] = a.data[0][1] + b.data[0][1];
+    res.data[0][2] = a.data[0][2] + b.data[0][2];
+    res.data[0][3] = a.data[0][3] + b.data[0][3];
+    // row 1
+    res.data[1][0] = a.data[1][0] + b.data[1][0];
+    res.data[1][1] = a.data[1][1] + b.data[1][1];
+    res.data[1][2] = a.data[1][2] + b.data[1][2];
+    res.data[1][3] = a.data[1][3] + b.data[1][3];
+    // row 2
+    res.data[2][0] = a.data[2][0] + b.data[2][0];
+    res.data[2][1] = a.data[2][1] + b.data[2][1];
+    res.data[2][2] = a.data[2][2] + b.data[2][2];
+    res.data[2][3] = a.data[2][3] + b.data[2][3];
+    // row 3
+    res.data[3][0] = a.data[3][0] + b.data[3][0];
+    res.data[3][1] = a.data[3][1] + b.data[3][1];
+    res.data[3][2] = a.data[3][2] + b.data[3][2];
+    res.data[3][3] = a.data[3][3] + b.data[3][3];
+
+    return res;
+}
+
+Mat4 subtract4m(Mat4 a, Mat4 b)
+{
+    Mat4 res;
+    // row 0
+    res.data[0][0] = a.data[0][0] - b.data[0][0];
+    res.data[0][1] = a.data[0][1] - b.data[0][1];
+    res.data[0][2] = a.data[0][2] - b.data[0][2];
+    res.data[0][3] = a.data[0][3] - b.data[0][3];
+    // row 1
+    res.data[1][0] = a.data[1][0] - b.data[1][0];
+    res.data[1][1] = a.data[1][1] - b.data[1][1];
+    res.data[1][2] = a.data[1][2] - b.data[1][2];
+    res.data[1][3] = a.data[1][3] - b.data[1][3];
+    // row 2
+    res.data[2][0] = a.data[2][0] - b.data[2][0];
+    res.data[2][1] = a.data[2][1] - b.data[2][1];
+    res.data[2][2] = a.data[2][2] - b.data[2][2];
+    res.data[2][3] = a.data[2][3] - b.data[2][3];
+    // row 3
+    res.data[3][0] = a.data[3][0] - b.data[3][0];
+    res.data[3][1] = a.data[3][1] - b.data[3][1];
+    res.data[3][2] = a.data[3][2] - b.data[3][2];
+    res.data[3][3] = a.data[3][3] - b.data[3][3];
+
+    return res;
+}
+
+Vec4 multiply4vm(Vec4 vec, Mat4 mat)
+{
+    /*
+     * @note: In case I get confused about this in the future.
+     *
+     * Everything is row-major, which means values in memory are laid out row first. With a sample
+     * matrix we have this order in memory: r1c1 r1c2 r1c3 r1c4 r2c1 ... (r = row, c = column). The
+     * same holds true for vectors. (maybe move this explanation to the top)
+     *
+     * Now, multiply4vm multiplies a vector with a matrix. Conventionally that does not make sense,
+     * as a vector is usually 4x1 and a matrix is 4x4.
+     * What this function considers a vector, while it is a vector, is in fact a row from a matrix,
+     * which means that the vector is 1x4 and the matrix is 4x4.
+     *
+     * The function is meant to supplement the matrix multiplication process, to cut down the
+     * repeated lines of code when multiplying a row of the left matrix with each column of the
+     * right matrix.
+     */
+    Vec4 res = { 0 };
+    res.x = (mat.data[0][0] * vec.x) + (mat.data[0][1] * vec.y) + (mat.data[0][2] * vec.z) + (mat.data[0][3] * vec.w);
+    res.y = (mat.data[1][0] * vec.x) + (mat.data[1][1] * vec.y) + (mat.data[1][2] * vec.z) + (mat.data[1][3] * vec.w);
+    res.z = (mat.data[2][0] * vec.x) + (mat.data[2][1] * vec.y) + (mat.data[2][2] * vec.z) + (mat.data[2][3] * vec.w);
+    res.w = (mat.data[3][0] * vec.x) + (mat.data[3][1] * vec.y) + (mat.data[3][2] * vec.z) + (mat.data[3][3] * vec.w);
+
+    return res;
+}
+
+Mat4 multiply4m(Mat4 a, Mat4 b)
+{
+    Mat4 res = { 0 };
+
+    res.xyzw[0] = multiply4vm(a.xyzw[0], b);
+    res.xyzw[1] = multiply4vm(a.xyzw[1], b);
+    res.xyzw[2] = multiply4vm(a.xyzw[2], b);
+    res.xyzw[3] = multiply4vm(a.xyzw[3], b);
+
+    return res;
+}
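Written out, with row-major storage (data[i][j] is row i, column j), multiply4vm computes

    \[ res_i = \sum_{j=0}^{3} mat_{ij} \, vec_j \]

for each component, which is the standard matrix-times-column-vector product; multiply4m then produces each row of its result by feeding the corresponding row of a through multiply4vm against b.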
+
+// ==== Matrix Transformation ====
+
+Mat4 scaling_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 scaling matrix, scaling each of the x, y, z axes
+{
+    Mat4 res = init_value4m(1.0f);
+    res.data[0][0] = x;
+    res.data[1][1] = y;
+    res.data[2][2] = z;
+
+    return res;
+}
+
+Mat4 translation_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 translation matrix, translating along each of the x, y, z axes
+{
+    Mat4 res = init_value4m(1.0f);
+    res.data[0][3] = x;
+    res.data[1][3] = y;
+    res.data[2][3] = z;
+
+    return res;
+}
+
+Mat4 rotation_matrix4m(r32 angle_radians, Vec3 axis) // generates a 4x4 rotation matrix, rotating by angle_radians about the given axis
+{
+    Mat4 res = init_value4m(1.0f);
+    axis = normalize3v(axis);
+
+    r32 cos_theta = cosf(angle_radians);
+    r32 sin_theta = sinf(angle_radians);
+    r32 cos_value = 1.0f - cos_theta;
+
+    res.data[0][0] = (axis.x * axis.x * cos_value) + cos_theta;
+    res.data[0][1] = (axis.x * axis.y * cos_value) + (axis.z * sin_theta);
+    res.data[0][2] = (axis.x * axis.z * cos_value) - (axis.y * sin_theta);
+
+    res.data[1][0] = (axis.x * axis.y * cos_value) - (axis.z * sin_theta);
+    res.data[1][1] = (axis.y * axis.y * cos_value) + cos_theta;
+    res.data[1][2] = (axis.y * axis.z * cos_value) + (axis.x * sin_theta);
+
+    res.data[2][0] = (axis.x * axis.z * cos_value) + (axis.y * sin_theta);
+    res.data[2][1] = (axis.z * axis.y * cos_value) - (axis.x * sin_theta);
+    res.data[2][2] = (axis.z * axis.z * cos_value) + cos_theta;
+
+    return res;
+}
+
+Mat4 perspective_projection_matrix4m(r32 left, r32 right, r32 bottom, r32 top, r32 near, r32 far) // general frustum form of the projection matrix
+{
+    Mat4 res = { 0 };
+
+    res.data[0][0] = (2.0 * near)/(right - left);
+    res.data[0][2] = (right + left)/(right - left);
+
+    res.data[1][1] = (2.0 * near)/(top - bottom);
+    res.data[1][2] = (top + bottom)/(top - bottom);
+
+    res.data[2][2] = -(far + near)/(far - near);
+    res.data[2][3] = -2.0*far*near/(far - near);
+
+    res.data[3][2] = -1.0;
+
+    return res;
+}
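perspective4m below is the field-of-view form of the same matrix: for a symmetric frustum (l = -r, b = -t) the off-diagonal terms (r+l)/(r-l) and (t+b)/(t-b) vanish, and with t = n \tan(fov/2) and r = t \cdot aspect the remaining diagonals collapse to

    \[ \frac{2n}{t-b} = \cot\!\left(\frac{fov}{2}\right), \qquad \frac{2n}{r-l} = \frac{1}{aspect}\,\cot\!\left(\frac{fov}{2}\right) \]

which is exactly the cotangent / aspect_ratio pair in the code.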
+
+Mat4 perspective4m(r32 fov, r32 aspect_ratio, r32 near, r32 far)
+{
+    r32 cotangent = 1.0f / tanf(fov / 2.0f);
+
+    Mat4 res = { 0 };
+
+    res.data[0][0] = cotangent / aspect_ratio;
+
+    res.data[1][1] = cotangent;
+
+    res.data[2][2] = -(far + near) / (far - near);
+    res.data[2][3] = -2.0f * far * near / (far - near);
+
+    res.data[3][2] = -1.0f;
+
+    return res;
+}
+
+Mat4 lookat4m(Vec3 up, Vec3 forward, Vec3 right, Vec3 position)
+{
+    /*
+     * @note: The construction of the lookat matrix is not obvious. For that reason, here is the
+     * supplemental material I used to understand things while I maintain my elementary
+     * understanding of linear algebra:
+     * 1. This youtube video (https://www.youtube.com/watch?v=3ZmqJb7J5wE) helped me understand
+     *    why we invert matrices: we are moving from the position matrix, which is global, to the
+     *    view matrix, which is local. It won't be very clear from this illustration alone, so you
+     *    would be best served watching the video and recollecting and understanding from there.
+     * 2. This article (https://twodee.org/blog/17560) derives (or rather shows), in a very shallow
+     *    way, how we get to the lookat matrix.
+     */
+    Mat4 res = init_value4m(1.0);
+    res.xyzw[0] = Vec4{ right.x,   right.y,   right.z,   -dot_multiply3v(right, position) };
+    res.xyzw[1] = Vec4{ up.x,      up.y,      up.z,      -dot_multiply3v(up, position) };
+    res.xyzw[2] = Vec4{ forward.x, forward.y, forward.z, -dot_multiply3v(forward, position) };
+    res.xyzw[3] = Vec4{ 0.0f,      0.0f,      0.0f,      1.0f };
+
+    return res;
+}
+
+Mat4 to_col_major4m(Mat4 mat)
+{
+    // transpose: OpenGL expects matrices in column-major order when uploading
+    Mat4 res = {0.0f};
+
+    res.data[0][0] = mat.data[0][0]; res.data[1][0] = mat.data[0][1]; res.data[2][0] = mat.data[0][2]; res.data[3][0] = mat.data[0][3];
+    res.data[0][1] = mat.data[1][0]; res.data[1][1] = mat.data[1][1]; res.data[2][1] = mat.data[1][2]; res.data[3][1] = mat.data[1][3];
+    res.data[0][2] = mat.data[2][0]; res.data[1][2] = mat.data[2][1]; res.data[2][2] = mat.data[2][2]; res.data[3][2] = mat.data[2][3];
+    res.data[0][3] = mat.data[3][0]; res.data[1][3] = mat.data[3][1]; res.data[2][3] = mat.data[3][2]; res.data[3][3] = mat.data[3][3];
+
+    return res;
+}
+#endif
diff --git a/source/lessons/uniform buffer objects/shaders/blend_test.fs.glsl b/source/lessons/uniform buffer objects/shaders/blend_test.fs.glsl
new file mode 100644
index 0000000..23daa14
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/blend_test.fs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+
+in vec2 TexCoords;
+in vec3 VertexWorldPos;
+uniform sampler2D TexId;
+uniform vec4 hlt_color;
+out vec4 FragColor;
+
+void main() {
+    vec4 tex = texture(TexId, TexCoords);
+
+    FragColor = tex;
+}
diff --git a/source/lessons/uniform buffer objects/shaders/cubemap.fs.glsl b/source/lessons/uniform buffer objects/shaders/cubemap.fs.glsl
new file mode 100644
index 0000000..72dfe4c
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/cubemap.fs.glsl
@@ -0,0 +1,9 @@
+#version 330 core
+
+in vec3 TexCoords;
+uniform samplerCube skybox;
+out vec4 FragColor;
+
+void main() {
+    FragColor = texture(skybox, TexCoords);
+};
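The samplerCube above is backed by a cubemap texture built on the application side; that setup code is not shown in this patch, but a typical sketch looks like the following (face image paths are illustrative; the +x,-x,+y,-y,+z,-z order matches what GL_TEXTURE_CUBE_MAP_POSITIVE_X + i walks through):

    u32 cubemap;
    glGenTextures(1, &cubemap);
    glBindTexture(GL_TEXTURE_CUBE_MAP, cubemap);

    const char* faces[6] = { "right.jpg", "left.jpg", "top.jpg",
                             "bottom.jpg", "front.jpg", "back.jpg" };
    for (u32 i = 0; i < 6; i++)
    {
        s32 width, height, nrChannels;
        unsigned char *data = stbi_load(faces[i], &width, &height, &nrChannels, 0);
        if (data)
        {
            // upload one face per target enum
            glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB,
                         width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
            stbi_image_free(data);
        }
    }
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);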
diff --git a/source/lessons/uniform buffer objects/shaders/cubemap.vs.glsl b/source/lessons/uniform buffer objects/shaders/cubemap.vs.glsl
new file mode 100644
index 0000000..956673a
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/cubemap.vs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+
+uniform mat4 Model;
+uniform mat4 View;
+uniform mat4 Projection;
+
+out vec3 TexCoords;
+
+void main() {
+    vec4 pos = Projection*View*vec4(aPos, 1.0);
+    // setting z = w makes the final depth pos.z/pos.w come out as 1.0 (the far
+    // plane), so the skybox always draws behind everything else in the scene
+    gl_Position = vec4(pos.xyww);
+    TexCoords = aPos;
+};
diff --git a/source/lessons/uniform buffer objects/shaders/depth_test.fs.glsl b/source/lessons/uniform buffer objects/shaders/depth_test.fs.glsl
new file mode 100644
index 0000000..796d849
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/depth_test.fs.glsl
@@ -0,0 +1,30 @@
+#version 330 core
+
+in vec2 TexCoords;
+in vec3 VertexWorldPos;
+uniform sampler2D TexId;
+out vec4 FragColor;
+
+uniform float near = 0.1f;
+uniform float far = 100.0f;
+
+/* @note:
+float linear_fragment_depth = MakeDepthLinear(non_linear_fragment_depth);
+float scaled_lfd = linear_fragment_depth/far;
+
+gives us the z value in eye space.
+This is purely for learning purposes.
+The equation used in MakeDepthLinear is derived from the perspective projection matrix.
+Take a look at the equation for that in the codebase,
+or here: https://www.songho.ca/opengl/gl_projectionmatrix.html
+*/
+float MakeDepthLinear(float depth) {
+    float ndc = 2.0f*depth - 1.0f;
+    float linear_depth = (2.0 * far * near)/(far + near - ndc*(far - near));
+    return linear_depth;
+}
+
+void main() {
+    FragColor = texture(TexId, TexCoords);
+}
diff --git a/source/lessons/uniform buffer objects/shaders/depth_test.vs.glsl b/source/lessons/uniform buffer objects/shaders/depth_test.vs.glsl
new file mode 100644
index 0000000..827da20
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/depth_test.vs.glsl
@@ -0,0 +1,21 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec2 aTex;
+
+uniform mat4 Model;
+layout (std140) uniform Matrices {
+    mat4 View;        // start: 0  // end: 16 * 4 = 64
+    mat4 Projection;  // start: 64 // end: 64 + 64 = 128
+};
+
+out vec2 TexCoords;
+out vec3 VertexWorldPos;
+
+// @note: I still do not fully understand how the FragNormal calculation works.
+// Need to make sure I intuitively get that.
+
+void main() {
+    gl_Position = Projection*View*Model*vec4(aPos, 1.0);
+    VertexWorldPos = vec3(Model * vec4(aPos, 1.0));
+    TexCoords = aTex;
+};
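The std140 block above is the uniform buffer object this commit is named for: the application fills one buffer with View and Projection (at byte offsets 0 and 64, matching the comments) and every shader declaring the same block reads from it. A sketch of the usual host-side setup, using this patch's Mat4 and to_col_major4m (binding point 0 and the view/projection variables are illustrative):

    // one-time setup: allocate space for two mat4s and attach to binding point 0
    u32 ubo_matrices;
    glGenBuffers(1, &ubo_matrices);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo_matrices);
    glBufferData(GL_UNIFORM_BUFFER, 2 * sizeof(Mat4), NULL, GL_STATIC_DRAW);
    glBindBufferRange(GL_UNIFORM_BUFFER, 0, ubo_matrices, 0, 2 * sizeof(Mat4));

    // per shader: tie its "Matrices" block to binding point 0
    u32 block_index = glGetUniformBlockIndex(shader_program, "Matrices");
    glUniformBlockBinding(shader_program, block_index, 0);

    // per frame: upload the matrices (transposed first, since GLSL wants column-major)
    Mat4 view_cm = to_col_major4m(view);
    Mat4 proj_cm = to_col_major4m(projection);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo_matrices);
    glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(Mat4), view_cm.buffer);
    glBufferSubData(GL_UNIFORM_BUFFER, sizeof(Mat4), sizeof(Mat4), proj_cm.buffer);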
diff --git a/source/lessons/uniform buffer objects/shaders/fbo.fs.glsl b/source/lessons/uniform buffer objects/shaders/fbo.fs.glsl
new file mode 100644
index 0000000..e12ad33
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/fbo.fs.glsl
@@ -0,0 +1,72 @@
+#version 330 core
+
+in vec2 TexCoords;
+uniform sampler2D TexId;
+out vec4 FragColor;
+
+vec4 filter_color_invert(vec4 color)
+{
+    vec4 res = vec4(vec3(1.0) - vec3(color), 1.0);
+    return res;
+}
+
+vec4 filter_color_grayscale(vec4 color)
+{
+    // we could simply average the channels:
+    // float average = (color.x + color.y + color.z) / 3.0f;
+    // but in reality human eyes are most sensitive to green and least sensitive to
+    // blue, so the channels need to be weighted accordingly
+    float average = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
+    vec4 res = vec4(vec3(average), 1.0);
+
+    return res;
+}
+
+// @note: different kernels for experimentation
+const float kernel_sharpen[9] = float[](
+    -1, -1, -1,
+    -1,  9, -1,
+    -1, -1, -1
+);
+
+const float kernel_blur[9] = float[](
+    1.0/16.0, 2.0/16.0, 1.0/16.0,
+    2.0/16.0, 4.0/16.0, 2.0/16.0,
+    1.0/16.0, 2.0/16.0, 1.0/16.0
+);
+
+const float kernel_edge_detection[9] = float[](
+    1,  1, 1,
+    1, -8, 1,
+    1,  1, 1
+);
+
+vec4 filter_kernel_effects()
+{
+    const float offset = 1.0/300.0;
+    vec2 offsets[9] = vec2[](
+        vec2(-offset,  offset), // top left
+        vec2(      0,  offset), // top center
+        vec2( offset,  offset), // top right
+        vec2(-offset,       0), // center left
+        vec2(      0,       0), // center center
+        vec2( offset,       0), // center right
+        vec2(-offset, -offset), // bot left
+        vec2(      0, -offset), // bot center
+        vec2( offset, -offset)  // bot right
+    );
+
+    float kernel[9] = kernel_edge_detection;
+    vec3 kernelValue = vec3(0.0);
+    vec3 sampleTex[9];
+    for (int i=0; i<9; i++) {
+        sampleTex[i] = vec3(texture(TexId, TexCoords + offsets[i]));
+        kernelValue += (kernel[i] * sampleTex[i]);
+    }
+
+    vec4 res = vec4(kernelValue, 1.0);
+    return res;
+}
+
+void main() {
+    FragColor = texture(TexId, TexCoords);
+}
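For the earlier @lookup note about understanding kernels: filter_kernel_effects is a 3x3 convolution. Each output pixel is a weighted sum of its neighborhood, with the kernel supplying the weights:

    \[ g(u,v) = \sum_{i=-1}^{1} \sum_{j=-1}^{1} k(i,j)\, f(u + i\,\delta,\; v + j\,\delta) \]

where f is the source texture, \delta is the sampling offset (1/300 above), and k is one of the kernel arrays. Sharpen, blur, and edge detection differ only in the nine weights, and a kernel whose weights sum to 1 (like the blur) preserves overall brightness.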
diff --git a/source/lessons/uniform buffer objects/shaders/fbo.vs.glsl b/source/lessons/uniform buffer objects/shaders/fbo.vs.glsl
new file mode 100644
index 0000000..82d7211
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/fbo.vs.glsl
@@ -0,0 +1,10 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec2 aTex;
+
+out vec2 TexCoords;
+
+void main() {
+    // the fullscreen quad is already in clip space; no matrices needed
+    gl_Position = vec4(aPos.x, aPos.y, 0.0f, 1.0f);
+    TexCoords = aTex;
+};
diff --git a/source/lessons/uniform buffer objects/shaders/refl.fs.glsl b/source/lessons/uniform buffer objects/shaders/refl.fs.glsl
new file mode 100644
index 0000000..6d28392
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/refl.fs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+    // reflect the view ray off the surface and sample the skybox along it
+    vec3 I = normalize(Position - cameraPos);
+    vec3 R = reflect(I, normalize(Normal));
+    FragColor = vec4(texture(skybox, R).rgb, 1.0);
+};
diff --git a/source/lessons/uniform buffer objects/shaders/refl.vs.glsl b/source/lessons/uniform buffer objects/shaders/refl.vs.glsl
new file mode 100644
index 0000000..b8f2b97
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/refl.vs.glsl
@@ -0,0 +1,16 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+    Normal = mat3(transpose(inverse(Model))) * aNormal;
+    Position = vec3(Model * vec4(aPos, 1.0));
+    gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+};
diff --git a/source/lessons/uniform buffer objects/shaders/refr.fs.glsl b/source/lessons/uniform buffer objects/shaders/refr.fs.glsl
new file mode 100644
index 0000000..6747ded
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/refr.fs.glsl
@@ -0,0 +1,15 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+    // ratio of indices of refraction for an air-to-glass boundary (1.00/1.52)
+    float refr_ratio = 1.0/1.52;
+    vec3 I = normalize(Position - cameraPos);
+    vec3 R = refract(I, normalize(Normal), refr_ratio);
+    FragColor = vec4(texture(skybox, R).rgb, 1.0);
+};
diff --git a/source/lessons/uniform buffer objects/shaders/refr.vs.glsl b/source/lessons/uniform buffer objects/shaders/refr.vs.glsl
new file mode 100644
index 0000000..0554f0a
--- /dev/null
+++ b/source/lessons/uniform buffer objects/shaders/refr.vs.glsl
@@ -0,0 +1,20 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+    // @note: This is the calculation for getting the normal vector, one that is
+    // unaffected by non-uniform scaling. Look at the lighting chapter on
+    // learnopengl.com to understand this more.
+    Normal = mat3(transpose(inverse(Model))) * aNormal;
+    Position = vec3(Model * vec4(aPos, 1.0));
+    gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+};
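On the recurring note about the normal matrix mat3(transpose(inverse(Model))), the short derivation: a tangent vector t lying on the surface transforms as t' = Mt, and the transformed normal n' = Nn must stay perpendicular to it, so

    \[ n'^{\top} t' = (Nn)^{\top} (Mt) = n^{\top} N^{\top} M\, t = 0 \]

must hold for every tangent t. Since n^T t = 0 is the starting assumption, choosing N^T M = I, i.e. N = (M^{-1})^T, guarantees it. For a model matrix built only from rotations and uniform scales, (M^{-1})^T equals M up to a scale factor that normalization removes; the inverse-transpose only makes a visible difference once non-uniform scaling enters.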