summaryrefslogtreecommitdiff
path: root/source/lessons/instanced_rendering
diff options
context:
space:
mode:
Diffstat (limited to 'source/lessons/instanced_rendering')
-rw-r--r--source/lessons/instanced_rendering/basic quads/main.cpp833
-rw-r--r--source/lessons/instanced_rendering/basic quads/math.h413
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/cubemap.fs.glsl9
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/cubemap.vs.glsl14
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/depth_test.fs.glsl30
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/depth_test.vs.glsl21
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/fbo.fs.glsl72
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/fbo.vs.glsl10
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/instancing.fs.glsl9
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/instancing.vs.glsl16
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/refl.fs.glsl14
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/refl.vs.glsl16
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/refr.fs.glsl15
-rw-r--r--source/lessons/instanced_rendering/basic quads/shaders/refr.vs.glsl20
-rw-r--r--source/lessons/instanced_rendering/planetary models/main.cpp935
-rw-r--r--source/lessons/instanced_rendering/planetary models/math.h413
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/cubemap.fs.glsl9
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/cubemap.vs.glsl14
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/depth_test.fs.glsl30
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/depth_test.vs.glsl21
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/fbo.fs.glsl72
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/fbo.vs.glsl10
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/instanced_model.vs.glsl29
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/instancing.fs.glsl9
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/instancing.vs.glsl16
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/model.fs.glsl150
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/model.vs.glsl28
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/refl.fs.glsl14
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/refl.vs.glsl16
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/refr.fs.glsl15
-rw-r--r--source/lessons/instanced_rendering/planetary models/shaders/refr.vs.glsl20
31 files changed, 3293 insertions, 0 deletions
diff --git a/source/lessons/instanced_rendering/basic quads/main.cpp b/source/lessons/instanced_rendering/basic quads/main.cpp
new file mode 100644
index 0000000..d1aedc3
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/main.cpp
@@ -0,0 +1,833 @@
+#include <stdio.h>
+#include <SDL2/SDL.h>
+#include <glad/glad.h>
+#include <assimp/Importer.hpp>
+#include <assimp/scene.h>
+#include <assimp/postprocess.h>
+#include <vector>
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+
/* @lookup:
* - understand kernels, how they work and how they affect post processing
* - Check to see why it is necessary to do glBindTexture()
* - understand the difference between binding textures and activating a texture unit
* - I do not understand how floating point numbers work, so I should probably look into that.
* - The normal matrix calculation in the fragment shader for the object affected by light has been mainly copied.
* I have tried to understand the formula, and while it made some sense, it is not fully clear to me, and I cannot picture it yet.
* Revisit the derivation of the normal matrix some time in the future.
* - Look up the derivation of the formula for reflecting a vector about a normal. I am doing that for specular lighting, but the learnopengl tutorial
* just uses the glsl reflect function, and at the time of writing it is also very late, so I am not in the mood or position to look into it at present.
* - One of the things I have observed with specular lights is that the circle/specular highlight follows the camera (me) when I move. I would like to figure
* out a way by which this does not happen and it remains fixed on the object, at the angle at which it hits. All of this is complicated by the fact
* that, of course, everything is actually happening from the camera's perspective. I would still love to figure this out.
*/
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef float r32;
+typedef double r64;
+
+typedef u8 b8;
+
+#include "math.h"
+
+// =========== Shader Loading =============
+
+unsigned int gl_create_vertex_shader(char* vertex_shader_source)
+{
+ unsigned int vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_source, NULL);
+ glCompileShader(vertex_shader);
+
+ int success;
+ char info_log[512];
+ glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
+ if (!success)
+ {
+ glGetShaderInfoLog(vertex_shader, 512, NULL, info_log);
+ printf("================================\n");
+ printf("vertex shader compilation failed:\n%s\n", info_log);
+ }
+
+ return vertex_shader;
+}
+
+unsigned int gl_create_fragment_shader(char* fragment_shader_source)
+{
+ unsigned int fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_source, NULL);
+ glCompileShader(fragment_shader);
+
+ int success;
+ char info_log[512];
+ glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
+ if (!success)
+ {
+ glGetShaderInfoLog(fragment_shader, 512, NULL, info_log);
+ printf("================================\n");
+ printf("fragment shader compilation failed:\n%s\n", info_log);
+ }
+
+ return fragment_shader;
+}
+
// Links a compiled vertex + fragment shader into a program object.
// On link failure the info log is printed; the program id is returned either
// way. Both shader objects are deleted afterwards — once linked, the program
// no longer needs them.
unsigned int gl_create_shader_program(unsigned int vertex_shader, unsigned int fragment_shader)
{
    unsigned int shader_program = glCreateProgram();

    glAttachShader(shader_program, vertex_shader);
    glAttachShader(shader_program, fragment_shader);
    glLinkProgram(shader_program);

    int success;
    char info_log[512];
    glGetProgramiv(shader_program, GL_LINK_STATUS, &success);
    if (!success)
    {
        glGetProgramInfoLog(shader_program, 512, NULL, info_log);
        printf("================================\n");
        printf("shader program linking failed:\n%s\n", info_log);
    }

    glDeleteShader(vertex_shader);
    glDeleteShader(fragment_shader);

    return shader_program;
}
+
+unsigned int gl_shader_program(char* vertex_shader_source, char* fragment_shader_source)
+{
+ unsigned int vertex_shader = gl_create_vertex_shader(vertex_shader_source);
+ unsigned int fragment_shader = gl_create_fragment_shader(fragment_shader_source);
+ unsigned int shader_program = gl_create_shader_program(vertex_shader, fragment_shader);
+
+ return shader_program;
+}
+
+Mat4 camera_create4m(Vec3 camera_pos, Vec3 camera_look, Vec3 camera_up)
+{
+ // @note: We do this because this allows the camera to have the axis it looks at
+ // inwards be the +z axis.
+ // If we did not do this, then the inward axis the camera looks at would be negative.
+ // I am still learning from learnopengl.com but I imagine that this was done for conveniences' sake.
+ Vec3 camera_forward_dir = normalize3v(subtract3v(camera_pos, camera_look));
+ Vec3 camera_right_dir = normalize3v(cross_multiply3v(camera_up, camera_forward_dir));
+ Vec3 camera_up_dir = normalize3v(cross_multiply3v(camera_forward_dir, camera_right_dir));
+
+ Mat4 res = lookat4m(camera_up_dir, camera_forward_dir, camera_right_dir, camera_pos);
+
+ return res;
+}
+
+Vec3 camera_look_around(r32 angle_pitch, r32 angle_yaw)
+{
+ Vec3 camera_look = {0.0};
+ camera_look.x = cosf(angle_yaw) * cosf(angle_pitch);
+ camera_look.y = sinf(angle_pitch);
+ camera_look.z = sinf(angle_yaw) * cosf(angle_pitch);
+ camera_look = normalize3v(camera_look);
+
+ return camera_look;
+}
+
+s32 gl_load_texture(u32 texture_id, const char* path)
+{
+ s32 width, height, nrChannels;
+ unsigned char *data = stbi_load(path, &width, &height, &nrChannels, 0);
+ if (data)
+ {
+ GLenum format;
+ if (nrChannels == 1)
+ format = GL_RED;
+ else if (nrChannels == 3)
+ format = GL_RGB;
+ else if (nrChannels == 4)
+ format = GL_RGBA;
+
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ stbi_image_free(data);
+ }
+ else
+ {
+ printf("failed to load image texture at path: %s", path);
+ stbi_image_free(data);
+ }
+
+ return texture_id;
+}
+
+// =================== Model Loading ========================
+// This section contains a whole host of things:
+// 1. classes
+// 2. std::vectors
+// 3. std::strings
+// that I have only used as a glue for I did not know if I had the model loading setup properly.
+// @todo: replace these things eventually. For now the goal is to complete learnopengl
+
+s32 TextureFromFile(const char* filepath, std::string directory)
+{
+ // @note: this function is stupid as it already became outdated as I needed to tweak the parameters
+ // for wrapping. Either those become function parameters (Which makes sense I guess) or I look at
+ // exactly what steps I am reusing and just make that a function so the function is called fewer times.
+ //
+ // I am guessing this won't look good from a design point of view for all those jobs and postings, even if
+ // this may be the simpler and faster thing to do, albeit at the cost of typing.
+ std::string filename = std::string(filepath);
+ filename = directory + '/' + filename;
+
+ u32 texid;
+ glGenTextures(1, &texid);
+
+ s32 width, height, nrChannels;
+ unsigned char *data = stbi_load(filename.c_str(), &width, &height, &nrChannels, 0);
+ if (data)
+ {
+ GLenum format;
+ if (nrChannels == 1)
+ format = GL_RED;
+ else if (nrChannels == 3)
+ format = GL_RGB;
+ else if (nrChannels == 4)
+ format = GL_RGBA;
+
+ glBindTexture(GL_TEXTURE_2D, texid);
+ glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+
+ stbi_image_free(data);
+ }
+ else
+ {
+ printf("failed to load image texture at path: %s", filepath);
+ stbi_image_free(data);
+ }
+
+ return texid;
+}
+
// Which material slot a texture feeds in the lighting shader.
enum TextureType { TextureDiffuse=0, TextureSpecular };

// One interleaved vertex as laid out in the mesh VBO; field order matches the
// attribute pointers set up in Mesh's constructor (position, normal, uv).
struct Vertex {
  Vec3 position;
  Vec3 normal;
  Vec2 texture;
};

// A loaded GL texture plus the bookkeeping used to bind and de-duplicate it.
struct Texture {
  u32 id;                  // GL texture object name
  enum TextureType type;   // diffuse or specular
  std::string fname;       // source file name; compared to avoid reloading
};
+
+class Mesh {
+ public:
+ std::vector<Vertex> vertices;
+ std::vector<u32> indices;
+ std::vector<Texture> textures;
+
+ u32 vao;
+ u32 vbo;
+ u32 ebo;
+
+ Mesh(std::vector<Vertex> vertices, std::vector<u32> indices, std::vector<Texture> textures)
+ {
+ this->vertices = vertices;
+ this->indices = indices;
+ this->textures = textures;
+
+ // setup mesh shader stuff
+ glGenVertexArrays(1, &vao);
+ glGenBuffers(1, &vbo);
+ glGenBuffers(1, &ebo);
+
+ glBindVertexArray(vao);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(struct Vertex), &(this->vertices[0]), GL_STATIC_DRAW);
+
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
+ glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->indices.size() * sizeof(u32), &(this->indices[0]), GL_STATIC_DRAW);
+
+ // position
+ glEnableVertexAttribArray(0);
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)0);
+ // normal
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal));
+ // texture
+ glEnableVertexAttribArray(2);
+ glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texture));
+
+ glBindVertexArray(0);
+ }
+
+ void draw(u32 shader_program)
+ {
+ glUseProgram(shader_program);
+
+ u32 diffuse_num = 1;
+ u32 specular_num = 1;
+ char tex_unit_name[64];
+ // set shininess
+ s32 mat_shine_loc = glGetUniformLocation(shader_program, "material.shininess");
+ glUniform1f(mat_shine_loc, 32.0f);
+
+ for (u32 i=0; i<textures.size(); i++)
+ {
+ struct Texture curr_tex = textures[i];
+ if (curr_tex.type == TextureDiffuse)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", diffuse_num);
+ }
+ else if (curr_tex.type == TextureSpecular)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", specular_num);
+ }
+
+ glActiveTexture(GL_TEXTURE0 + i);
+ s32 tex_unit_loc = glGetUniformLocation(shader_program, tex_unit_name);
+ glUniform1i(tex_unit_loc, i);
+ glBindTexture(GL_TEXTURE_2D, curr_tex.id);
+ }
+ glActiveTexture(GL_TEXTURE0);
+
+ glBindVertexArray(vao);
+ glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
+ glBindVertexArray(0);
+ }
+};
+
// Owns all meshes loaded from a single model file via assimp, plus a cache
// of textures so images shared between meshes are uploaded only once.
class Model
{
  public:
    // Loads the model immediately; on failure the model is simply empty
    // (load_model prints the assimp error and returns).
    Model(std::string path)
    {
        load_model(path);
    }
    // Draws every mesh with the given shader program.
    void draw(u32 shader_program);
  private:
    std::vector<Texture> loaded_textures;  // cache keyed by file name
    std::vector<Mesh> meshes;
    std::string directory;                 // model file's directory, used to resolve texture paths

    void load_model(std::string path);
    void process_node(aiNode *node, const aiScene *scene);
    Mesh process_mesh(aiMesh *mesh, const aiScene *scene);
    std::vector<Texture> load_material_textures(aiMaterial *mat, aiTextureType type, TextureType type_name);
};
+
+void Model::draw(u32 shader_program)
+{
+ for (int i=0; i < meshes.size(); i++)
+ {
+ meshes[i].draw(shader_program);
+ }
+}
+
+void Model::load_model(std::string path)
+{
+ Assimp::Importer import;
+ const aiScene *scene = import.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs);
+
+ if (!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode)
+ {
+ printf("error loading model :%s\n", import.GetErrorString());
+ return;
+ }
+
+ directory = path.substr(0, path.find_last_of('/'));
+ process_node(scene->mRootNode, scene);
+}
+
+void Model::process_node(aiNode *node, const aiScene *scene)
+{
+ for (int i=0; i < node->mNumMeshes; i++)
+ {
+ aiMesh *mesh = scene->mMeshes[node->mMeshes[i]];
+ meshes.push_back(process_mesh(mesh, scene));
+ }
+
+ for (int i=0; i<node->mNumChildren; i++)
+ {
+ process_node(node->mChildren[i], scene);
+ }
+}
+
+Mesh Model::process_mesh(aiMesh *mesh, const aiScene *scene)
+{
+ std::vector<Vertex> vertices;
+ std::vector<u32> indices;
+ std::vector<Texture> textures;
+
+ for (u32 i=0; i < mesh->mNumVertices; i++)
+ {
+ Vec3 position;
+ position.x = mesh->mVertices[i].x;
+ position.y = mesh->mVertices[i].y;
+ position.z = mesh->mVertices[i].z;
+
+ Vec3 normal;
+ normal.x = mesh->mNormals[i].x;
+ normal.y = mesh->mNormals[i].y;
+ normal.z = mesh->mNormals[i].z;
+
+ Vec2 texture = {0, 0};
+ if (mesh->mTextureCoords[0])
+ {
+ texture.x = mesh->mTextureCoords[0][i].x;
+ texture.y = mesh->mTextureCoords[0][i].y;
+ }
+
+ struct Vertex vertex;
+ vertex.position = position;
+ vertex.normal = normal;
+ vertex.texture = texture;
+
+ vertices.push_back(vertex);
+ }
+ // process indices
+ for (u32 i = 0; i < mesh->mNumFaces; i++)
+ {
+ aiFace face = mesh->mFaces[i];
+ for(u32 j = 0; j < face.mNumIndices; j++)
+ {
+ indices.push_back(face.mIndices[j]);
+ }
+ }
+ // process material
+ if (mesh->mMaterialIndex >= 0)
+ {
+ aiMaterial *material = scene->mMaterials[mesh->mMaterialIndex];
+ std::vector<Texture> diffuse_maps = load_material_textures(material, aiTextureType_DIFFUSE, TextureDiffuse);
+ textures.insert(textures.end(), diffuse_maps.begin(), diffuse_maps.end());
+ std::vector<Texture> specular_maps = load_material_textures(material, aiTextureType_SPECULAR, TextureSpecular);
+ textures.insert(textures.end(), specular_maps.begin(), specular_maps.end());
+ }
+
+ return Mesh(vertices, indices, textures);
+}
+
+std::vector<Texture> Model::load_material_textures(aiMaterial *mat, aiTextureType type, TextureType tex_type)
+{
+ std::vector<Texture> textures;
+ for(u32 i=0; i<mat->GetTextureCount(type); i++)
+ {
+ bool load_texture = true;
+ aiString str;
+ mat->GetTexture(type, i, &str);
+ const char* fname = str.C_Str();
+
+ for (s32 j=0; j<loaded_textures.size(); j++)
+ {
+ if (std::strcmp(loaded_textures[j].fname.data(), fname) == 0)
+ {
+ load_texture = false;
+ textures.push_back(loaded_textures[j]);
+ break;
+ }
+ }
+ if (load_texture)
+ {
+ Texture texture;
+ texture.id = TextureFromFile(fname, directory);
+ texture.type = tex_type;
+ texture.fname = std::string(fname);
+ textures.push_back(texture);
+ loaded_textures.push_back(texture);
+ }
+ }
+
+ return textures;
+}
+
// Thin wrapper over a GL shader program id with a few uniform setters.
class Shader {
    // @note: this is a draft, I think frankly, it's a stupid idea to be making this at this point
    // but my goal is to look at my code at this stage (which is in the second half of (or so I think)
    // learnopengl and identify repeated code that I think I can yank out and can make convenient to write.
    // The precondition for all of this is that I do not remodel the program based off of some vague idea of
    // cleanliness in my head. This is all still very procedural, I just want to minimize the amount I type
    // and at the same time see how well I can identify good abstractions
    //
    //
    // I much prefer to have things be not a class, especially if I look at how I did my lovely
    // math functions, which are so simple and straightforward
    public:
        u32 id;

        // @note: all well and good until you get compute shaders
        // then the entire thing shits the bed
        Shader(char* vertex_shader_source, char* fragment_shader_source) {
            id = gl_shader_program(vertex_shader_source, fragment_shader_source);
        }

        void use() {
            glUseProgram(id);
        }

        // Draws `count` vertices from `vao` as triangles (non-indexed).
        void draw_triangles(u32 vao, u32 count) {
            glBindVertexArray(vao);
            glDrawArrays(GL_TRIANGLES, 0, count);
        }

        void set_1i(char* variable, s32 value) {
            s32 loc = glGetUniformLocation(id, variable);
            glUniform1i(loc, value);
        }

        // Uploads `count` matrices. GL_TRUE transposes on upload because Mat4
        // stores row-major data while GLSL expects column-major.
        void set_matrix4fv(char* variable, Mat4 value, int count) {
            s32 loc = glGetUniformLocation(id, variable);
            glUniformMatrix4fv(loc, count, GL_TRUE, value.buffer);
        };

        // NOTE(review): deleting the program in the destructor means copying
        // a Shader and destroying both deletes the same program twice
        // (rule of three) — confirm instances are never copied.
        ~Shader() {
            // @note: this can literally be replaced by another function that goes over the entire list
            // of shader_programs near the program exit and deletes them, if I even need to do that.
            glDeleteProgram(id);
        }
};
+
+int main(int argc, char* argv[])
+{
+
+ // ============ END ============
+ int width = 1024;
+ int height = 768;
+
+ if (SDL_Init(SDL_INIT_VIDEO) != 0)
+ {
+ printf("Error initialising SDL2: %s\n", SDL_GetError());
+ return 0;
+ };
+
+ // set opengl version and profile
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
+ SDL_GL_SetAttribute( SDL_GL_STENCIL_SIZE, 8 );
+
+ // initialise window with opengl flag
+ SDL_Window* window = SDL_CreateWindow("SDL Test",
+ 50,
+ 50,
+ width,
+ height,
+ SDL_WINDOW_OPENGL);
+
+ SDL_SetRelativeMouseMode(SDL_TRUE);
+
+ // create an opengl context
+ SDL_GLContext context = SDL_GL_CreateContext(window);
+ if (!context)
+ {
+ printf("OpenGL context creation failed: %s\n", SDL_GetError());
+ return -1;
+ }
+
+
+ // load glad
+ if (!gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress)) {
+ printf("Failed to initialize Glad\n");
+ return 1;
+ }
+
+ // filesystem playground stuff
+ size_t read_count;
+ char* vertex_source = (char*)SDL_LoadFile("./source/shaders/instancing.vs.glsl", &read_count);
+ char* fragment_source = (char*)SDL_LoadFile("./source/shaders/instancing.fs.glsl", &read_count);
+
+ u32 shader_program = gl_shader_program(vertex_source, fragment_source);
+
+ stbi_set_flip_vertically_on_load(1);
+
+ float inst_vertices[] = {
+ // positions // texture Coords (note we set these higher than 1 (together with GL_REPEAT as texture wrapping mode). this will cause the floor texture to repeat)
+ // BottomFace
+ -0.5f, -1.0f, -0.5f, // top-right
+ 0.5f, -1.0f, -0.5f, // top-left
+ 0.5f, -1.0f, 0.5f, // bottom-left
+ 0.5f, -1.0f, 0.5f, // bottom-left
+ -0.5f, -1.0f, 0.5f, // bottom-right
+ -0.5f, -1.0f, -0.5f, // top-right
+ // Top face
+ -0.5f, -0.9f, -0.5f, // top-left
+ 0.5f, -0.9f, 0.5f, // bottom-right
+ 0.5f, -0.9f, -0.5f, // top-right
+ 0.5f, -0.9f, 0.5f, // bottom-right
+ -0.5f, -0.9f, -0.5f, // top-left
+ -0.5f, -0.9f, 0.5f // bottom-left
+ };
+
+ Mat4 offsets[10000];
+ u32 max_r = 100;
+ u32 max_c = 100;
+ for (u32 r = 0; r < max_r; r++)
+ {
+ for (u32 c = 0; c < max_c; c++)
+ {
+ offsets[(r*max_r) + c] = to_col_major4m(translation_matrix4m((r32)(r + r), 0.0f, (r32)(c + c)));
+ }
+ }
+
+ u32 vao, vbo, instance_vbo;
+
+ glGenVertexArrays(1, &vao);
+ glGenBuffers(1, &vbo);
+
+ glBindVertexArray(vao);
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ glBufferData(GL_ARRAY_BUFFER, sizeof(inst_vertices), &inst_vertices, GL_STATIC_DRAW);
+ glEnableVertexAttribArray(0);
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
+
+ glGenBuffers(1, &instance_vbo);
+ glBindBuffer(GL_ARRAY_BUFFER, instance_vbo);
+ u32 offset_sz = sizeof(offsets);
+ glBufferData(GL_ARRAY_BUFFER, offset_sz, &(offsets[0].buffer), GL_STATIC_DRAW);
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)0);
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(4*sizeof(u32)));
+ glEnableVertexAttribArray(2);
+ glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(8*sizeof(u32)));
+ glEnableVertexAttribArray(3);
+ glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(12*sizeof(u32)));
+ glEnableVertexAttribArray(4);
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glVertexAttribDivisor(1, 1);
+ glVertexAttribDivisor(2, 1);
+ glVertexAttribDivisor(3, 1);
+ glVertexAttribDivisor(4, 1);
+ glBindVertexArray(0);
+
+ // uniform buffer objects
+ u32 ubo_camera_block;
+ glGenBuffers(1, &ubo_camera_block);
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
+ glBufferData(GL_UNIFORM_BUFFER, 128, NULL, GL_STATIC_DRAW);
+ glBindBuffer(GL_UNIFORM_BUFFER, 0);
+
+ // objects
+ Vec3 model_translations[] = {
+ Vec3{ 0.5, 0.0, 0.0}, // 0: origin square
+ Vec3{ -1.0, 0.0, -1.0}, // 1: plane
+ Vec3{ -1.0, 0.0, -0.5}, // 2: window between squares
+ Vec3{ 0.0, 0.0, 3.0}, // 3: window infront of origin square
+ Vec3{ -2.5, 0.0, -0.5}, // 4: square to the left
+ Vec3{ -1.0, 0.0, -1.5}, // 5: random square behind window between squares
+ Vec3{ -1.0, 0.0, -8.0}, // 6: reflective plane
+ Vec3{ -1.0, 2.0, -8.0}, // 6: refractive "window"
+ };
+
+ r32 FOV = 90.0;
+ r32 time_curr;
+ r32 time_prev = SDL_GetTicks64() / 100.0;
+ // camera stuff
+ Vec3 camera_pos = Vec3{ 0.0, 0.0, 10.0f};
+ Vec3 preset_up_dir = Vec3{ 0.0, 1.0, 0.0 };
+
+ r32 angle_yaw, angle_pitch, angle_roll;
+ angle_pitch = (r32)To_Radian(0.0f);
+ angle_yaw = (r32)-To_Radian(90.0f);
+
+ Vec3 camera_look = camera_look_around(angle_pitch, angle_yaw);
+
+ // @todo: remove this, I dont like this and think that this is unnecessary
+ Vec3 camera_look_increment;
+ r32 camera_speed = 2.5f;
+
+ Mat4 view = camera_create4m(camera_pos, camera_look, preset_up_dir);
+
+ Mat4 proj = perspective4m((r32)To_Radian(90.0), (r32)width / (r32)height, 0.1f, 1000.0f);
+
+ u32 block_binding = 0;
+ u32 matrices_ind;
+ matrices_ind = glGetUniformBlockIndex(shader_program, "Matrices");
+ glUniformBlockBinding(shader_program, matrices_ind, block_binding);
+ // or glBindBufferBase();
+ glBindBufferRange(GL_UNIFORM_BUFFER, block_binding, ubo_camera_block, 0, 128);
+
+ Mat4 col_major_proj = to_col_major4m(proj);
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
+ glBufferSubData(GL_UNIFORM_BUFFER, 64, 64, col_major_proj.buffer);
+ glBindBuffer(GL_UNIFORM_BUFFER, 0);
+
+ glEnable(GL_DEPTH_TEST);
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_BACK);
+
+ u8 game_running = true;
+
+ u8 hold_lshift = false;
+ u8 move_w = false;
+ u8 move_a = false;
+ u8 move_s = false;
+ u8 move_d = false;
+
+ while(game_running)
+ {
+
+ // frame delta
+ time_curr = SDL_GetTicks64() / 100.0;
+ r32 time_delta = time_curr - time_prev;
+
+ r32 camera_speed_adjusted = time_delta * camera_speed;
+ camera_look_increment = scaler_multiply3v(camera_look, camera_speed_adjusted);
+
+ SDL_Event ev;
+ while(SDL_PollEvent(&ev))
+ {
+
+ // INPUT
+ switch (ev.type)
+ {
+ case (SDL_QUIT):
+ {
+ game_running = false;
+ } break;
+ case (SDL_KEYDOWN):
+ {
+ if (ev.key.keysym.sym == SDLK_LSHIFT)
+ {
+ hold_lshift = true;
+ }
+ if (ev.key.keysym.sym == SDLK_w)
+ {
+ move_w = true;
+ }
+ if (ev.key.keysym.sym == SDLK_s)
+ {
+ move_s = true;
+ }
+ if (ev.key.keysym.sym == SDLK_a)
+ {
+ move_a = true;
+ }
+ if (ev.key.keysym.sym == SDLK_d)
+ {
+ move_d = true;
+ }
+ } break;
+ case (SDL_KEYUP):
+ {
+ if (ev.key.keysym.sym == SDLK_LSHIFT)
+ {
+ hold_lshift = false;
+ }
+ if (ev.key.keysym.sym == SDLK_w)
+ {
+ move_w = false;
+ }
+ if (ev.key.keysym.sym == SDLK_s)
+ {
+ move_s = false;
+ }
+ if (ev.key.keysym.sym == SDLK_a)
+ {
+ move_a = false;
+ }
+ if (ev.key.keysym.sym == SDLK_d)
+ {
+ move_d = false;
+ }
+ } break;
+ case (SDL_MOUSEMOTION):
+ {
+ SDL_MouseMotionEvent mouse_event = ev.motion;
+ r32 x_motion = (r32)mouse_event.xrel;
+ r32 y_motion = (r32)mouse_event.yrel;
+ if (x_motion != 0.0 || y_motion != 0.0)
+ {
+ angle_yaw = angle_yaw + To_Radian(x_motion * 0.1f);
+ angle_pitch = clampf(angle_pitch + To_Radian(-y_motion * 0.1f), To_Radian(-89.0f), To_Radian(89.0f));
+
+ camera_look = camera_look_around(angle_pitch, angle_yaw);
+ }
+ } break;
+ default:
+ {
+ break;
+ }
+ }
+ }
+
+ // PROCESS
+ if (move_w)
+ {
+ camera_pos = add3v(camera_pos, camera_look_increment);
+ }
+ if (move_s)
+ {
+ camera_pos = subtract3v(camera_pos, camera_look_increment);
+ }
+ if (move_a)
+ {
+ Vec3 camera_right = normalize3v(cross_multiply3v(preset_up_dir, camera_look));
+ Vec3 camera_right_scaled = scaler_multiply3v(camera_right, camera_speed_adjusted);
+ camera_pos = add3v(camera_pos, camera_right_scaled);
+ }
+ if (move_d)
+ {
+ Vec3 camera_right = normalize3v(cross_multiply3v(preset_up_dir, camera_look));
+ Vec3 camera_right_scaled = scaler_multiply3v(camera_right, camera_speed_adjusted);
+ camera_pos = subtract3v(camera_pos, camera_right_scaled);
+ }
+ view = camera_create4m(camera_pos, add3v(camera_pos, camera_look), preset_up_dir);
+
+ // object shader program stuff
+ Mat4 col_major_view = to_col_major4m(view);
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
+ glBufferSubData(GL_UNIFORM_BUFFER, 0, 64, col_major_view.buffer);
+ glBindBuffer(GL_UNIFORM_BUFFER, 0);
+
+ time_prev = time_curr;
+
+ // OUTPUT
+ glClearColor(1.0f, 0.6f, .6f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ {
+ glUseProgram(shader_program);
+ // u32 model_loc = glGetUniformLocation(shader_program, "Model");
+ // Vec3 translation = model_translations[0];
+ // Mat4 transform = translation_matrix4m(translation.x, translation.y, translation.z);
+ // glUniformMatrix4fv(model_loc, 1, GL_TRUE, transform.buffer);
+ glBindVertexArray(vao);
+ glDrawArraysInstanced(GL_TRIANGLES, 0, 12, 10000);
+ }
+
+ SDL_GL_SwapWindow(window);
+ }
+
+ // opengl free calls
+ glDeleteVertexArrays(1, &vao);
+ glDeleteBuffers(1, &vbo);
+ glDeleteProgram(shader_program);
+
+ // sdl free calls
+ SDL_GL_DeleteContext(context);
+ SDL_DestroyWindow(window);
+ SDL_Quit();
+ return 0;
+}
diff --git a/source/lessons/instanced_rendering/basic quads/math.h b/source/lessons/instanced_rendering/basic quads/math.h
new file mode 100644
index 0000000..fda6bf8
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/math.h
@@ -0,0 +1,413 @@
+#ifndef MATH_H
+#define MATH_H
+
+#define PI 3.14159265358979323846264338327950288f
+#define Square(x) ((x)*(x))
+#define To_Radian(x) ((x) * PI / 180.0f)
+#define To_Degree(x) ((x) * 180.0f / PI)
+
+// @notes:
+// I dislike the mat4 discrepancy in the raw data. It's called buffer here while everywhere else it's called
+// data. It's weird and I hate it.
+
// Clamps x into [bottom, top]. Assumes bottom <= top.
r32 clampf(r32 x, r32 bottom, r32 top)
{
    if (x < bottom)
    {
        return bottom;
    }
    if (x > top)
    {
        return top;
    }
    return x;
}
+
+// ==== Vector Math ====
+
// 2D vector; the anonymous struct and the `data` array alias the same
// storage, so components are reachable by name or by index.
union Vec2 {
    struct {
        r32 x;
        r32 y;
    };
    r32 data[2];
};

// 3D vector with the same name/index aliasing as Vec2.
union Vec3 {
    struct {
        r32 x;
        r32 y;
        r32 z;
    };
    r32 data[3];
};

// 4D vector with the same name/index aliasing.
union Vec4 {
    struct {
        r32 x;
        r32 y;
        r32 z;
        r32 w;
    };
    r32 data[4];
};

// 4x4 matrix stored row-major; viewable as 4 row vectors (`xyzw`), a 2D
// array (`data[row][col]`), or a flat 16-float buffer for GL uploads.
union Mat4 {
    Vec4 xyzw[4];
    r32 data[4][4];
    r32 buffer[16];
};
+
+// ========================================================== Vec3 ==========================================================
+
// Constructs a Vec3 from its three components.
Vec3 init3v(r32 x, r32 y, r32 z)
{
    Vec3 v;
    v.x = x;
    v.y = y;
    v.z = z;
    return v;
}
+
// Adds `scaler` to every component of `vec`.
Vec3 scaler_add3v(Vec3 vec, r32 scaler)
{
    Vec3 out;
    out.x = vec.x + scaler;
    out.y = vec.y + scaler;
    out.z = vec.z + scaler;
    return out;
}
+
// Multiplies every component of `vec` by `scaler`.
Vec3 scaler_multiply3v(Vec3 vec, r32 scaler)
{
    Vec3 out;
    out.x = vec.x * scaler;
    out.y = vec.y * scaler;
    out.z = vec.z * scaler;
    return out;
}
+
// Divides every component of `vec` by `scaler`.
// @note(review): no guard against scaler == 0 (yields inf/nan) — confirm
// callers never pass zero.
Vec3 scaler_divide3v(Vec3 vec, r32 scaler)
{
    Vec3 out;
    out.x = vec.x / scaler;
    out.y = vec.y / scaler;
    out.z = vec.z / scaler;
    return out;
}
+
+
// Component-wise sum of two Vec3s.
Vec3 add3v(Vec3 a, Vec3 b)
{
    Vec3 out;
    out.x = a.x + b.x;
    out.y = a.y + b.y;
    out.z = a.z + b.z;
    return out;
}
+
// Component-wise difference a - b.
Vec3 subtract3v(Vec3 a, Vec3 b)
{
    Vec3 out;
    out.x = a.x - b.x;
    out.y = a.y - b.y;
    out.z = a.z - b.z;
    return out;
}
+
// Dot product of two Vec3s.
r32 dot_multiply3v(Vec3 a, Vec3 b)
{
    return (a.x * b.x) + (a.y * b.y) + (a.z * b.z);
}
+
+r32 magnitude3v(Vec3 vec)
+{
+ r32 res = sqrtf(Square(vec.x) + Square(vec.y) + Square(vec.z));
+ return res;
+}
+
+Vec3 normalize3v(Vec3 vec)
+{
+ r32 magnitude = magnitude3v(vec);
+ Vec3 res = scaler_divide3v(vec, magnitude);
+ return res;
+}
+
// @note(review): #ifndef compiles angle3v only when FUN_CALCS is NOT
// defined — if the intent was "opt-in extra calculations", this probably
// wants #ifdef instead. TODO confirm.
#ifndef FUN_CALCS
// Angle in radians between vectors a and b, via the normalized dot product.
r32 angle3v(Vec3 a, Vec3 b)
{
    Vec3 a_norm = normalize3v(a);
    Vec3 b_norm = normalize3v(b);

    r32 dot_product = dot_multiply3v(a_norm, b_norm);
    r32 res = acosf(dot_product);

    return res;
}
#endif
+
// Cross product a x b (right-handed).
Vec3 cross_multiply3v(Vec3 a, Vec3 b)
{
    Vec3 out;
    out.x = a.y * b.z - a.z * b.y;
    out.y = a.z * b.x - a.x * b.z;
    out.z = a.x * b.y - a.y * b.x;
    return out;
}
+
+// ============================================== Vec4, Mat4 ==============================================
+
// Constructs a Vec4 from its four components.
Vec4 init4v(r32 x, r32 y, r32 z, r32 w)
{
    Vec4 v;
    v.x = x;
    v.y = y;
    v.z = z;
    v.w = w;
    return v;
}
+
// Diagonal matrix with `value` on the main diagonal and zero elsewhere
// (the identity when value == 1).
Mat4 init_value4m(r32 value)
{
    Mat4 res = {0};
    for (int i = 0; i < 4; i++)
    {
        res.data[i][i] = value;
    }
    return res;
}
+
+// @note: These operations are just defined and not expressed. They are kept here for completeness sake BUT
+// since I have not had to do anything related to these, I have not created them.
+Vec4 scaler_add4v(Vec4 vec, r32 scaler);
+Vec4 scaler_subtract4v(Vec4 vec, r32 scaler);
+Vec4 scaler_multiply4v(Vec4 vec, r32 scaler);
+Vec4 scaler_divide4v(Vec4 vec, r32 scaler);
+Vec4 add4v(Vec4 a, Vec4 b);
+Vec4 subtract4v(Vec4 a, Vec4 b);
+Vec4 dot_multiply4v(Vec4 a, Vec4 b);
+
// Component-wise sum of two 4x4 matrices.
Mat4 add4m(Mat4 a, Mat4 b)
{
    Mat4 res;
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            res.data[row][col] = a.data[row][col] + b.data[row][col];
        }
    }
    return res;
}
+
+Mat4 subtract4m(Mat4 a, Mat4 b)
+{
+ Mat4 res;
+ // row 0
+ res.data[0][0] = a.data[0][0] - b.data[0][0];
+ res.data[0][1] = a.data[0][1] - b.data[0][1];
+ res.data[0][2] = a.data[0][2] - b.data[0][2];
+ res.data[0][3] = a.data[0][3] - b.data[0][3];
+ // row 1
+ res.data[1][0] = a.data[1][0] - b.data[1][0];
+ res.data[1][1] = a.data[1][1] - b.data[1][1];
+ res.data[1][2] = a.data[1][2] - b.data[1][2];
+ res.data[1][3] = a.data[1][3] - b.data[1][3];
+ // row 2
+ res.data[2][0] = a.data[2][0] - b.data[2][0];
+ res.data[2][1] = a.data[2][1] - b.data[2][1];
+ res.data[2][2] = a.data[2][2] - b.data[2][2];
+ res.data[2][3] = a.data[2][3] - b.data[2][3];
+ // row 3
+ res.data[3][0] = a.data[3][0] - b.data[3][0];
+ res.data[3][1] = a.data[3][1] - b.data[3][1];
+ res.data[3][2] = a.data[3][2] - b.data[3][2];
+ res.data[3][3] = a.data[3][3] - b.data[3][3];
+
+ return res;
+}
+
+Vec4 multiply4vm(Vec4 vec, Mat4 mat)
+{
+ /*
+ * @note: In case I get confused about this in the future.
+ *
+ * Everything is row-order, which means that things in memory are laid out row first. So with a sample matrix
+ * we have this order in memory: r1c1 r1c2 r1c3 r1c4 r2c1 ... (r = row, c = column). The same holds true for
+ * vectors. (maybe move this explanation to the top)
+ *
+ * Now, multiply4vm will multiply a vector with a matrix. Conventionally that does not make any sense, as
+ * a vector is usually 4x1 and a matrix is 4x4.
+ * What this function considers a vector is in fact a row from a matrix, which
+ * means that the vector is 1x4 and the matrix is 4x4.
+ *
+ * The function is meant to supplement the matrix multiplication process to alleviate the multiple lines of code
+ * we have to write when multiplying the row of a left matrix to each column of the right matrix
+ */
+ Vec4 res = { 0 };
+ res.x = (mat.data[0][0] * vec.x) + (mat.data[0][1] * vec.y) + (mat.data[0][2] * vec.z) + (mat.data[0][3] * vec.w);
+ res.y = (mat.data[1][0] * vec.x) + (mat.data[1][1] * vec.y) + (mat.data[1][2] * vec.z) + (mat.data[1][3] * vec.w);
+ res.z = (mat.data[2][0] * vec.x) + (mat.data[2][1] * vec.y) + (mat.data[2][2] * vec.z) + (mat.data[2][3] * vec.w);
+ res.w = (mat.data[3][0] * vec.x) + (mat.data[3][1] * vec.y) + (mat.data[3][2] * vec.z) + (mat.data[3][3] * vec.w);
+
+ return res;
+}
+
+Mat4 multiply4m(Mat4 a, Mat4 b)
+{
+ Mat4 res = { 0 };
+
+ res.xyzw[0] = multiply4vm(a.xyzw[0], b);
+ res.xyzw[1] = multiply4vm(a.xyzw[1], b);
+ res.xyzw[2] = multiply4vm(a.xyzw[2], b);
+ res.xyzw[3] = multiply4vm(a.xyzw[3], b);
+
+ return res;
+}
+
+// ==== Matrix Transformation ====
+
+Mat4 scaling_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 scaling matrix for scaling each of the x,y,z axis
+{
+ Mat4 res = init_value4m(1.0f);
+ res.data[0][0] = x;
+ res.data[1][1] = y;
+ res.data[2][2] = z;
+
+ return res;
+}
+
+Mat4 translation_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 translation matrix for translation along each of the x,y,z axis
+{
+ Mat4 res = init_value4m(1.0f);
+ res.data[0][3] = x;
+ res.data[1][3] = y;
+ res.data[2][3] = z;
+
+ return res;
+}
+
+Mat4 rotation_matrix4m(r32 angle_radians, Vec3 axis) // generates a 4x4 rotation matrix for rotation along each of the x,y,z axis
+{
+ Mat4 res = init_value4m(1.0f);
+ axis = normalize3v(axis);
+
+ r32 cos_theta = cosf(angle_radians);
+ r32 sin_theta = sinf(angle_radians);
+ r32 cos_value = 1.0f - cos_theta;
+
+ res.data[0][0] = (axis.x * axis.x * cos_value) + cos_theta;
+ res.data[0][1] = (axis.x * axis.y * cos_value) + (axis.z * sin_theta);
+ res.data[0][2] = (axis.x * axis.z * cos_value) - (axis.y * sin_theta);
+
+ res.data[1][0] = (axis.x * axis.y * cos_value) - (axis.z * sin_theta);
+ res.data[1][1] = (axis.y * axis.y * cos_value) + cos_theta;
+ res.data[1][2] = (axis.y * axis.z * cos_value) + (axis.x * sin_theta);
+
+ res.data[2][0] = (axis.x * axis.z * cos_value) + (axis.y * sin_theta);
+ res.data[2][1] = (axis.z * axis.y * cos_value) - (axis.x * sin_theta);
+ res.data[2][2] = (axis.z * axis.z * cos_value) + cos_theta;
+
+ return res;
+}
+
+Mat4 perspective_projection_matrix4m(r32 left, r32 right, r32 bottom, r32 top, r32 near, r32 far)
+{
+ Mat4 res = { 0 };
+
+ res.data[0][0] = (2.0 * near)/(right - left);
+ res.data[0][2] = (right + left)/(right - left);
+
+ res.data[1][1] = (2.0 * near)/(top - bottom);
+ res.data[1][2] = (top + bottom)/(top - bottom);
+
+ res.data[2][2] = -(far + near)/(far - near);
+ res.data[2][3] = -2.0*far*near/(far - near);
+
+ res.data[3][2] = -1.0;
+
+ return res;
+}
+
+Mat4 perspective4m(r32 fov, r32 aspect_ratio, r32 near, r32 far)
+{
+ r32 cotangent = 1.0f / tanf(fov / 2.0f);
+
+ Mat4 res = { 0 };
+
+ res.data[0][0] = cotangent / aspect_ratio;
+
+ res.data[1][1] = cotangent;
+
+ res.data[2][2] = -(far + near) / (far - near);
+ res.data[2][3] = -2.0f * far * near / (far - near);
+
+ res.data[3][2] = -1.0f;
+
+ return res;
+}
+
+Mat4 lookat4m(Vec3 up, Vec3 forward, Vec3 right, Vec3 position)
+{
+ /*
+ * @note: The construction of the lookat matrix is not obvious. For that reason here is the supplemental material I have used to understand
+ * things while I maintain my elementary understanding of linear algebra.
+ * 1. This youtube video (https://www.youtube.com/watch?v=3ZmqJb7J5wE) helped me understand why we invert matrices.
+ * It is because, we are moving from the position matrix which is a global to the view matrix which
+ * is a local. It won't be very clear from this illustration alone, so you would be best served watching the video and recollecting and understanding from there.
+ * 2. This article (https://twodee.org/blog/17560) derives (or rather shows), in a fairly shallow way, how we get to the look-at matrix.
+ */
+ Mat4 res = init_value4m(1.0);
+ res.xyzw[0] = Vec4{ right.x, right.y, right.z, -dot_multiply3v(right, position) };
+ res.xyzw[1] = Vec4{ up.x, up.y, up.z, -dot_multiply3v(up, position) };
+ res.xyzw[2] = Vec4{ forward.x, forward.y, forward.z, -dot_multiply3v(forward, position) };
+ res.xyzw[3] = Vec4{ 0.0f, 0.0f, 0.0f, 1.0f };
+
+ return res;
+}
+
+Mat4 to_col_major4m(Mat4 mat)
+{
+ Mat4 res = {0.0f};
+
+ res.data[0][0] = mat.data[0][0]; res.data[1][0] = mat.data[0][1]; res.data[2][0] = mat.data[0][2]; res.data[3][0] = mat.data[0][3];
+
+ res.data[0][1] = mat.data[1][0]; res.data[1][1] = mat.data[1][1]; res.data[2][1] = mat.data[1][2]; res.data[3][1] = mat.data[1][3];
+
+ res.data[0][2] = mat.data[2][0]; res.data[1][2] = mat.data[2][1]; res.data[2][2] = mat.data[2][2]; res.data[3][2] = mat.data[2][3];
+
+ res.data[0][3] = mat.data[3][0]; res.data[1][3] = mat.data[3][1]; res.data[2][3] = mat.data[3][2]; res.data[3][3] = mat.data[3][3];
+
+ return res;
+}
+#endif
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/cubemap.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/cubemap.fs.glsl
new file mode 100644
index 0000000..72dfe4c
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/cubemap.fs.glsl
@@ -0,0 +1,9 @@
+#version 330 core
+
+in vec3 TexCoords;
+uniform samplerCube skybox;
+out vec4 FragColor;
+
+void main() {
+ FragColor = texture(skybox, TexCoords);
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/cubemap.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/cubemap.vs.glsl
new file mode 100644
index 0000000..956673a
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/cubemap.vs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+
+uniform mat4 Model;
+uniform mat4 View;
+uniform mat4 Projection;
+
+out vec3 TexCoords;
+
+void main() {
+ vec4 pos = Projection*View*vec4(aPos, 1.0);
+ gl_Position = vec4(pos.xyww);
+ TexCoords = aPos;
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/depth_test.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/depth_test.fs.glsl
new file mode 100644
index 0000000..796d849
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/depth_test.fs.glsl
@@ -0,0 +1,30 @@
+#version 330 core
+
+
+in vec2 TexCoords;
+in vec3 VertexWorldPos;
+uniform sampler2D TexId;
+out vec4 FragColor;
+
+uniform float near = 0.1f;
+uniform float far = 100.0f;
+
+/* @note
+float linear_fragment_depth = MakeDepthLinear(non_linear_fragment_depth);
+float scaled_lfd = linear_fragment_depth/far;
+
+gives us the z value in eye space.
+This is purely for learning purposes.
+The equation used in MakeDepthLinear is derived from the PerspectiveProjectionMatrix.
+Take a look at the equation for that in the codebase
+or here: https://www.songho.ca/opengl/gl_projectionmatrix.html
+*/
+float MakeDepthLinear(float depth) {
+ float ndc = 2.0f*depth - 1;
+ float linear_depth = (2.0 * far * near)/(far + near - ndc*(far - near));
+ return linear_depth;
+}
+
+void main() {
+ FragColor = texture(TexId, TexCoords);
+}
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/depth_test.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/depth_test.vs.glsl
new file mode 100644
index 0000000..827da20
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/depth_test.vs.glsl
@@ -0,0 +1,21 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec2 aTex;
+
+uniform mat4 Model;
+layout (std140) uniform Matrices {
+ mat4 View; // start: 0 // end: 16 * 4 = 64
+ mat4 Projection; // start: 64 // end: 64 + 64 = 128
+};
+
+out vec2 TexCoords;
+out vec3 VertexWorldPos;
+
+// @note: I still do not fully understand how the FragNormal calculation works. Need to make sure I intuitively
+// get that
+
+void main() {
+ gl_Position = Projection*View*Model*vec4(aPos, 1.0);
+ VertexWorldPos = vec3(Model * vec4(aPos, 1.0));
+ TexCoords = aTex;
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/fbo.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/fbo.fs.glsl
new file mode 100644
index 0000000..e12ad33
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/fbo.fs.glsl
@@ -0,0 +1,72 @@
+#version 330 core
+
+in vec2 TexCoords;
+uniform sampler2D TexId;
+out vec4 FragColor;
+
+vec4 filter_color_invert(vec4 color)
+{
+ vec4 res = vec4(vec3(1.0) - vec3(color), 1.0);
+ return res;
+}
+
+vec4 filter_color_grayscale(vec4 color)
+{
+ // we will need to average the colors
+ // float average = (color.x + color.y + color.z) / 3.0f;
+ // in reality, humans are most sensitive to green and least to blue, so we need to weight the channels accordingly
+ float average = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
+ vec4 res = vec4(vec3(average), 1.0);
+
+ return res;
+}
+
+// @note: different kernels for experimentation
+const float kernel_sharpen[9] = float[](
+ -1, -1, -1,
+ -1, 9, -1,
+ -1, -1, -1
+);
+
+const float kernel_blur[9] = float[](
+ 1.0/16.0, 2.0/16.0, 1.0/16.0,
+ 2.0/16.0, 4.0/16.0, 2.0/16.0,
+ 1.0/16.0, 2.0/16.0, 1.0/16.0
+);
+
+const float kernel_edge_detection[9] = float[](
+ 1, 1, 1,
+ 1, -8, 1,
+ 1, 1, 1
+);
+
+vec4 filter_kernal_effects()
+{
+ const float offset = 1.0/300.0;
+ vec2 offsets[9] = vec2[](
+ vec2(-offset, offset), // top left
+ vec2( 0, offset), // top center
+ vec2( offset, offset), // top right
+ vec2(-offset, 0), // center left
+ vec2( 0, 0), // center center
+ vec2( offset, 0), // center right
+ vec2(-offset, -offset), // bot left
+ vec2( 0, -offset), // bot center
+ vec2( offset, -offset) // bot right
+ );
+
+ float kernal[9] = kernel_edge_detection;
+ vec3 kernalValue = vec3(0.0);
+ vec3 sampleTex[9];
+ for (int i=0; i<9; i++) {
+ sampleTex[i] = vec3(texture(TexId, TexCoords + offsets[i]));
+ kernalValue += (kernal[i] * sampleTex[i]);
+ }
+
+ vec4 res = vec4(kernalValue, 1.0);
+ return res;
+}
+
+void main() {
+ FragColor = texture(TexId, TexCoords);
+}
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/fbo.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/fbo.vs.glsl
new file mode 100644
index 0000000..82d7211
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/fbo.vs.glsl
@@ -0,0 +1,10 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec2 aTex;
+
+out vec2 TexCoords;
+
+void main() {
+ gl_Position = vec4(aPos.x, aPos.y, 0.0f, 1.0f);
+ TexCoords = aTex;
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/instancing.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/instancing.fs.glsl
new file mode 100644
index 0000000..1ec8011
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/instancing.fs.glsl
@@ -0,0 +1,9 @@
+#version 330 core
+
+
+// uniform sampler2D TexId;
+out vec4 FragColor;
+
+void main() {
+ FragColor = vec4(gl_FragCoord.x/1024.0, gl_FragCoord.y/768.0, 0.0, 1.0f);
+}
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/instancing.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/instancing.vs.glsl
new file mode 100644
index 0000000..eff2fae
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/instancing.vs.glsl
@@ -0,0 +1,16 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in mat4 aOffset;
+
+// uniform mat4 Model;
+layout (std140) uniform Matrices {
+ mat4 View; // start: 0 // end: 16 * 4 = 64
+ mat4 Projection; // start: 64 // end: 64 + 64 = 128
+};
+
+// @note: I still do not fully understand how the FragNormal calculation works. Need to make sure I intuitively
+// get that
+
+void main() {
+ gl_Position = Projection*View*aOffset*vec4(aPos, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/refl.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/refl.fs.glsl
new file mode 100644
index 0000000..6d28392
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/refl.fs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+ vec3 I = normalize(Position - cameraPos);
+ vec3 R = reflect(I, normalize(Normal));
+ FragColor = vec4(texture(skybox, R).rgb, 1.0);
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/refl.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/refl.vs.glsl
new file mode 100644
index 0000000..b8f2b97
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/refl.vs.glsl
@@ -0,0 +1,16 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+ Normal = mat3(transpose(inverse(Model))) * aNormal;
+ Position = vec3(Model * vec4(aPos, 1.0));
+ gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/refr.fs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/refr.fs.glsl
new file mode 100644
index 0000000..6747ded
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/refr.fs.glsl
@@ -0,0 +1,15 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+ float refr_ratio = 1.0/1.52;
+ vec3 I = normalize(Position - cameraPos);
+ vec3 R = refract(I, normalize(Normal), refr_ratio);
+ FragColor = vec4(texture(skybox, R).rgb, 1.0);
+};
diff --git a/source/lessons/instanced_rendering/basic quads/shaders/refr.vs.glsl b/source/lessons/instanced_rendering/basic quads/shaders/refr.vs.glsl
new file mode 100644
index 0000000..0554f0a
--- /dev/null
+++ b/source/lessons/instanced_rendering/basic quads/shaders/refr.vs.glsl
@@ -0,0 +1,20 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+ // @note: This is the calculation for getting the normal vector
+ // one that is unaffected by non-uniform scaling that is.
+ // look at the lighting chapter in learnopengl.com to understand this more
+ Normal = mat3(transpose(inverse(Model))) * aNormal;
+ Position = vec3(Model * vec4(aPos, 1.0));
+ gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+};
+
diff --git a/source/lessons/instanced_rendering/planetary models/main.cpp b/source/lessons/instanced_rendering/planetary models/main.cpp
new file mode 100644
index 0000000..a9e5c81
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/main.cpp
@@ -0,0 +1,935 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <SDL2/SDL.h>
+#include <glad/glad.h>
+#include <assimp/Importer.hpp>
+#include <assimp/scene.h>
+#include <assimp/postprocess.h>
+#include <vector>
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+
+/* @lookup:
+* - understand kernels, how they work and how they affect post-processing
+* - Check to see why it is necessary to do glBindTexture()
+* - understand the difference between binding textures, and activating a texture unit
+* - I do not understand how floating point numbers work, so I should probably look into that.
+* - The normal matrix calculation in the fragment shader for the object affected by light has been mainly copied.
+* I have tried to understand the formula, and whilst it made some sense, it is not fully clear to me, and I cannot picture it yet.
+* Revisit the derivation for the normal matrix some time in the future.
+* - Lookup the derivation of the formula for reflecting a vector about a normal. I am doing that for specular lighting, but the learnopengl tutorial
+* just uses a glsl reflect formula, and at the time of writing it is also very late so I am not in the mood or position to look into it at present.
+* - One of the things I have observed with specular lights is that the circle/specular highlight follows the camera (me) when I move. I would like to figure
+* out a way by which this does not happen and it remains fixed on the object, at the angle at which it hits. All of this will be made complicated by the fact
+* that of course everything is actually happening from the camera's perspective. I would still love to figure this out.
+*/
+
+/*
+ * @note:
+ * Model loading is busted and is missing a lot of things, not that it matters for now,
+ * but it will be useful for the future
+ */
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef float r32;
+typedef double r64;
+
+typedef u8 b8;
+
+#include "math.h"
+
+// =========== Shader Loading =============
+
+unsigned int gl_create_vertex_shader(char* vertex_shader_source)
+{
+ unsigned int vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ glShaderSource(vertex_shader, 1, &vertex_shader_source, NULL);
+ glCompileShader(vertex_shader);
+
+ int success;
+ char info_log[512];
+ glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
+ if (!success)
+ {
+ glGetShaderInfoLog(vertex_shader, 512, NULL, info_log);
+ printf("================================\n");
+ printf("vertex shader compilation failed:\n%s\n", info_log);
+ }
+
+ return vertex_shader;
+}
+
+unsigned int gl_create_fragment_shader(char* fragment_shader_source)
+{
+ unsigned int fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ glShaderSource(fragment_shader, 1, &fragment_shader_source, NULL);
+ glCompileShader(fragment_shader);
+
+ int success;
+ char info_log[512];
+ glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
+ if (!success)
+ {
+ glGetShaderInfoLog(fragment_shader, 512, NULL, info_log);
+ printf("================================\n");
+ printf("fragment shader compilation failed:\n%s\n", info_log);
+ }
+
+ return fragment_shader;
+}
+
+unsigned int gl_create_shader_program(unsigned int vertex_shader, unsigned int fragment_shader)
+{
+ unsigned int shader_program = glCreateProgram();
+
+ glAttachShader(shader_program, vertex_shader);
+ glAttachShader(shader_program, fragment_shader);
+ glLinkProgram(shader_program);
+
+ int success;
+ char info_log[512];
+ glGetProgramiv(shader_program, GL_LINK_STATUS, &success);
+ if (!success)
+ {
+ glGetProgramInfoLog(shader_program, 512, NULL, info_log);
+ printf("================================\n");
+ printf("shader program linking failed:\n%s\n", info_log);
+ }
+
+ glDeleteShader(vertex_shader);
+ glDeleteShader(fragment_shader);
+
+ return shader_program;
+}
+
+unsigned int gl_shader_program(char* vertex_shader_source, char* fragment_shader_source)
+{
+ unsigned int vertex_shader = gl_create_vertex_shader(vertex_shader_source);
+ unsigned int fragment_shader = gl_create_fragment_shader(fragment_shader_source);
+ unsigned int shader_program = gl_create_shader_program(vertex_shader, fragment_shader);
+
+ return shader_program;
+}
+
+Mat4 camera_create4m(Vec3 camera_pos, Vec3 camera_look, Vec3 camera_up)
+{
+ // @note: We do this because this allows the camera to have the axis it looks at
+ // inwards be the +z axis.
+ // If we did not do this, then the inward axis the camera looks at would be negative.
+ // I am still learning from learnopengl.com but I imagine that this was done for convenience's sake.
+ Vec3 camera_forward_dir = normalize3v(subtract3v(camera_pos, camera_look));
+ Vec3 camera_right_dir = normalize3v(cross_multiply3v(camera_up, camera_forward_dir));
+ Vec3 camera_up_dir = normalize3v(cross_multiply3v(camera_forward_dir, camera_right_dir));
+
+ Mat4 res = lookat4m(camera_up_dir, camera_forward_dir, camera_right_dir, camera_pos);
+
+ return res;
+}
+
+Vec3 camera_look_around(r32 angle_pitch, r32 angle_yaw)
+{
+ Vec3 camera_look = {0.0};
+ camera_look.x = cosf(angle_yaw) * cosf(angle_pitch);
+ camera_look.y = sinf(angle_pitch);
+ camera_look.z = sinf(angle_yaw) * cosf(angle_pitch);
+ camera_look = normalize3v(camera_look);
+
+ return camera_look;
+}
+
+s32 gl_load_texture(u32 texture_id, const char* path)
+{
+ s32 width, height, nrChannels;
+ unsigned char *data = stbi_load(path, &width, &height, &nrChannels, 0);
+ if (data)
+ {
+ GLenum format;
+ if (nrChannels == 1)
+ format = GL_RED;
+ else if (nrChannels == 3)
+ format = GL_RGB;
+ else if (nrChannels == 4)
+ format = GL_RGBA;
+
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ stbi_image_free(data);
+ }
+ else
+ {
+ printf("failed to load image texture at path: %s", path);
+ stbi_image_free(data);
+ }
+
+ return texture_id;
+}
+
+// =================== Model Loading ========================
+// This section contains a whole host of things:
+// 1. classes
+// 2. std::vectors
+// 3. std::strings
+// that I have only used as a glue for I did not know if I had the model loading setup properly.
+// @todo: replace these things eventually. For now the goal is to complete learnopengl
+
+s32 TextureFromFile(const char* filepath, std::string directory)
+{
+ // @note: this function is stupid as it already became outdated as I needed to tweak the parameters
+ // for wrapping. Either those become function parameters (Which makes sense I guess) or I look at
+ // exactly what steps I am reusing and just make that a function so the function is called fewer times.
+ //
+ // I am guessing this won't look good from a design point of view for all those jobs and postings, even if
+ // this may be the simpler and faster thing to do, albeit at the cost of typing.
+ std::string filename = std::string(filepath);
+ filename = directory + '/' + filename;
+
+ u32 texid;
+ glGenTextures(1, &texid);
+
+ s32 width, height, nrChannels;
+ unsigned char *data = stbi_load(filename.c_str(), &width, &height, &nrChannels, 0);
+ if (data)
+ {
+ GLenum format;
+ if (nrChannels == 1)
+ format = GL_RED;
+ else if (nrChannels == 3)
+ format = GL_RGB;
+ else if (nrChannels == 4)
+ format = GL_RGBA;
+
+ glBindTexture(GL_TEXTURE_2D, texid);
+ glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+
+ stbi_image_free(data);
+ }
+ else
+ {
+ printf("failed to load image texture at path: %s", filepath);
+ stbi_image_free(data);
+ }
+
+ return texid;
+}
+
+enum TextureType { TextureDiffuse=0, TextureSpecular };
+
+struct Vertex {
+ Vec3 position;
+ Vec3 normal;
+ Vec2 texture;
+};
+
+struct Texture {
+ u32 id;
+ enum TextureType type;
+ std::string fname;
+};
+
+class Mesh {
+ public:
+ std::vector<Vertex> vertices;
+ std::vector<u32> indices;
+ std::vector<Texture> textures;
+
+ u32 vao;
+ u32 vbo;
+ u32 ebo;
+
+ Mesh(std::vector<Vertex> vertices, std::vector<u32> indices, std::vector<Texture> textures)
+ {
+ this->vertices = vertices;
+ this->indices = indices;
+ this->textures = textures;
+
+ // setup mesh shader stuff
+ glGenVertexArrays(1, &vao);
+ glGenBuffers(1, &vbo);
+ glGenBuffers(1, &ebo);
+
+ glBindVertexArray(vao);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(struct Vertex), &(this->vertices[0]), GL_STATIC_DRAW);
+
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
+ glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->indices.size() * sizeof(u32), &(this->indices[0]), GL_STATIC_DRAW);
+
+ // position
+ glEnableVertexAttribArray(0);
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)0);
+ // normal
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal));
+ // texture
+ glEnableVertexAttribArray(2);
+ glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texture));
+
+ glBindVertexArray(0);
+ }
+
+ void draw(u32 shader_program)
+ {
+ glUseProgram(shader_program);
+
+ u32 diffuse_num = 1;
+ u32 specular_num = 1;
+ char tex_unit_name[64];
+ // set shininess
+ s32 mat_shine_loc = glGetUniformLocation(shader_program, "material.shininess");
+ glUniform1f(mat_shine_loc, 32.0f);
+
+ for (u32 i=0; i<textures.size(); i++)
+ {
+ struct Texture curr_tex = textures[i];
+ if (curr_tex.type == TextureDiffuse)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", diffuse_num);
+ }
+ else if (curr_tex.type == TextureSpecular)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", specular_num);
+ }
+
+ glActiveTexture(GL_TEXTURE0 + i);
+ s32 tex_unit_loc = glGetUniformLocation(shader_program, tex_unit_name);
+ glUniform1i(tex_unit_loc, i);
+ glBindTexture(GL_TEXTURE_2D, curr_tex.id);
+ }
+ glActiveTexture(GL_TEXTURE0);
+
+ glBindVertexArray(vao);
+ glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
+ glBindVertexArray(0);
+ }
+
+ void draw_instanced(u32 shader_program, u32 instance_count)
+ {
+ glUseProgram(shader_program);
+
+ u32 diffuse_num = 1;
+ u32 specular_num = 1;
+ char tex_unit_name[64];
+ // set shininess
+ s32 mat_shine_loc = glGetUniformLocation(shader_program, "material.shininess");
+ glUniform1f(mat_shine_loc, 32.0f);
+
+ for (u32 i=0; i<textures.size(); i++)
+ {
+ struct Texture curr_tex = textures[i];
+ if (curr_tex.type == TextureDiffuse)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", diffuse_num);
+ }
+ else if (curr_tex.type == TextureSpecular)
+ {
+ sprintf(tex_unit_name, "material.diffuse[%i]", specular_num);
+ }
+
+ glActiveTexture(GL_TEXTURE0 + i);
+ s32 tex_unit_loc = glGetUniformLocation(shader_program, tex_unit_name);
+ glUniform1i(tex_unit_loc, i);
+ glBindTexture(GL_TEXTURE_2D, curr_tex.id);
+ }
+ glActiveTexture(GL_TEXTURE0);
+
+ glBindVertexArray(vao);
+ glDrawElementsInstanced(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0, instance_count);
+ glBindVertexArray(0);
+ }
+};
+
+class Model
+{
+ public:
+ std::vector<Texture> loaded_textures;
+ std::vector<Mesh> meshes;
+ std::string directory;
+
+ Model(std::string path)
+ {
+ load_model(path);
+ }
+ void instance_mesh();
+ void draw(u32 shader_program);
+ void draw_instanced(u32 shader_program, u32 instance_count);
+ private:
+ void load_model(std::string path);
+ void process_node(aiNode *node, const aiScene *scene);
+ Mesh process_mesh(aiMesh *mesh, const aiScene *scene);
+ std::vector<Texture> load_material_textures(aiMaterial *mat, aiTextureType type, TextureType type_name);
+};
+
+void Model::instance_mesh()
+{
+ for (u32 i=0; i < meshes.size(); i++)
+ {
+ Mesh curr_mesh = meshes[i];
+ glBindVertexArray(curr_mesh.vao);
+ glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)0);
+ glEnableVertexAttribArray(3);
+
+ glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(4*sizeof(u32)));
+ glEnableVertexAttribArray(4);
+
+ glVertexAttribPointer(5, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(8*sizeof(u32)));
+ glEnableVertexAttribArray(5);
+
+ glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(u32), (void*)(12*sizeof(u32)));
+ glEnableVertexAttribArray(6);
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glVertexAttribDivisor(3, 1);
+ glVertexAttribDivisor(4, 1);
+ glVertexAttribDivisor(5, 1);
+ glVertexAttribDivisor(6, 1);
+
+ glBindVertexArray(0);
+ }
+}
+
+void Model::draw(u32 shader_program)
+{
+ for (int i=0; i < meshes.size(); i++)
+ {
+ meshes[i].draw(shader_program);
+ }
+}
+
+void Model::draw_instanced(u32 shader_program, u32 instance_count)
+{
+ for (int i=0; i < meshes.size(); i++)
+ {
+ meshes[i].draw_instanced(shader_program, instance_count);
+ }
+}
+
+void Model::load_model(std::string path)
+{
+ Assimp::Importer import;
+ const aiScene *scene = import.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs);
+
+ if (!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode)
+ {
+ printf("error loading model :%s\n", import.GetErrorString());
+ return;
+ }
+
+ directory = path.substr(0, path.find_last_of('/'));
+ process_node(scene->mRootNode, scene);
+}
+
+void Model::process_node(aiNode *node, const aiScene *scene)
+{
+ for (int i=0; i < node->mNumMeshes; i++)
+ {
+ aiMesh *mesh = scene->mMeshes[node->mMeshes[i]];
+ meshes.push_back(process_mesh(mesh, scene));
+ }
+
+ for (int i=0; i<node->mNumChildren; i++)
+ {
+ process_node(node->mChildren[i], scene);
+ }
+}
+
// Converts one Assimp mesh into our Mesh type: copies positions/normals/UVs
// per vertex, flattens face indices (faces are triangles thanks to
// aiProcess_Triangulate), and gathers diffuse + specular textures from the
// mesh's material.
Mesh Model::process_mesh(aiMesh *mesh, const aiScene *scene)
{
    std::vector<Vertex> vertices;
    std::vector<u32> indices;
    std::vector<Texture> textures;

    for (u32 i=0; i < mesh->mNumVertices; i++)
    {
        Vec3 position;
        position.x = mesh->mVertices[i].x;
        position.y = mesh->mVertices[i].y;
        position.z = mesh->mVertices[i].z;

        // NOTE(review): mNormals is only non-null when the import generated
        // or contained normals — confirm the assets always have them.
        Vec3 normal;
        normal.x = mesh->mNormals[i].x;
        normal.y = mesh->mNormals[i].y;
        normal.z = mesh->mNormals[i].z;

        // Only the first UV channel is used; default to (0,0) when absent.
        Vec2 texture = {0, 0};
        if (mesh->mTextureCoords[0])
        {
            texture.x = mesh->mTextureCoords[0][i].x;
            texture.y = mesh->mTextureCoords[0][i].y;
        }

        struct Vertex vertex;
        vertex.position = position;
        vertex.normal = normal;
        vertex.texture = texture;

        vertices.push_back(vertex);
    }
    // process indices
    for (u32 i = 0; i < mesh->mNumFaces; i++)
    {
        aiFace face = mesh->mFaces[i];
        for(u32 j = 0; j < face.mNumIndices; j++)
        {
            indices.push_back(face.mIndices[j]);
        }
    }
    // process material
    if (mesh->mMaterialIndex >= 0)
    {
        aiMaterial *material = scene->mMaterials[mesh->mMaterialIndex];
        std::vector<Texture> diffuse_maps = load_material_textures(material, aiTextureType_DIFFUSE, TextureDiffuse);
        textures.insert(textures.end(), diffuse_maps.begin(), diffuse_maps.end());
        std::vector<Texture> specular_maps = load_material_textures(material, aiTextureType_SPECULAR, TextureSpecular);
        textures.insert(textures.end(), specular_maps.begin(), specular_maps.end());
    }

    return Mesh(vertices, indices, textures);
}
+
+std::vector<Texture> Model::load_material_textures(aiMaterial *mat, aiTextureType type, TextureType tex_type)
+{
+ std::vector<Texture> textures;
+ for(u32 i=0; i<mat->GetTextureCount(type); i++)
+ {
+ bool load_texture = true;
+ aiString str;
+ mat->GetTexture(type, i, &str);
+ const char* fname = str.C_Str();
+
+ for (s32 j=0; j<loaded_textures.size(); j++)
+ {
+ if (std::strcmp(loaded_textures[j].fname.data(), fname) == 0)
+ {
+ load_texture = false;
+ textures.push_back(loaded_textures[j]);
+ break;
+ }
+ }
+ if (load_texture)
+ {
+ Texture texture;
+ texture.id = TextureFromFile(fname, directory);
+ texture.type = tex_type;
+ texture.fname = std::string(fname);
+ textures.push_back(texture);
+ loaded_textures.push_back(texture);
+ }
+ }
+
+ return textures;
+}
+
+class Shader {
+ // @note: this is a draft, I think frankly, it's a stupid idea to be making this at this point
+ // but my goal is to look at my code at this stage (which is in the second half of (or so I think)
+ // learnopengl and identify repeated code that I think I can yank out and can make convenient to write.
+ // The precondition for all of this is that I do not remodel the program based off of some vague idea of
+ // cleanliness in my head. This is all still very procedural, I just want to minimize the amount I type
+ // and at the same time see how well I can identify good abstractions
+ //
+ //
+ // I much prefer to have things be not a class, especially if I look at how I did my lovely
+ // math functions, which are so simple and straightforward
+ public:
+ u32 id;
+
+ // @note: all well and good until you get compute shaders
+ // then the entire thing shits the bed
+ Shader(char* vertex_shader_source, char* fragment_shader_source) {
+ id = gl_shader_program(vertex_shader_source, fragment_shader_source);
+ }
+
+ void use() {
+ glUseProgram(id);
+ }
+
+ void draw_triangles(u32 vao, u32 count) {
+ glBindVertexArray(vao);
+ glDrawArrays(GL_TRIANGLES, 0, count);
+ }
+
+ void set_1i(char* variable, s32 value) {
+ s32 loc = glGetUniformLocation(id, variable);
+ glUniform1i(loc, value);
+ }
+
+ void set_matrix4fv(char* variable, Mat4 value, int count) {
+ s32 loc = glGetUniformLocation(id, variable);
+ glUniformMatrix4fv(loc, count, GL_TRUE, value.buffer);
+ };
+
+ ~Shader() {
+ // @note: this can literally be replaced by another function that goes over the entire list
+ // of shader_programs near the program exit and deletes them, if I even need to do that.
+ glDeleteProgram(id);
+ }
+};
+
// Entry point: creates an SDL2 window with a GL 3.3 core context, loads the
// planet and rock models, builds `instance_count` randomized rock transforms
// arranged in a ring, then renders the planet (per-object Model uniform) and
// the instanced rock field every frame with WASD + mouse free-look controls.
int main(int argc, char* argv[])
{

    // ============ END ============
    int width = 1024;
    int height = 768;

    if (SDL_Init(SDL_INIT_VIDEO) != 0)
    {
        printf("Error initialising SDL2: %s\n", SDL_GetError());
        return 0;
    };

    // set opengl version and profile
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute( SDL_GL_STENCIL_SIZE, 8 );

    // initialise window with opengl flag
    SDL_Window* window = SDL_CreateWindow("SDL Test",
                                          50,
                                          50,
                                          width,
                                          height,
                                          SDL_WINDOW_OPENGL);

    // capture the mouse for relative-motion free look
    SDL_SetRelativeMouseMode(SDL_TRUE);

    // create an opengl context
    SDL_GLContext context = SDL_GL_CreateContext(window);
    if (!context)
    {
        printf("OpenGL context creation failed: %s\n", SDL_GetError());
        return -1;
    }


    // load glad
    if (!gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress)) {
        printf("Failed to initialize Glad\n");
        return 1;
    }

    // vsync controls: 0 = OFF | 1 = ON (Default)
    // SDL_GL_SetSwapInterval(0);

    // filesystem playground stuff
    // NOTE(review): SDL_LoadFile returns NULL on failure and these sources go
    // into gl_shader_program unchecked — verify the shader paths exist
    // relative to the working directory. The buffers are also never
    // SDL_free'd (lives for the whole process, so benign here).
    size_t read_count;
    char* vertex_source = (char*)SDL_LoadFile("./source/shaders/model.vs.glsl", &read_count);
    char* inst_vertex_source = (char*)SDL_LoadFile("./source/shaders/instanced_model.vs.glsl", &read_count);
    char* fragment_source = (char*)SDL_LoadFile("./source/shaders/model.fs.glsl", &read_count);

    u32 shader_program = gl_shader_program(vertex_source, fragment_source);
    u32 inst_shader_program = gl_shader_program(inst_vertex_source, fragment_source);

    stbi_set_flip_vertically_on_load(1);

    Model planet_model = Model(std::string("./assets/planet/planet.obj"));
    Model rock_model = Model(std::string("assets/rock/rock.obj"));

    // NOTE(review): this vertex array is never uploaded or drawn in this
    // lesson — looks like leftover data from a previous lesson.
    float inst_vertices[] = {
        // positions // texture Coords (note we set these higher than 1 (together with GL_REPEAT as texture wrapping mode). this will cause the floor texture to repeat)
        // BottomFace
        -0.5f, -1.0f, -0.5f,  // top-right
         0.5f, -1.0f, -0.5f,  // top-left
         0.5f, -1.0f,  0.5f,  // bottom-left
         0.5f, -1.0f,  0.5f,  // bottom-left
        -0.5f, -1.0f,  0.5f,  // bottom-right
        -0.5f, -1.0f, -0.5f,  // top-right
        // Top face
        -0.5f, -0.9f, -0.5f,  // top-left
         0.5f, -0.9f,  0.5f,  // bottom-right
         0.5f, -0.9f, -0.5f,  // top-right
         0.5f, -0.9f,  0.5f,  // bottom-right
        -0.5f, -0.9f, -0.5f,  // top-left
        -0.5f, -0.9f,  0.5f   // bottom-left
    };
    // Build one random model matrix per rock instance: scatter around a ring
    // of `radius` with up to +/- `offset` jitter per axis.
    const u32 instance_count = 100000;
    Mat4 *offsets = (Mat4 *)calloc(instance_count, sizeof(Mat4));
    u32 amount = instance_count;
    r32 offset = 7.0f;
    r32 radius = 50.0f;
    srand(SDL_GetTicks64());
    for (u32 i = 0; i < amount; i++)
    {
        Mat4 model = init_value4m(1.0f);

        // 1. translation
        // NOTE(review): `angle` is an integer and is fed to sin/cos directly
        // (no To_Radian) — the value wraps around the circle in a coarse,
        // pseudo-random way. Looks intentional-enough for a scatter field,
        // but confirm against the reference lesson.
        u32 angle = (i*360)/amount;
        r32 displacement = (rand()%(u32)(2 * offset * 100)) / 100.0f - offset;
        r32 x = sin(angle) * radius + displacement;
        displacement = (rand()%(u32)(2 * offset * 100)) / 100.0f - offset;
        r32 y = displacement * 1.5f;
        displacement = (rand()%(u32)(2 * offset * 100)) / 100.0f - offset;
        r32 z = cos(angle) * radius + displacement;
        Mat4 translation = translation_matrix4m(x, y, z);

        // 2. scale: random factor in [0.05, 0.24]
        r32 rnd_scale = (rand()%20) / 100.0f + 0.05;
        Mat4 scale = scaling_matrix4m(rnd_scale, rnd_scale, rnd_scale);

        // 3. rotation: random angle about a fixed skew axis
        r32 rot_angle = To_Radian(rand()%360);
        Mat4 rotation = rotation_matrix4m(rot_angle, Vec3{0.4f, 0.6f, 0.8f});

        // compose scale -> rotate -> translate (left-multiplied in turn)
        model = multiply4m(rotation, model);
        model = multiply4m(scale, model);
        model = multiply4m(translation, model);

        // stored column-major so the shader's mat4 attribute reads it directly
        offsets[i] = to_col_major4m(model);
    }

    // upload the per-instance matrices; consumed by rock_model.instance_mesh()
    u32 instance_vbo;
    glGenBuffers(1, &instance_vbo);
    glBindBuffer(GL_ARRAY_BUFFER, instance_vbo);

#if 0
    u32 offset_sz = offsets.size();
    Mat4 *offset_ptr = offsets.data();
#endif
    u32 offset_sz = instance_count * sizeof(Mat4);
    Mat4 *offset_ptr = &offsets[0];
    glBufferData(GL_ARRAY_BUFFER, offset_sz, offset_ptr, GL_STATIC_DRAW);

    rock_model.instance_mesh();

    // uniform buffer objects
    // 128 bytes = two column-major mat4s: View at offset 0, Projection at 64.
    u32 ubo_camera_block;
    glGenBuffers(1, &ubo_camera_block);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
    glBufferData(GL_UNIFORM_BUFFER, 128, NULL, GL_STATIC_DRAW);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);

    // objects
    // NOTE(review): only entry [0] (the planet) is used in this lesson; the
    // rest are carried over from earlier lessons.
    Vec3 model_translations[] = {
        Vec3{ 0.5,  0.0,  0.0},   // 0: origin square
        Vec3{ -1.0,  0.0, -1.0},  // 1: plane
        Vec3{ -1.0,  0.0, -0.5},  // 2: window between squares
        Vec3{  0.0,  0.0,  3.0},  // 3: window infront of origin square
        Vec3{ -2.5,  0.0, -0.5},  // 4: square to the left
        Vec3{ -1.0,  0.0, -1.5},  // 5: random square behind window between squares
        Vec3{ -1.0,  0.0, -8.0},  // 6: reflective plane
        Vec3{ -1.0,  2.0, -8.0},  // 6: refractive "window"
    };

    r32 FOV = 90.0;
    r32 time_curr;
    r32 time_prev = SDL_GetTicks64() / 100.0;
    // camera stuff
    Vec3 camera_pos = Vec3{ 0.0, 0.0, 10.0f};
    Vec3 preset_up_dir = Vec3{ 0.0, 1.0, 0.0 };

    // NOTE(review): angle_roll is declared but never used.
    r32 angle_yaw, angle_pitch, angle_roll;
    angle_pitch = (r32)To_Radian(0.0f);
    angle_yaw = (r32)-To_Radian(90.0f);

    Vec3 camera_look = camera_look_around(angle_pitch, angle_yaw);

    // @todo: remove this, I dont like this and think that this is unnecessary
    Vec3 camera_look_increment;
    r32 camera_speed = 2.5f;

    Mat4 view = camera_create4m(camera_pos, camera_look, preset_up_dir);

    Mat4 proj = perspective4m((r32)To_Radian(90.0), (r32)width / (r32)height, 0.1f, 1000.0f);

    // bind both shader programs' "Matrices" uniform blocks to binding 0,
    // backed by ubo_camera_block.
    u32 block_binding = 0;
    u32 matrices_ind;
    matrices_ind = glGetUniformBlockIndex(shader_program, "Matrices");
    glUniformBlockBinding(shader_program, matrices_ind, block_binding);
    matrices_ind = glGetUniformBlockIndex(inst_shader_program, "Matrices");
    glUniformBlockBinding(inst_shader_program, matrices_ind, block_binding);
    // or glBindBufferBase();
    glBindBufferRange(GL_UNIFORM_BUFFER, block_binding, ubo_camera_block, 0, 128);

    // projection never changes; upload it once at offset 64
    Mat4 col_major_proj = to_col_major4m(proj);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
    glBufferSubData(GL_UNIFORM_BUFFER, 64, 64, col_major_proj.buffer);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);

    u8 game_running = true;

    // per-key held-down state, maintained by KEYDOWN/KEYUP below
    u8 hold_lshift = false;
    u8 move_w = false;
    u8 move_a = false;
    u8 move_s = false;
    u8 move_d = false;

    while(game_running)
    {

        // frame delta (in tenths of a second, since ticks are divided by 100)
        time_curr = SDL_GetTicks64() / 100.0;
        r32 time_delta = time_curr - time_prev;

        r32 camera_speed_adjusted = time_delta * camera_speed;
        camera_look_increment = scaler_multiply3v(camera_look, camera_speed_adjusted);

        SDL_Event ev;
        while(SDL_PollEvent(&ev))
        {

            // INPUT
            switch (ev.type)
            {
                case (SDL_QUIT):
                    {
                        game_running = false;
                    } break;
                case (SDL_KEYDOWN):
                    {
                        if (ev.key.keysym.sym == SDLK_LSHIFT)
                        {
                            hold_lshift = true;
                        }
                        if (ev.key.keysym.sym == SDLK_w)
                        {
                            move_w = true;
                        }
                        if (ev.key.keysym.sym == SDLK_s)
                        {
                            move_s = true;
                        }
                        if (ev.key.keysym.sym == SDLK_a)
                        {
                            move_a = true;
                        }
                        if (ev.key.keysym.sym == SDLK_d)
                        {
                            move_d = true;
                        }
                    } break;
                case (SDL_KEYUP):
                    {
                        if (ev.key.keysym.sym == SDLK_LSHIFT)
                        {
                            hold_lshift = false;
                        }
                        if (ev.key.keysym.sym == SDLK_w)
                        {
                            move_w = false;
                        }
                        if (ev.key.keysym.sym == SDLK_s)
                        {
                            move_s = false;
                        }
                        if (ev.key.keysym.sym == SDLK_a)
                        {
                            move_a = false;
                        }
                        if (ev.key.keysym.sym == SDLK_d)
                        {
                            move_d = false;
                        }
                    } break;
                case (SDL_MOUSEMOTION):
                    {
                        // mouse look: yaw from x motion, pitch from y motion,
                        // pitch clamped to +/- 89 degrees to avoid gimbal flip
                        SDL_MouseMotionEvent mouse_event = ev.motion;
                        r32 x_motion = (r32)mouse_event.xrel;
                        r32 y_motion = (r32)mouse_event.yrel;
                        if (x_motion != 0.0 || y_motion != 0.0)
                        {
                            angle_yaw = angle_yaw + To_Radian(x_motion * 0.1f);
                            angle_pitch = clampf(angle_pitch + To_Radian(-y_motion * 0.1f), To_Radian(-89.0f), To_Radian(89.0f));

                            camera_look = camera_look_around(angle_pitch, angle_yaw);
                        }
                    } break;
                default:
                    {
                        break;
                    }
            }
        }

        // PROCESS: move camera along look direction (W/S) and strafe (A/D)
        if (move_w)
        {
            camera_pos = add3v(camera_pos, camera_look_increment);
        }
        if (move_s)
        {
            camera_pos = subtract3v(camera_pos, camera_look_increment);
        }
        if (move_a)
        {
            Vec3 camera_right = normalize3v(cross_multiply3v(preset_up_dir, camera_look));
            Vec3 camera_right_scaled = scaler_multiply3v(camera_right, camera_speed_adjusted);
            camera_pos = add3v(camera_pos, camera_right_scaled);
        }
        if (move_d)
        {
            Vec3 camera_right = normalize3v(cross_multiply3v(preset_up_dir, camera_look));
            Vec3 camera_right_scaled = scaler_multiply3v(camera_right, camera_speed_adjusted);
            camera_pos = subtract3v(camera_pos, camera_right_scaled);
        }
        view = camera_create4m(camera_pos, add3v(camera_pos, camera_look), preset_up_dir);

        // object shader program stuff: refresh View at UBO offset 0
        Mat4 col_major_view = to_col_major4m(view);
        glBindBuffer(GL_UNIFORM_BUFFER, ubo_camera_block);
        glBufferSubData(GL_UNIFORM_BUFFER, 0, 64, col_major_view.buffer);
        glBindBuffer(GL_UNIFORM_BUFFER, 0);

        time_prev = time_curr;

        // OUTPUT
        glClearColor(1.0f, 0.6f, .6f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        {
            // planet: single draw with a scaled + translated Model matrix
            glUseProgram(shader_program);
            Vec3 translation_iter = model_translations[0];
            Mat4 model = init_value4m(1.0);
            Mat4 model_scale = scaling_matrix4m(5.0f, 5.0f, 5.0f);
            model = multiply4m(model_scale, model);
            Mat4 model_translation = translation_matrix4m(translation_iter.x, translation_iter.y, translation_iter.z);
            model = multiply4m(model_translation, model);
            u32 model_loc = glGetUniformLocation(shader_program, "Model");
            glUniformMatrix4fv(model_loc, 1, GL_TRUE, model.buffer);
            planet_model.draw(shader_program);
        }
        {
            // rock field: one instanced draw; transforms come from the
            // per-instance mat4 attribute set up by instance_mesh()
            rock_model.draw_instanced(inst_shader_program, instance_count);
        }

        SDL_GL_SwapWindow(window);
    }

    // opengl free calls
    // NOTE(review): inst_shader_program is never deleted and `offsets` is
    // never freed — harmless at process exit, but worth noting.
    //glDeleteVertexArrays(1, &vao);
    //glDeleteBuffers(1, &vbo);
    glDeleteProgram(shader_program);

    // sdl free calls
    SDL_GL_DeleteContext(context);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}
diff --git a/source/lessons/instanced_rendering/planetary models/math.h b/source/lessons/instanced_rendering/planetary models/math.h
new file mode 100644
index 0000000..fda6bf8
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/math.h
@@ -0,0 +1,413 @@
+#ifndef MATH_H
+#define MATH_H
+
+#define PI 3.14159265358979323846264338327950288f
+#define Square(x) ((x)*(x))
+#define To_Radian(x) ((x) * PI / 180.0f)
+#define To_Degree(x) ((x) * 180.0f / PI)
+
+// @notes:
+// I dislike the mat4 discrepancy in the raw data. It's called buffer here while everywhere else it's called
+// data. It's weird and I hate it.
+
+r32 clampf(r32 x, r32 bottom, r32 top)
+{
+ if (x < bottom)
+ {
+ x = bottom;
+ }
+ else if (x > top)
+ {
+ x = top;
+ }
+
+ return x;
+}
+
+// ==== Vector Math ====
+
// 2D vector; `data` aliases the named components for indexed access.
union Vec2 {
    struct {
        r32 x;
        r32 y;
    };
    r32 data[2];
};

// 3D vector; `data` aliases the named components for indexed access.
union Vec3 {
    struct {
        r32 x;
        r32 y;
        r32 z;
    };
    r32 data[3];
};

// 4D vector (also used as one row of a Mat4).
union Vec4 {
    struct {
        r32 x;
        r32 y;
        r32 z;
        r32 w;
    };
    r32 data[4];
};

// 4x4 matrix stored row-major: `xyzw` views each row as a Vec4, `data` is
// [row][col], and `buffer` is the flat 16-float span handed to
// glUniformMatrix4fv (transposition handled by GL_TRUE or to_col_major4m).
union Mat4 {
    Vec4 xyzw[4];
    r32 data[4][4];
    r32 buffer[16];
};
+
+// ========================================================== Vec3 ==========================================================
+
+Vec3 init3v(r32 x, r32 y, r32 z)
+{
+ Vec3 res;
+ res.x = x;
+ res.y = y;
+ res.z = z;
+
+ return res;
+}
+
+Vec3 scaler_add3v(Vec3 vec, r32 scaler)
+{
+ Vec3 res;
+ res.x = vec.x + scaler;
+ res.y = vec.y + scaler;
+ res.z = vec.z + scaler;
+
+ return res;
+}
+
+Vec3 scaler_multiply3v(Vec3 vec, r32 scaler)
+{
+ Vec3 res;
+ res.x = vec.x * scaler;
+ res.y = vec.y * scaler;
+ res.z = vec.z * scaler;
+
+ return res;
+}
+
+Vec3 scaler_divide3v(Vec3 vec, r32 scaler)
+{
+ Vec3 res;
+ res.x = vec.x / scaler;
+ res.y = vec.y / scaler;
+ res.z = vec.z / scaler;
+
+ return res;
+}
+
+
+Vec3 add3v(Vec3 a, Vec3 b)
+{
+ Vec3 res;
+ res.x = a.x + b.x;
+ res.y = a.y + b.y;
+ res.z = a.z + b.z;
+
+ return res;
+}
+
+Vec3 subtract3v(Vec3 a, Vec3 b)
+{
+ Vec3 res;
+ res.x = a.x - b.x;
+ res.y = a.y - b.y;
+ res.z = a.z - b.z;
+
+ return res;
+}
+
+r32 dot_multiply3v(Vec3 a, Vec3 b)
+{
+ r32 x = a.x * b.x;
+ r32 y = a.y * b.y;
+ r32 z = a.z * b.z;
+
+ r32 res = x + y + z;
+
+ return res;
+}
+
+r32 magnitude3v(Vec3 vec)
+{
+ r32 res = sqrtf(Square(vec.x) + Square(vec.y) + Square(vec.z));
+ return res;
+}
+
+Vec3 normalize3v(Vec3 vec)
+{
+ r32 magnitude = magnitude3v(vec);
+ Vec3 res = scaler_divide3v(vec, magnitude);
+ return res;
+}
+
// NOTE(review): this guard looks inverted — `#ifndef FUN_CALCS` compiles
// angle3v precisely when FUN_CALCS is NOT defined. If the intent was "only
// build the fun calcs when requested", this should be #ifdef. Confirm.
#ifndef FUN_CALCS
// Returns the angle (radians) between vectors a and b via the normalized
// dot product. Undefined for zero-length inputs.
r32 angle3v(Vec3 a, Vec3 b)
{
    Vec3 a_norm = normalize3v(a);
    Vec3 b_norm = normalize3v(b);

    r32 dot_product = dot_multiply3v(a_norm, b_norm);
    r32 res = acosf(dot_product);

    return res;
}
#endif
+
+Vec3 cross_multiply3v(Vec3 a, Vec3 b)
+{
+ Vec3 res;
+ res.x = (a.y * b.z) - (a.z * b.y);
+ res.y = (a.z * b.x) - (a.x * b.z);
+ res.z = (a.x * b.y) - (a.y * b.x);
+
+ return res;
+}
+
+// ============================================== Vec4, Mat4 ==============================================
+
+Vec4 init4v(r32 x, r32 y, r32 z, r32 w)
+{
+ Vec4 res;
+ res.x = x;
+ res.y = y;
+ res.z = z;
+ res.w = w;
+
+ return res;
+}
+
+Mat4 init_value4m(r32 value)
+{
+ Mat4 res = {0};
+ res.data[0][0] = value;
+ res.data[1][1] = value;
+ res.data[2][2] = value;
+ res.data[3][3] = value;
+
+ return res;
+}
+
+// @note: These operations are just defined and not expressed. They are kept here for completeness sake BUT
+// since I have not had to do anything related to these, I have not created them.
+Vec4 scaler_add4v(Vec4 vec, r32 scaler);
+Vec4 scaler_subtract4v(Vec4 vec, r32 scaler);
+Vec4 scaler_multiply4v(Vec4 vec, r32 scaler);
+Vec4 scaler_divide4v(Vec4 vec, r32 scaler);
+Vec4 add4v(Vec4 a, Vec4 b);
+Vec4 subtract4v(Vec4 a, Vec4 b);
+Vec4 dot_multiply4v(Vec4 a, Vec4 b);
+
+Mat4 add4m(Mat4 a, Mat4 b)
+{
+ Mat4 res;
+ // row 0
+ res.data[0][0] = a.data[0][0] + b.data[0][0];
+ res.data[0][1] = a.data[0][1] + b.data[0][1];
+ res.data[0][2] = a.data[0][2] + b.data[0][2];
+ res.data[0][3] = a.data[0][3] + b.data[0][3];
+ // row 1
+ res.data[1][0] = a.data[1][0] + b.data[1][0];
+ res.data[1][1] = a.data[1][1] + b.data[1][1];
+ res.data[1][2] = a.data[1][2] + b.data[1][2];
+ res.data[1][3] = a.data[1][3] + b.data[1][3];
+ // row 2
+ res.data[2][0] = a.data[2][0] + b.data[2][0];
+ res.data[2][1] = a.data[2][1] + b.data[2][1];
+ res.data[2][2] = a.data[2][2] + b.data[2][2];
+ res.data[2][3] = a.data[2][3] + b.data[2][3];
+ // row 3
+ res.data[3][0] = a.data[3][0] + b.data[3][0];
+ res.data[3][1] = a.data[3][1] + b.data[3][1];
+ res.data[3][2] = a.data[3][2] + b.data[3][2];
+ res.data[3][3] = a.data[3][3] + b.data[3][3];
+
+ return res;
+}
+
+Mat4 subtract4m(Mat4 a, Mat4 b)
+{
+ Mat4 res;
+ // row 0
+ res.data[0][0] = a.data[0][0] - b.data[0][0];
+ res.data[0][1] = a.data[0][1] - b.data[0][1];
+ res.data[0][2] = a.data[0][2] - b.data[0][2];
+ res.data[0][3] = a.data[0][3] - b.data[0][3];
+ // row 1
+ res.data[1][0] = a.data[1][0] - b.data[1][0];
+ res.data[1][1] = a.data[1][1] - b.data[1][1];
+ res.data[1][2] = a.data[1][2] - b.data[1][2];
+ res.data[1][3] = a.data[1][3] - b.data[1][3];
+ // row 2
+ res.data[2][0] = a.data[2][0] - b.data[2][0];
+ res.data[2][1] = a.data[2][1] - b.data[2][1];
+ res.data[2][2] = a.data[2][2] - b.data[2][2];
+ res.data[2][3] = a.data[2][3] - b.data[2][3];
+ // row 3
+ res.data[3][0] = a.data[3][0] - b.data[3][0];
+ res.data[3][1] = a.data[3][1] - b.data[3][1];
+ res.data[3][2] = a.data[3][2] - b.data[3][2];
+ res.data[3][3] = a.data[3][3] - b.data[3][3];
+
+ return res;
+}
+
+Vec4 multiply4vm(Vec4 vec, Mat4 mat)
+{
+ /*
+ * @note: Incase I get confused about this in the future.
+ *
+ * Everything is row-order, which means that things in memory are laid out row first. So with a sample matrix
+ * we have this order in memory: r1c1 r1c2 r1c3 r1c4 r2c1 ... (r = row, c = column). The same holds true for
+ * vectors. (maybe move this explanation to the top)
+ *
+ * Now, multiply4vm will multiply a vector with a matrix. Conventionally that does not make any sense as
+ * a vector is usually 4x1 and a matrix ix 4x4.
+ * What this function considers a vector, while it is a vector, it is infact a row from a matrix, which
+ * means that the vector is 1x4 and the matrix is 4x4.
+ *
+ * The function is meant to supplement the matrix multiplication process to alleviate the multiple lines of code
+ * we have to write when multiplying the row of a left matrix to each column of the right matrix
+ */
+ Vec4 res = { 0 };
+ res.x = (mat.data[0][0] * vec.x) + (mat.data[0][1] * vec.y) + (mat.data[0][2] * vec.z) + (mat.data[0][3] * vec.w);
+ res.y = (mat.data[1][0] * vec.x) + (mat.data[1][1] * vec.y) + (mat.data[1][2] * vec.z) + (mat.data[1][3] * vec.w);
+ res.z = (mat.data[2][0] * vec.x) + (mat.data[2][1] * vec.y) + (mat.data[2][2] * vec.z) + (mat.data[2][3] * vec.w);
+ res.w = (mat.data[3][0] * vec.x) + (mat.data[3][1] * vec.y) + (mat.data[3][2] * vec.z) + (mat.data[3][3] * vec.w);
+
+ return res;
+}
+
+Mat4 multiply4m(Mat4 a, Mat4 b)
+{
+ Mat4 res = { 0 };
+
+ res.xyzw[0] = multiply4vm(a.xyzw[0], b);
+ res.xyzw[1] = multiply4vm(a.xyzw[1], b);
+ res.xyzw[2] = multiply4vm(a.xyzw[2], b);
+ res.xyzw[3] = multiply4vm(a.xyzw[3], b);
+
+ return res;
+}
+
+// ==== Matrix Transformation ====
+
+Mat4 scaling_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 scaling matrix for scaling each of the x,y,z axis
+{
+ Mat4 res = init_value4m(1.0f);
+ res.data[0][0] = x;
+ res.data[1][1] = y;
+ res.data[2][2] = z;
+
+ return res;
+}
+
+Mat4 translation_matrix4m(r32 x, r32 y, r32 z) // generates a 4x4 translation matrix for translation along each of the x,y,z axis
+{
+ Mat4 res = init_value4m(1.0f);
+ res.data[0][3] = x;
+ res.data[1][3] = y;
+ res.data[2][3] = z;
+
+ return res;
+}
+
// Axis-angle rotation matrix (Rodrigues form). `axis` is normalized here, so
// callers may pass any non-zero direction; `angle_radians` must already be
// in radians (see To_Radian).
// NOTE(review): the off-diagonal sin terms are the transpose of the most
// commonly published column-vector form — consistent with this codebase's
// row-major/row-vector convention, but worth confirming against usage.
Mat4 rotation_matrix4m(r32 angle_radians, Vec3 axis) // generates a 4x4 rotation matrix for rotation along each of the x,y,z axis
{
    Mat4 res = init_value4m(1.0f);
    axis = normalize3v(axis);

    r32 cos_theta = cosf(angle_radians);
    r32 sin_theta = sinf(angle_radians);
    r32 cos_value = 1.0f - cos_theta;

    res.data[0][0] = (axis.x * axis.x * cos_value) + cos_theta;
    res.data[0][1] = (axis.x * axis.y * cos_value) + (axis.z * sin_theta);
    res.data[0][2] = (axis.x * axis.z * cos_value) - (axis.y * sin_theta);

    res.data[1][0] = (axis.x * axis.y * cos_value) - (axis.z * sin_theta);
    res.data[1][1] = (axis.y * axis.y * cos_value) + cos_theta;
    res.data[1][2] = (axis.y * axis.z * cos_value) + (axis.x * sin_theta);

    res.data[2][0] = (axis.x * axis.z * cos_value) + (axis.y * sin_theta);
    res.data[2][1] = (axis.z * axis.y * cos_value) - (axis.x * sin_theta);
    res.data[2][2] = (axis.z * axis.z * cos_value) + cos_theta;

    return res;
}
+
// General (possibly asymmetric) frustum projection, OpenGL convention:
// maps eye space to clip space with z in [-near, -far] going to [-1, 1].
// See https://www.songho.ca/opengl/gl_projectionmatrix.html for derivation.
// NOTE(review): parameter names `near`/`far` collide with legacy macros on
// Windows (<windef.h>) — rename if this header ever builds there.
Mat4 perspective_projection_matrix4m(r32 left, r32 right, r32 bottom, r32 top, r32 near, r32 far)
{
    Mat4 res = { 0 };

    res.data[0][0] = (2.0 * near)/(right - left);
    res.data[0][2] = (right + left)/(right - left);

    res.data[1][1] = (2.0 * near)/(top - bottom);
    res.data[1][2] = (top + bottom)/(top - bottom);

    res.data[2][2] = -(far + near)/(far - near);
    res.data[2][3] = -2.0*far*near/(far - near);

    // w = -z: performs the perspective divide after clipping
    res.data[3][2] = -1.0;

    return res;
}
+
// Symmetric perspective projection from a vertical field of view (radians),
// aspect ratio (width/height), and near/far clip distances — the common
// gluPerspective-style special case of the general frustum matrix above.
Mat4 perspective4m(r32 fov, r32 aspect_ratio, r32 near, r32 far)
{
    // cot(fov/2): focal-length style scale for the y axis
    r32 cotangent = 1.0f / tanf(fov / 2.0f);

    Mat4 res = { 0 };

    res.data[0][0] = cotangent / aspect_ratio;

    res.data[1][1] = cotangent;

    res.data[2][2] = -(far + near) / (far - near);
    res.data[2][3] = -2.0f * far * near / (far - near);

    // w = -z: performs the perspective divide after clipping
    res.data[3][2] = -1.0f;

    return res;
}
+
// Builds a view (look-at) matrix from an orthonormal camera basis
// (right/up/forward) and the camera position: rotation rows are the basis
// vectors, translation is -basis.dot(position) — i.e. the inverse of the
// camera's world transform. Callers must pass already-orthonormal vectors.
Mat4 lookat4m(Vec3 up, Vec3 forward, Vec3 right, Vec3 position)
{
    /*
     * @note: The construction of the lookat matrix is not obvious. For that reason here is the supplemental matrial I have used to understand
     * things while I maintain my elementary understanding of linear algebra.
     * 1. This youtube video (https://www.youtube.com/watch?v=3ZmqJb7J5wE) helped me understand why we invert matrices.
     * It is because, we are moving from the position matrix which is a global to the view matrix which
     * is a local. It won't be very clear from this illustration alone, so you would be best served watching the video and recollecting and understanding from there.
     * 2. This article (https://twodee.org/blog/17560) derives (or rather shows), in a very shallow way how we get to the look at matrix.
     */
    Mat4 res = init_value4m(1.0);
    res.xyzw[0] = Vec4{ right.x,   right.y,   right.z,   -dot_multiply3v(right, position) };
    res.xyzw[1] = Vec4{ up.x,      up.y,      up.z,      -dot_multiply3v(up, position) };
    res.xyzw[2] = Vec4{ forward.x, forward.y, forward.z, -dot_multiply3v(forward, position) };
    res.xyzw[3] = Vec4{ 0.0f,      0.0f,      0.0f,       1.0f };

    return res;
}
+
+Mat4 to_col_major4m(Mat4 mat)
+{
+ Mat4 res = {0.0f};
+
+ res.data[0][0] = mat.data[0][0]; res.data[1][0] = mat.data[0][1]; res.data[2][0] = mat.data[0][2]; res.data[3][0] = mat.data[0][3];
+
+ res.data[0][1] = mat.data[1][0]; res.data[1][1] = mat.data[1][1]; res.data[2][1] = mat.data[1][2]; res.data[3][1] = mat.data[1][3];
+
+ res.data[0][2] = mat.data[2][0]; res.data[1][2] = mat.data[2][1]; res.data[2][2] = mat.data[2][2]; res.data[3][2] = mat.data[2][3];
+
+ res.data[0][3] = mat.data[3][0]; res.data[1][3] = mat.data[3][1]; res.data[2][3] = mat.data[3][2]; res.data[3][3] = mat.data[3][3];
+
+ return res;
+}
+#endif
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/cubemap.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/cubemap.fs.glsl
new file mode 100644
index 0000000..72dfe4c
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/cubemap.fs.glsl
@@ -0,0 +1,9 @@
#version 330 core

// Skybox fragment shader: samples the cubemap with the interpolated
// direction vector produced by the cubemap vertex shader.
in vec3 TexCoords;
uniform samplerCube skybox;
out vec4 FragColor;

void main() {
    FragColor = texture(skybox, TexCoords);
};
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/cubemap.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/cubemap.vs.glsl
new file mode 100644
index 0000000..956673a
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/cubemap.vs.glsl
@@ -0,0 +1,14 @@
#version 330 core
layout(location=0) in vec3 aPos;

// NOTE(review): Model is declared but unused in this shader.
uniform mat4 Model;
uniform mat4 View;
uniform mat4 Projection;

out vec3 TexCoords;

void main() {
    vec4 pos = Projection*View*vec4(aPos, 1.0);
    // xyww trick: after the perspective divide, depth is w/w = 1.0, so the
    // skybox always sits at the far plane behind everything else.
    gl_Position = vec4(pos.xyww);
    // the cube's local position doubles as the cubemap sampling direction
    TexCoords = aPos;
};
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/depth_test.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/depth_test.fs.glsl
new file mode 100644
index 0000000..796d849
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/depth_test.fs.glsl
@@ -0,0 +1,30 @@
#version 330 core


// Plain textured fragment shader; MakeDepthLinear is kept only as a
// learning aid (unused by main below).
in vec2 TexCoords;
in vec3 VertexWorldPos;
uniform sampler2D TexId;
out vec4 FragColor;

// clip-plane distances matching the C-side projection; uniform defaults
uniform float near = 0.1f;
uniform float far = 100.0f;

/* @note
float linear_fragment_depth = MakeDepthLinear(non_linear_fragment_depth);
float scaled_lfd = linear_fragment_depth/far;

gives us the z value in eye space.
This is purely for learning purposes.
The equation used in MakeDepthLinear is derived from the PerspectiveProjectionMatrix.
Take a look at the equation for that in the codebase
or here: https://www.songho.ca/opengl/gl_projectionmatrix.html
*/
float MakeDepthLinear(float depth) {
    // depth buffer value [0,1] -> NDC z [-1,1] -> eye-space distance
    float ndc = 2.0f*depth - 1;
    float linear_depth = (2.0 * far * near)/(far + near - ndc*(far - near));
    return linear_depth;
}

void main() {
    FragColor = texture(TexId, TexCoords);
}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/depth_test.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/depth_test.vs.glsl
new file mode 100644
index 0000000..827da20
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/depth_test.vs.glsl
@@ -0,0 +1,21 @@
#version 330 core
layout(location=0) in vec3 aPos;
layout(location=1) in vec2 aTex;

uniform mat4 Model;
// shared camera UBO, bound to binding point 0 on the C side
layout (std140) uniform Matrices {
    mat4 View;          // start: 0   // end: 16 * 4 = 64
    mat4 Projection;    // start: 64  // end: 64 + 64 = 128
};

out vec2 TexCoords;
out vec3 VertexWorldPos;

// @note: I still do not fully understand how the FragNormal calculation works. Need to make sure I intuitively
// get that

void main() {
    gl_Position = Projection*View*Model*vec4(aPos, 1.0);
    // world-space position for lighting in the fragment stage
    VertexWorldPos = vec3(Model * vec4(aPos, 1.0));
    TexCoords = aTex;
};
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/fbo.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/fbo.fs.glsl
new file mode 100644
index 0000000..e12ad33
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/fbo.fs.glsl
@@ -0,0 +1,72 @@
#version 330 core

// Post-processing fragment shader for a fullscreen framebuffer quad.
// NOTE(review): main() currently passes the texture through unchanged; the
// filter_* helpers below are alternative effects kept for experimentation.
in vec2 TexCoords;
uniform sampler2D TexId;
out vec4 FragColor;

// Inverts RGB, leaves alpha at 1.
vec4 filter_color_invert(vec4 color)
{
    vec4 res = vec4(vec3(1.0) - vec3(color), 1.0);
    return res;
}

// Luminance-weighted grayscale.
vec4 filter_color_grayscale(vec4 color)
{
    // we will need to average the colors
    // float average = (color.x + color.y + color.z) / 3.0f;
    // in reality, human our most sensitive towards green and least to blue, so will need to weight those
    float average = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
    vec4 res = vec4(vec3(average), 1.0);

    return res;
}

// @note: different kernels for experimentation
const float kernel_sharpen[9] = float[](
    -1, -1, -1,
    -1,  9, -1,
    -1, -1, -1
);

const float kernel_blur[9] = float[](
    1.0/16.0, 2.0/16.0, 1.0/16.0,
    2.0/16.0, 4.0/16.0, 2.0/16.0,
    1.0/16.0, 2.0/16.0, 1.0/16.0
);

const float kernel_edge_detection[9] = float[](
    1,  1, 1,
    1, -8, 1,
    1,  1, 1
);

// Applies the 3x3 convolution selected below to the texture around this
// fragment. NOTE(review): "kernal" is a spelling of "kernel" kept as-is.
vec4 filter_kernal_effects()
{
    // sample spacing in UV units; controls the effect's footprint
    const float offset = 1.0/300.0;
    vec2 offsets[9] = vec2[](
        vec2(-offset, offset),  // top left
        vec2( 0,      offset),  // top center
        vec2( offset, offset),  // top right
        vec2(-offset, 0),       // center left
        vec2( 0,      0),       // center center
        vec2( offset, 0),       // center right
        vec2(-offset, -offset), // bot left
        vec2( 0,      -offset), // bot center
        vec2( offset, -offset)  // bot right
    );

    float kernal[9] = kernel_edge_detection;
    vec3 kernalValue = vec3(0.0);
    vec3 sampleTex[9];
    for (int i=0; i<9; i++) {
        sampleTex[i] = vec3(texture(TexId, TexCoords + offsets[i]));
        kernalValue += (kernal[i] * sampleTex[i]);
    }

    vec4 res = vec4(kernalValue, 1.0);
    return res;
}

void main() {
    FragColor = texture(TexId, TexCoords);
}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/fbo.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/fbo.vs.glsl
new file mode 100644
index 0000000..82d7211
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/fbo.vs.glsl
@@ -0,0 +1,10 @@
#version 330 core
layout(location=0) in vec3 aPos;
layout(location=1) in vec2 aTex;

out vec2 TexCoords;

// Fullscreen-quad vertex shader: positions are already in NDC, so no
// matrices are applied; z is pinned to 0.
void main() {
    gl_Position = vec4(aPos.x, aPos.y, 0.0f, 1.0f);
    TexCoords = aTex;
};
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/instanced_model.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/instanced_model.vs.glsl
new file mode 100644
index 0000000..2456a3c
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/instanced_model.vs.glsl
@@ -0,0 +1,29 @@
#version 330 core
layout(location=0) in vec3 aPos;
layout(location=1) in vec3 aNormal;
layout(location=2) in vec2 aTex;
// per-instance model matrix: a mat4 attribute consumes four consecutive
// attribute locations (3, 4, 5, 6) — matches the C-side
// glVertexAttribPointer(3..6)/glVertexAttribDivisor setup.
layout(location=3) in mat4 aTransform;

// uniform mat4 Model;
// shared camera UBO, bound to binding point 0 on the C side
layout (std140) uniform Matrices {
    mat4 View;          // start: 0   // end: 16 * 4 = 64
    mat4 Projection;    // start: 64  // end: 64 + 64 = 128
};

// NOTE(review): Model is declared but unused — aTransform replaces it here.
uniform mat4 Model;

out vec2 TexCoords;
out vec3 VertexWorldPos;
out vec3 FragNormal;

// @note: I still do not fully understand how the FragNormal calculation works. Need to make sure I intuitively
// get that

void main() {
    gl_Position = Projection*View*aTransform*vec4(aPos, 1.0);

    VertexWorldPos = vec3(aTransform * vec4(aPos, 1.0));
    // normal matrix (inverse-transpose) handles non-uniform scale in aTransform
    FragNormal = mat3(transpose(inverse(aTransform))) * aNormal;
    FragNormal = normalize(FragNormal);
    TexCoords = aTex;
};
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/instancing.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/instancing.fs.glsl
new file mode 100644
index 0000000..1ec8011
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/instancing.fs.glsl
@@ -0,0 +1,9 @@
#version 330 core


// Debug fragment shader: colors by screen position.
// NOTE(review): 1024x768 is hard-coded here and must match the window size
// set in main.cpp — consider a resolution uniform.
// uniform sampler2D TexId;
out vec4 FragColor;

void main() {
    FragColor = vec4(gl_FragCoord.x/1024.0, gl_FragCoord.y/768.0, 0.0, 1.0f);
}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/instancing.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/instancing.vs.glsl
new file mode 100644
index 0000000..eff2fae
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/instancing.vs.glsl
@@ -0,0 +1,16 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in mat4 aOffset; // per-instance transform; a mat4 attribute occupies locations 1-4
+
+// uniform mat4 Model;
+layout (std140) uniform Matrices {
+	mat4 View;        // start: 0   // end: 16 * 4 = 64
+	mat4 Projection;  // start: 64  // end: 64 + 64 = 128
+};
+
+// Position-only instanced vertex shader: the per-instance aOffset matrix
+// replaces the usual Model uniform.
+
+void main() {
+	gl_Position = Projection*View*aOffset*vec4(aPos, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/model.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/model.fs.glsl
new file mode 100644
index 0000000..dbe5cf4
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/model.fs.glsl
@@ -0,0 +1,150 @@
+#version 330 core
+
+#define MAX_TEXTURES 32
+struct Material {
+	sampler2D diffuse[MAX_TEXTURES];
+	sampler2D specular[MAX_TEXTURES];
+	float shininess;
+};
+
+struct DirectionalLight {
+	vec3 direction;
+
+	vec3 ambient;
+	vec3 diffuse;
+	vec3 specular;
+};
+
+struct PointLight {
+	vec3 position;
+
+	vec3 ambient;
+	vec3 diffuse;
+	vec3 specular;
+
+	// attenuation factors
+	float kC;
+	float kL;
+	float kQ;
+};
+
+struct SpotLight {
+	vec3 position;
+
+	vec3 ambient;
+	vec3 diffuse;
+	vec3 specular;
+
+	// attenuation factors
+	float kC;
+	float kL;
+	float kQ;
+
+	// vector for the direction directly in front of the spotlight
+	vec3 front;
+
+	// spot cutoff cosines (compared against dot products below)
+	float radius_inner;
+	float radius_outer; // to smooth out the light
+
+};
+
+// this is the result of a light creation. This contains the multipliers for each kind of a light we want
+// to have.
+struct LightFactor {
+	vec3 ambient;
+	vec3 diffuse;
+	vec3 specular;
+};
+
+in vec2 TexCoords;
+in vec3 FragNormal;
+in vec3 VertexWorldPos;
+uniform Material material;
+uniform PointLight pointLight;
+uniform DirectionalLight dirLight;
+uniform vec3 cameraPosition;
+
+out vec4 FragColor;
+
+LightFactor make_directional_light(DirectionalLight light, vec3 CONST_viewDir) {
+	LightFactor res;
+
+	vec3 DL_lightDir = normalize(-light.direction);
+	res.ambient = light.ambient;
+
+	float DL_diffuseStrength = max(dot(DL_lightDir, FragNormal), 0.0);
+	res.diffuse = light.diffuse * DL_diffuseStrength;
+
+	vec3 DL_reflectDir = reflect(-DL_lightDir, FragNormal);
+	float DL_specularity = max(dot(CONST_viewDir, DL_reflectDir), 0.0);
+	float DL_shinePower = pow(DL_specularity, material.shininess);
+	res.specular = light.specular * DL_shinePower;
+
+	return res;
+}
+
+LightFactor make_point_light(PointLight light, vec3 CONST_viewDir) {
+	LightFactor res;
+
+	float PL_lightDistance = length(light.position - VertexWorldPos);
+	float PL_attenuationFactor = 1.0 /
+		(light.kC + (light.kL * PL_lightDistance) + (light.kQ * PL_lightDistance * PL_lightDistance));
+	res.ambient = PL_attenuationFactor * light.ambient;
+
+	vec3 PL_lightDir = normalize(light.position - VertexWorldPos);
+	float PL_diffuseStrength = max(dot(PL_lightDir, FragNormal), 0.0);
+	res.diffuse = PL_attenuationFactor * light.diffuse * PL_diffuseStrength;
+
+	vec3 PL_reflectDir = reflect(-PL_lightDir, FragNormal);
+	float PL_specularity = max(dot(CONST_viewDir, PL_reflectDir), 0.0);
+	float PL_shinePower = pow(PL_specularity, material.shininess);
+	res.specular = PL_attenuationFactor * PL_shinePower * light.specular;
+
+	return res;
+}
+
+LightFactor make_spot_light(SpotLight light, vec3 CONST_viewDir) {
+	LightFactor res;
+
+	float SL_lightDistance = length(light.position - VertexWorldPos);
+	float SL_attenuationFactor = 1.0 /
+		(light.kC + (light.kL * SL_lightDistance) + (light.kQ * SL_lightDistance * SL_lightDistance));
+	vec3 SL_lightDir = normalize(light.position - VertexWorldPos);
+
+	res.ambient = SL_attenuationFactor * light.ambient;
+
+	float SL_diffAmount = dot(SL_lightDir, normalize(-light.front));
+	float SL_spotLightFadeFactor = clamp((SL_diffAmount - light.radius_outer)/(light.radius_inner - light.radius_outer), 0.0f, 1.0f);
+	float SL_diffuseStrength = max(dot(SL_lightDir, FragNormal), 0.0);
+	res.diffuse = SL_spotLightFadeFactor * SL_attenuationFactor * light.diffuse * SL_diffuseStrength;
+
+	vec3 SL_reflectDir = reflect(-SL_lightDir, FragNormal);
+	float SL_specularity = max(dot(CONST_viewDir, SL_reflectDir), 0.0);
+	float SL_shinePower = pow(SL_specularity, material.shininess);
+	res.specular = SL_spotLightFadeFactor * SL_attenuationFactor * SL_shinePower * light.specular;
+
+	return res;
+}
+
+void main() {
+	//vec3 CONST_viewDir = normalize(cameraPosition - VertexWorldPos);
+	//vec3 combinedAmbience = vec3(0.0);
+	//vec3 combinedDiffuse = vec3(0.0);
+	//vec3 combinedSpecular = vec3(0.0);
+
+	//LightFactor DL_factors = make_directional_light(dirLight, CONST_viewDir);
+	//combinedAmbience += DL_factors.ambient;
+	//combinedDiffuse += DL_factors.diffuse;
+	//combinedSpecular += DL_factors.specular;
+
+	//LightFactor PL_factors = make_point_light(pointLight, CONST_viewDir);
+	//combinedAmbience += PL_factors.ambient;
+	//combinedDiffuse += PL_factors.diffuse;
+	//combinedSpecular += PL_factors.specular;
+
+	vec3 diffuseLight = vec3(texture(material.diffuse[0], TexCoords)); // only the first diffuse map is sampled for now
+
+	vec3 color = diffuseLight;
+	FragColor = vec4(color, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/model.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/model.vs.glsl
new file mode 100644
index 0000000..e4896c9
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/model.vs.glsl
@@ -0,0 +1,28 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+layout(location=2) in vec2 aTex;
+
+// uniform mat4 Model;
+layout (std140) uniform Matrices {
+	mat4 View;        // start: 0   // end: 16 * 4 = 64
+	mat4 Projection;  // start: 64  // end: 64 + 64 = 128
+};
+
+uniform mat4 Model;
+
+out vec2 TexCoords;
+out vec3 VertexWorldPos;
+out vec3 FragNormal;
+
+// @note: FragNormal uses the inverse-transpose of the model matrix (the "normal
+// matrix") so normals stay perpendicular to surfaces under non-uniform scaling.
+
+void main() {
+	gl_Position = Projection*View*Model*vec4(aPos, 1.0); // clip-space position
+
+	VertexWorldPos = vec3(Model * vec4(aPos, 1.0)); // world-space position for lighting
+	FragNormal = mat3(transpose(inverse(Model))) * aNormal;
+	FragNormal = normalize(FragNormal);
+	TexCoords = aTex;
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/refl.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/refl.fs.glsl
new file mode 100644
index 0000000..6d28392
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/refl.fs.glsl
@@ -0,0 +1,14 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+	vec3 I = normalize(Position - cameraPos); // incident view ray, camera -> fragment
+	vec3 R = reflect(I, normalize(Normal));   // mirror the view ray to sample the environment
+	FragColor = vec4(texture(skybox, R).rgb, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/refl.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/refl.vs.glsl
new file mode 100644
index 0000000..b8f2b97
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/refl.vs.glsl
@@ -0,0 +1,16 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+	Normal = mat3(transpose(inverse(Model))) * aNormal; // normal matrix handles non-uniform scaling
+	Position = vec3(Model * vec4(aPos, 1.0));           // world-space position for the reflection ray
+	gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/refr.fs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/refr.fs.glsl
new file mode 100644
index 0000000..6747ded
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/refr.fs.glsl
@@ -0,0 +1,15 @@
+#version 330 core
+
+in vec3 Normal;
+in vec3 Position;
+
+uniform samplerCube skybox;
+uniform vec3 cameraPos;
+out vec4 FragColor;
+
+void main() {
+	float refr_ratio = 1.0/1.52; // ratio of refractive indices: air (1.0) into glass (1.52)
+	vec3 I = normalize(Position - cameraPos); // incident view ray, camera -> fragment
+	vec3 R = refract(I, normalize(Normal), refr_ratio);
+	FragColor = vec4(texture(skybox, R).rgb, 1.0);
+}
diff --git a/source/lessons/instanced_rendering/planetary models/shaders/refr.vs.glsl b/source/lessons/instanced_rendering/planetary models/shaders/refr.vs.glsl
new file mode 100644
index 0000000..0554f0a
--- /dev/null
+++ b/source/lessons/instanced_rendering/planetary models/shaders/refr.vs.glsl
@@ -0,0 +1,20 @@
+#version 330 core
+layout(location=0) in vec3 aPos;
+layout(location=1) in vec3 aNormal;
+
+uniform mat4 View;
+uniform mat4 Model;
+uniform mat4 Projection;
+
+out vec3 Normal;
+out vec3 Position;
+
+void main() {
+	// @note: This is the calculation for getting the normal vector
+	// one that is unaffected by non-uniform scaling that is.
+	// look at the lighting chapter in learnopengl.com to understand this more
+	Normal = mat3(transpose(inverse(Model))) * aNormal;
+	Position = vec3(Model * vec4(aPos, 1.0)); // world-space position for the refraction ray
+	gl_Position = Projection * View * Model * vec4(aPos, 1.0);
+}
+