When using texture arrays, why do I not have to bind the sampler to the shader?

I am creating an array of textures using GL_TEXTURE_2D_ARRAY in my code:

// Load all images into OpenGL.
// Every layer of a GL_TEXTURE_2D_ARRAY shares a single width/height, so we
// validate each decoded PNG against the first one instead of silently
// uploading mismatched pixel data later.
unsigned int width = 0, height = 0;
std::vector<unsigned char> textures;
int num = 0;
for (const auto& each : image_list)
{
    // Load and decode one PNG file.
    std::vector<unsigned char> buffer, this_texture;
    unsigned int this_width = 0, this_height = 0;

    lodepng::load_file(buffer, each.string().c_str());
    auto lode_error = lodepng::decode(this_texture, this_width, this_height, buffer);
    if (lode_error)
    {
        LOG_ERROR("lodepng has reported this error: " + std::string(lodepng_error_text(lode_error)));
        return false;
    }
    if (num == 0)
    {
        // First image defines the dimensions of every array layer.
        width = this_width;
        height = this_height;
    }
    else if (this_width != width || this_height != height)
    {
        LOG_ERROR("texture '" + each.filename().string() + "' does not match the array dimensions");
        return false;
    }
    // Remember which array layer this file landed in, then append its pixels.
    m_indexes.insert(std::make_pair(each.filename().string(), num));
    textures.insert(textures.end(), this_texture.begin(), this_texture.end());
    num++;
}

// Select texture unit 0 before binding the new array texture.
glActiveTexture(GL_TEXTURE0);

// Generate the texture object and bind it to the 2D-array target.
glGenTextures(1, &m_texture_id);
glBindTexture(GL_TEXTURE_2D_ARRAY, m_texture_id);

// Upload all pixels in a single call; "depth" is the number of array layers.
// glTexImage3D allocates one width/height for every layer, so all decoded
// images must share these dimensions.
// NOTE(review): GL_RGBA as internalformat is an unsized format; a sized
// format such as GL_RGBA8 is generally recommended (and required by
// OpenGL ES 3) — confirm against the project's target GL version.
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,
             GL_RGBA,
             width, height,
             image_list.size(),
             0,
             GL_RGBA,
             GL_UNSIGNED_BYTE,
             textures.data());

// Filtering and wrapping. A non-mipmapped min filter (GL_NEAREST) keeps the
// texture "complete" without uploading or generating mipmap levels.
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

Here are the shaders I am using:

Vertex Shader

#version 430 core

/* layouts */
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec2 in_uv;
layout (location = 2) in vec4 in_tint;
layout (location = 3) in mat4 in_model;
layout (location = 7) in vec3 in_scale;
layout (location = 8) in float in_textured_index;

/* uniforms */
uniform mat4 ortho;
uniform mat4 view;

/* outputs */
out vec4 tint;
out vec2 uv;
out float textured_index;

void main()
{
    // Forward per-vertex attributes unchanged to the fragment stage.
    tint = in_tint;
    uv = in_uv;
    textured_index = in_textured_index;

    // Apply the per-instance scale in model space, then the full
    // projection * view * model chain.
    vec4 scaled_vertex = vec4(in_vertex * in_scale, 1.0);
    gl_Position = ortho * view * in_model * scaled_vertex;
}

Fragment Shader

#version 430 core

/* inputs from vertex shader */
in vec4 tint;
in vec2 uv;
in float textured_index;

/* output to GPU */
out vec4 fragment;

/* texture sampler */
uniform sampler2DArray sampler_unit;


void main()
{
    // Sample the array layer selected by textured_index and modulate the
    // texel by the interpolated per-vertex tint.
    vec4 texel = texture(sampler_unit, vec3(uv, textured_index));
    fragment = texel * tint;
}

Code to bind the texture array:

void ArrayTextures::attach()
{
    // Nothing to do when the stored id does not name a live texture object.
    if (!glIsTexture(m_texture_id))
        return;

    // Bind the array texture on texture unit 0.
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D_ARRAY, m_texture_id);
}

What I have noticed is that I do not have to attach the texture unit or the texture id to my shader, as long as the texture is bound with the function above. It just works, and I would like to understand why. In OpenGL 3.x, you had to set the sampler uniform to a texture unit before you could use it. Is there some automatic behavior behind the scenes that I am not aware of? Since I have a 5700 XT, could this be an AMD-specific oddity? What is the correct way here, so I can be sure it also works on NVIDIA?

Answer

This has nothing to do with the sampler type. The connection between the texture object and the texture sampler in the shader is the texture unit: the texture object must be bound to a texture unit, and the index of that texture unit must be assigned to the texture sampler uniform.

In GLSL, almost everything is default-initialized to 0 (or 0.0), so the default value of a sampler uniform is 0. Therefore, if the texture is bound to texture unit 0 (GL_TEXTURE0), it is not necessary to set the texture sampler uniform explicitly — it already selects unit 0 by default.