more royale work; almost there

hunterk 2016-08-25 12:38:14 -05:00
parent d33a8be758
commit cc581b1418
15 changed files with 4231 additions and 373 deletions

View file

@@ -16,7 +16,7 @@
# calculate scale_y5 (among other values):
# 1.) geom_max_aspect_ratio = (geom_max_aspect_ratio used to calculate scale_y5)
-shaders = "6"//"12"
+shaders = "11"//"12"
# Set an identifier, filename, and sampling traits for the phosphor mask texture.
# Load an aperture grille, slot mask, and an EDP shadow mask, and load a small

View file

@@ -1,4 +1,5 @@
-#define BLOOM_FUNCTIONS
+#ifndef BLOOM_FUNCTIONS_H
+#define BLOOM_FUNCTIONS_H
/////////////////////////////// BLOOM CONSTANTS //////////////////////////////
@@ -127,6 +128,72 @@ float get_center_weight(const float sigma)
#endif
}
vec3 tex2DblurNfast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy, const float sigma)
{
// If sigma is static, we can safely branch and use the smallest blur
// that's big enough. Ignore #define hints, because we'll only use a
// large blur if we actually need it, and the branches cost nothing.
#ifndef RUNTIME_PHOSPHOR_BLOOM_SIGMA
#define PHOSPHOR_BLOOM_BRANCH_FOR_BLUR_SIZE
#else
// It's still worth branching if the profile supports dynamic branches:
// It's much faster than using a hugely excessive blur, but each branch
// eats ~1% FPS.
#ifdef DRIVERS_ALLOW_DYNAMIC_BRANCHES
#define PHOSPHOR_BLOOM_BRANCH_FOR_BLUR_SIZE
#endif
#endif
// Failed optimization notes:
// I originally created a same-size mipmapped 5-tap separable blur10 that
// could handle any sigma by reaching into lower mip levels. It was
// as fast as blur25fast for runtime sigmas and a tad faster than
// blur31fast for static sigmas, but mipmapping two viewport-size passes
// ate 10% of FPS across all codepaths, so it wasn't worth it.
#ifdef PHOSPHOR_BLOOM_BRANCH_FOR_BLUR_SIZE
if(sigma <= blur9_std_dev)
{
return tex2Dblur9fast(tex, tex_uv, dxdy, sigma);
}
else if(sigma <= blur17_std_dev)
{
return tex2Dblur17fast(tex, tex_uv, dxdy, sigma);
}
else if(sigma <= blur25_std_dev)
{
return tex2Dblur25fast(tex, tex_uv, dxdy, sigma);
}
else if(sigma <= blur31_std_dev)
{
return tex2Dblur31fast(tex, tex_uv, dxdy, sigma);
}
else
{
return tex2Dblur43fast(tex, tex_uv, dxdy, sigma);
}
#else
// If we can't afford to branch, we can only guess at what blur
// size we need. Therefore, use the largest blur allowed.
#ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_12_PIXELS
return tex2Dblur43fast(tex, tex_uv, dxdy, sigma);
#else
#ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_9_PIXELS
return tex2Dblur31fast(tex, tex_uv, dxdy, sigma);
#else
#ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_6_PIXELS
return tex2Dblur25fast(tex, tex_uv, dxdy, sigma);
#else
#ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_3_PIXELS
return tex2Dblur17fast(tex, tex_uv, dxdy, sigma);
#else
return tex2Dblur9fast(tex, tex_uv, dxdy, sigma);
#endif // PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_3_PIXELS
#endif // PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_6_PIXELS
#endif // PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_9_PIXELS
#endif // PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_12_PIXELS
#endif // PHOSPHOR_BLOOM_BRANCH_FOR_BLUR_SIZE
}
float get_bloom_approx_sigma(const float output_size_x_runtime,
const float estimated_viewport_size_x)
{
@@ -212,3 +279,5 @@ float get_final_bloom_sigma(const float bloom_sigma_runtime)
return bloom_sigma_optimistic;
#endif
}
#endif // BLOOM_FUNCTIONS_H

View file

@@ -233,6 +233,258 @@ float get_fast_gaussian_weight_sum_inv(const float sigma)
(sigma - 0.0860587260734721))), 0.399334576340352/sigma);
}
vec3 tex2Dblur17fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy, const float sigma)
{
// Requires: Same as tex2Dblur11()
// Returns: A 1D 17x Gaussian blurred texture lookup using 1 nearest
// neighbor and 8 linear taps. It may be mipmapped depending
// on settings and dxdy.
// First get the texel weights and normalization factor as above.
const float denom_inv = 0.5/(sigma*sigma);
const float w0 = 1.0;
const float w1 = exp(-1.0 * denom_inv);
const float w2 = exp(-4.0 * denom_inv);
const float w3 = exp(-9.0 * denom_inv);
const float w4 = exp(-16.0 * denom_inv);
const float w5 = exp(-25.0 * denom_inv);
const float w6 = exp(-36.0 * denom_inv);
const float w7 = exp(-49.0 * denom_inv);
const float w8 = exp(-64.0 * denom_inv);
//const float weight_sum_inv = 1.0 / (w0 + 2.0 * (
// w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8));
const float weight_sum_inv = get_fast_gaussian_weight_sum_inv(sigma);
// Calculate combined weights and linear sample ratios between texel pairs.
const float w1_2 = w1 + w2;
const float w3_4 = w3 + w4;
const float w5_6 = w5 + w6;
const float w7_8 = w7 + w8;
const float w1_2_ratio = w2/w1_2;
const float w3_4_ratio = w4/w3_4;
const float w5_6_ratio = w6/w5_6;
const float w7_8_ratio = w8/w7_8;
// Statically normalize weights, sum weighted samples, and return:
vec3 sum = vec3(0.0);
sum += w7_8 * tex2D_linearize(tex, tex_uv - (7.0 + w7_8_ratio) * dxdy).rgb;
sum += w5_6 * tex2D_linearize(tex, tex_uv - (5.0 + w5_6_ratio) * dxdy).rgb;
sum += w3_4 * tex2D_linearize(tex, tex_uv - (3.0 + w3_4_ratio) * dxdy).rgb;
sum += w1_2 * tex2D_linearize(tex, tex_uv - (1.0 + w1_2_ratio) * dxdy).rgb;
sum += w0 * tex2D_linearize(tex, tex_uv).rgb;
sum += w1_2 * tex2D_linearize(tex, tex_uv + (1.0 + w1_2_ratio) * dxdy).rgb;
sum += w3_4 * tex2D_linearize(tex, tex_uv + (3.0 + w3_4_ratio) * dxdy).rgb;
sum += w5_6 * tex2D_linearize(tex, tex_uv + (5.0 + w5_6_ratio) * dxdy).rgb;
sum += w7_8 * tex2D_linearize(tex, tex_uv + (7.0 + w7_8_ratio) * dxdy).rgb;
return sum * weight_sum_inv;
}
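// Note for reference: the tap-pairing above is the standard bilinear-
// filtering identity. With linear filtering, one fetch between adjacent
// texels t_i and t_(i+1) at fractional offset r returns
// (1.0 - r)*t_i + r*t_(i+1), so choosing r = w_(i+1)/(w_i + w_(i+1)) folds
// two weighted taps into a single sample:
//   w_i*t_i + w_(i+1)*t_(i+1)
//       = (w_i + w_(i+1)) * ((1.0 - r)*t_i + r*t_(i+1))
// That's why w1_2_ratio = w2/w1_2 above, and why each sample sits at a
// fractional offset like (1.0 + w1_2_ratio)*dxdy.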
vec3 tex2Dblur25fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy, const float sigma)
{
// Requires: Same as tex2Dblur11()
// Returns: A 1D 25x Gaussian blurred texture lookup using 1 nearest
// neighbor and 12 linear taps. It may be mipmapped depending
// on settings and dxdy.
// First get the texel weights and normalization factor as above.
const float denom_inv = 0.5/(sigma*sigma);
const float w0 = 1.0;
const float w1 = exp(-1.0 * denom_inv);
const float w2 = exp(-4.0 * denom_inv);
const float w3 = exp(-9.0 * denom_inv);
const float w4 = exp(-16.0 * denom_inv);
const float w5 = exp(-25.0 * denom_inv);
const float w6 = exp(-36.0 * denom_inv);
const float w7 = exp(-49.0 * denom_inv);
const float w8 = exp(-64.0 * denom_inv);
const float w9 = exp(-81.0 * denom_inv);
const float w10 = exp(-100.0 * denom_inv);
const float w11 = exp(-121.0 * denom_inv);
const float w12 = exp(-144.0 * denom_inv);
//const float weight_sum_inv = 1.0 / (w0 + 2.0 * (
// w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8 + w9 + w10 + w11 + w12));
const float weight_sum_inv = get_fast_gaussian_weight_sum_inv(sigma);
// Calculate combined weights and linear sample ratios between texel pairs.
const float w1_2 = w1 + w2;
const float w3_4 = w3 + w4;
const float w5_6 = w5 + w6;
const float w7_8 = w7 + w8;
const float w9_10 = w9 + w10;
const float w11_12 = w11 + w12;
const float w1_2_ratio = w2/w1_2;
const float w3_4_ratio = w4/w3_4;
const float w5_6_ratio = w6/w5_6;
const float w7_8_ratio = w8/w7_8;
const float w9_10_ratio = w10/w9_10;
const float w11_12_ratio = w12/w11_12;
// Statically normalize weights, sum weighted samples, and return:
vec3 sum = vec3(0.0);
sum += w11_12 * tex2D_linearize(tex, tex_uv - (11.0 + w11_12_ratio) * dxdy).rgb;
sum += w9_10 * tex2D_linearize(tex, tex_uv - (9.0 + w9_10_ratio) * dxdy).rgb;
sum += w7_8 * tex2D_linearize(tex, tex_uv - (7.0 + w7_8_ratio) * dxdy).rgb;
sum += w5_6 * tex2D_linearize(tex, tex_uv - (5.0 + w5_6_ratio) * dxdy).rgb;
sum += w3_4 * tex2D_linearize(tex, tex_uv - (3.0 + w3_4_ratio) * dxdy).rgb;
sum += w1_2 * tex2D_linearize(tex, tex_uv - (1.0 + w1_2_ratio) * dxdy).rgb;
sum += w0 * tex2D_linearize(tex, tex_uv).rgb;
sum += w1_2 * tex2D_linearize(tex, tex_uv + (1.0 + w1_2_ratio) * dxdy).rgb;
sum += w3_4 * tex2D_linearize(tex, tex_uv + (3.0 + w3_4_ratio) * dxdy).rgb;
sum += w5_6 * tex2D_linearize(tex, tex_uv + (5.0 + w5_6_ratio) * dxdy).rgb;
sum += w7_8 * tex2D_linearize(tex, tex_uv + (7.0 + w7_8_ratio) * dxdy).rgb;
sum += w9_10 * tex2D_linearize(tex, tex_uv + (9.0 + w9_10_ratio) * dxdy).rgb;
sum += w11_12 * tex2D_linearize(tex, tex_uv + (11.0 + w11_12_ratio) * dxdy).rgb;
return sum * weight_sum_inv;
}
vec3 tex2Dblur31fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy, const float sigma)
{
// Requires: Same as tex2Dblur11()
// Returns: A 1D 31x Gaussian blurred texture lookup using 16 linear
// taps. It may be mipmapped depending on settings and dxdy.
// First get the texel weights and normalization factor as above.
const float denom_inv = 0.5/(sigma*sigma);
const float w0 = 1.0;
const float w1 = exp(-1.0 * denom_inv);
const float w2 = exp(-4.0 * denom_inv);
const float w3 = exp(-9.0 * denom_inv);
const float w4 = exp(-16.0 * denom_inv);
const float w5 = exp(-25.0 * denom_inv);
const float w6 = exp(-36.0 * denom_inv);
const float w7 = exp(-49.0 * denom_inv);
const float w8 = exp(-64.0 * denom_inv);
const float w9 = exp(-81.0 * denom_inv);
const float w10 = exp(-100.0 * denom_inv);
const float w11 = exp(-121.0 * denom_inv);
const float w12 = exp(-144.0 * denom_inv);
const float w13 = exp(-169.0 * denom_inv);
const float w14 = exp(-196.0 * denom_inv);
const float w15 = exp(-225.0 * denom_inv);
//const float weight_sum_inv = 1.0 /
// (w0 + 2.0 * (w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8 +
// w9 + w10 + w11 + w12 + w13 + w14 + w15));
const float weight_sum_inv = get_fast_gaussian_weight_sum_inv(sigma);
// Calculate combined weights and linear sample ratios between texel pairs.
// The center texel (with weight w0) is used twice, so halve its weight.
const float w0_1 = w0 * 0.5 + w1;
const float w2_3 = w2 + w3;
const float w4_5 = w4 + w5;
const float w6_7 = w6 + w7;
const float w8_9 = w8 + w9;
const float w10_11 = w10 + w11;
const float w12_13 = w12 + w13;
const float w14_15 = w14 + w15;
const float w0_1_ratio = w1/w0_1;
const float w2_3_ratio = w3/w2_3;
const float w4_5_ratio = w5/w4_5;
const float w6_7_ratio = w7/w6_7;
const float w8_9_ratio = w9/w8_9;
const float w10_11_ratio = w11/w10_11;
const float w12_13_ratio = w13/w12_13;
const float w14_15_ratio = w15/w14_15;
// Statically normalize weights, sum weighted samples, and return:
vec3 sum = vec3(0.0);
sum += w14_15 * tex2D_linearize(tex, tex_uv - (14.0 + w14_15_ratio) * dxdy).rgb;
sum += w12_13 * tex2D_linearize(tex, tex_uv - (12.0 + w12_13_ratio) * dxdy).rgb;
sum += w10_11 * tex2D_linearize(tex, tex_uv - (10.0 + w10_11_ratio) * dxdy).rgb;
sum += w8_9 * tex2D_linearize(tex, tex_uv - (8.0 + w8_9_ratio) * dxdy).rgb;
sum += w6_7 * tex2D_linearize(tex, tex_uv - (6.0 + w6_7_ratio) * dxdy).rgb;
sum += w4_5 * tex2D_linearize(tex, tex_uv - (4.0 + w4_5_ratio) * dxdy).rgb;
sum += w2_3 * tex2D_linearize(tex, tex_uv - (2.0 + w2_3_ratio) * dxdy).rgb;
sum += w0_1 * tex2D_linearize(tex, tex_uv - w0_1_ratio * dxdy).rgb;
sum += w0_1 * tex2D_linearize(tex, tex_uv + w0_1_ratio * dxdy).rgb;
sum += w2_3 * tex2D_linearize(tex, tex_uv + (2.0 + w2_3_ratio) * dxdy).rgb;
sum += w4_5 * tex2D_linearize(tex, tex_uv + (4.0 + w4_5_ratio) * dxdy).rgb;
sum += w6_7 * tex2D_linearize(tex, tex_uv + (6.0 + w6_7_ratio) * dxdy).rgb;
sum += w8_9 * tex2D_linearize(tex, tex_uv + (8.0 + w8_9_ratio) * dxdy).rgb;
sum += w10_11 * tex2D_linearize(tex, tex_uv + (10.0 + w10_11_ratio) * dxdy).rgb;
sum += w12_13 * tex2D_linearize(tex, tex_uv + (12.0 + w12_13_ratio) * dxdy).rgb;
sum += w14_15 * tex2D_linearize(tex, tex_uv + (14.0 + w14_15_ratio) * dxdy).rgb;
return sum * weight_sum_inv;
}
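// Reference note on the even-tap layout above: the center texel t0 is
// shared by the two innermost samples, so its weight is halved and split
// between them (w0_1 = w0*0.5 + w1). Each innermost sample at fractional
// offset r = w0_1_ratio = w1/w0_1 returns (1.0 - r)*t0 + r*t(+/-1), so the
// symmetric pair sums to
//   2.0*w0_1*(1.0 - r)*t0 + w0_1*r*(t(-1) + t(+1))
//       = w0*t0 + w1*(t(-1) + t(+1)),
// recovering the exact odd-tap weighting with two fewer fetches.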
vec3 tex2Dblur43fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy, const float sigma)
{
// Requires: Same as tex2Dblur11()
// Returns: A 1D 43x Gaussian blurred texture lookup using 22 linear
// taps. It may be mipmapped depending on settings and dxdy.
// First get the texel weights and normalization factor as above.
const float denom_inv = 0.5/(sigma*sigma);
const float w0 = 1.0;
const float w1 = exp(-1.0 * denom_inv);
const float w2 = exp(-4.0 * denom_inv);
const float w3 = exp(-9.0 * denom_inv);
const float w4 = exp(-16.0 * denom_inv);
const float w5 = exp(-25.0 * denom_inv);
const float w6 = exp(-36.0 * denom_inv);
const float w7 = exp(-49.0 * denom_inv);
const float w8 = exp(-64.0 * denom_inv);
const float w9 = exp(-81.0 * denom_inv);
const float w10 = exp(-100.0 * denom_inv);
const float w11 = exp(-121.0 * denom_inv);
const float w12 = exp(-144.0 * denom_inv);
const float w13 = exp(-169.0 * denom_inv);
const float w14 = exp(-196.0 * denom_inv);
const float w15 = exp(-225.0 * denom_inv);
const float w16 = exp(-256.0 * denom_inv);
const float w17 = exp(-289.0 * denom_inv);
const float w18 = exp(-324.0 * denom_inv);
const float w19 = exp(-361.0 * denom_inv);
const float w20 = exp(-400.0 * denom_inv);
const float w21 = exp(-441.0 * denom_inv);
//const float weight_sum_inv = 1.0 /
// (w0 + 2.0 * (w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8 + w9 + w10 + w11 +
// w12 + w13 + w14 + w15 + w16 + w17 + w18 + w19 + w20 + w21));
const float weight_sum_inv = get_fast_gaussian_weight_sum_inv(sigma);
// Calculate combined weights and linear sample ratios between texel pairs.
// The center texel (with weight w0) is used twice, so halve its weight.
const float w0_1 = w0 * 0.5 + w1;
const float w2_3 = w2 + w3;
const float w4_5 = w4 + w5;
const float w6_7 = w6 + w7;
const float w8_9 = w8 + w9;
const float w10_11 = w10 + w11;
const float w12_13 = w12 + w13;
const float w14_15 = w14 + w15;
const float w16_17 = w16 + w17;
const float w18_19 = w18 + w19;
const float w20_21 = w20 + w21;
const float w0_1_ratio = w1/w0_1;
const float w2_3_ratio = w3/w2_3;
const float w4_5_ratio = w5/w4_5;
const float w6_7_ratio = w7/w6_7;
const float w8_9_ratio = w9/w8_9;
const float w10_11_ratio = w11/w10_11;
const float w12_13_ratio = w13/w12_13;
const float w14_15_ratio = w15/w14_15;
const float w16_17_ratio = w17/w16_17;
const float w18_19_ratio = w19/w18_19;
const float w20_21_ratio = w21/w20_21;
// Statically normalize weights, sum weighted samples, and return:
vec3 sum = vec3(0.0);
sum += w20_21 * tex2D_linearize(tex, tex_uv - (20.0 + w20_21_ratio) * dxdy).rgb;
sum += w18_19 * tex2D_linearize(tex, tex_uv - (18.0 + w18_19_ratio) * dxdy).rgb;
sum += w16_17 * tex2D_linearize(tex, tex_uv - (16.0 + w16_17_ratio) * dxdy).rgb;
sum += w14_15 * tex2D_linearize(tex, tex_uv - (14.0 + w14_15_ratio) * dxdy).rgb;
sum += w12_13 * tex2D_linearize(tex, tex_uv - (12.0 + w12_13_ratio) * dxdy).rgb;
sum += w10_11 * tex2D_linearize(tex, tex_uv - (10.0 + w10_11_ratio) * dxdy).rgb;
sum += w8_9 * tex2D_linearize(tex, tex_uv - (8.0 + w8_9_ratio) * dxdy).rgb;
sum += w6_7 * tex2D_linearize(tex, tex_uv - (6.0 + w6_7_ratio) * dxdy).rgb;
sum += w4_5 * tex2D_linearize(tex, tex_uv - (4.0 + w4_5_ratio) * dxdy).rgb;
sum += w2_3 * tex2D_linearize(tex, tex_uv - (2.0 + w2_3_ratio) * dxdy).rgb;
sum += w0_1 * tex2D_linearize(tex, tex_uv - w0_1_ratio * dxdy).rgb;
sum += w0_1 * tex2D_linearize(tex, tex_uv + w0_1_ratio * dxdy).rgb;
sum += w2_3 * tex2D_linearize(tex, tex_uv + (2.0 + w2_3_ratio) * dxdy).rgb;
sum += w4_5 * tex2D_linearize(tex, tex_uv + (4.0 + w4_5_ratio) * dxdy).rgb;
sum += w6_7 * tex2D_linearize(tex, tex_uv + (6.0 + w6_7_ratio) * dxdy).rgb;
sum += w8_9 * tex2D_linearize(tex, tex_uv + (8.0 + w8_9_ratio) * dxdy).rgb;
sum += w10_11 * tex2D_linearize(tex, tex_uv + (10.0 + w10_11_ratio) * dxdy).rgb;
sum += w12_13 * tex2D_linearize(tex, tex_uv + (12.0 + w12_13_ratio) * dxdy).rgb;
sum += w14_15 * tex2D_linearize(tex, tex_uv + (14.0 + w14_15_ratio) * dxdy).rgb;
sum += w16_17 * tex2D_linearize(tex, tex_uv + (16.0 + w16_17_ratio) * dxdy).rgb;
sum += w18_19 * tex2D_linearize(tex, tex_uv + (18.0 + w18_19_ratio) * dxdy).rgb;
sum += w20_21 * tex2D_linearize(tex, tex_uv + (20.0 + w20_21_ratio) * dxdy).rgb;
return sum * weight_sum_inv;
}
//////////////////// ARBITRARILY RESIZABLE ONE-PASS BLURS ////////////////////
vec3 tex2Dblur3x3resize(const sampler2D tex, const vec2 tex_uv,
@@ -317,4 +569,27 @@ vec3 tex2Dblur9fast(const sampler2D tex, const vec2 tex_uv,
return tex2Dblur9fast(tex, tex_uv, dxdy, blur9_std_dev);
}
vec3 tex2Dblur17fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy)
{
return tex2Dblur17fast(tex, tex_uv, dxdy, blur17_std_dev);
}
vec3 tex2Dblur25fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy)
{
return tex2Dblur25fast(tex, tex_uv, dxdy, blur25_std_dev);
}
vec3 tex2Dblur31fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy)
{
return tex2Dblur31fast(tex, tex_uv, dxdy, blur31_std_dev);
}
vec3 tex2Dblur43fast(const sampler2D tex, const vec2 tex_uv,
const vec2 dxdy)
{
return tex2Dblur43fast(tex, tex_uv, dxdy, blur43_std_dev);
}
#endif // BLUR_FUNCTIONS_H

View file

@@ -0,0 +1,131 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OutputSize;
vec4 ORIG_LINEARIZEDSize;
vec4 HALATION_BLURSize;
vec4 MASKED_SCANLINESSize;
vec4 BRIGHTPASSSize;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
////////////////////////////////// INCLUDES //////////////////////////////////
//#include "../../../../include/gamma-management.h"
//#include "bloom-functions.h"
#include "phosphor-mask-resizing.h"
#include "scanline-functions.h"
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 video_uv;
layout(location = 1) out vec2 scanline_tex_uv;
layout(location = 2) out vec2 halation_tex_uv;
layout(location = 3) out vec2 brightpass_tex_uv;
layout(location = 4) out vec2 bloom_tex_uv;
layout(location = 5) out vec2 bloom_dxdy;
layout(location = 6) out float bloom_sigma_runtime;
void main()
{
gl_Position = params.MVP * Position;
video_uv = TexCoord;
// Our various input textures use different coords:
scanline_tex_uv = video_uv * registers.MASKED_SCANLINESSize.xy *
registers.MASKED_SCANLINESSize.zw;
halation_tex_uv = video_uv * registers.HALATION_BLURSize.xy *
registers.HALATION_BLURSize.zw;
brightpass_tex_uv = video_uv * registers.BRIGHTPASSSize.xy *
registers.BRIGHTPASSSize.zw;
bloom_tex_uv = TexCoord;
// We're horizontally blurring the bloom input (vertically blurred
// brightpass). Get the uv distance between output pixels / input texels
// in the horizontal direction (this pass must NOT resize):
bloom_dxdy = vec2(registers.SourceSize.z, 0.0);
// Calculate a runtime bloom_sigma in case it's needed:
const float mask_tile_size_x = get_resized_mask_tile_size(
registers.OutputSize.xy, registers.OutputSize.xy * mask_resize_viewport_scale, false).x;
bloom_sigma_runtime = get_min_sigma_to_blur_triad(
mask_tile_size_x / mask_triads_per_tile, bloom_diff_thresh);
}
#pragma stage fragment
layout(location = 0) in vec2 video_uv;
layout(location = 1) in vec2 scanline_tex_uv;
layout(location = 2) in vec2 halation_tex_uv;
layout(location = 3) in vec2 brightpass_tex_uv;
layout(location = 4) in vec2 bloom_tex_uv;
layout(location = 5) in vec2 bloom_dxdy;
layout(location = 6) in float bloom_sigma_runtime;
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
layout(set = 0, binding = 3) uniform sampler2D MASKED_SCANLINES;
layout(set = 0, binding = 4) uniform sampler2D HALATION_BLUR;
layout(set = 0, binding = 5) uniform sampler2D BRIGHTPASS;
void main()
{
// Blur the vertically blurred brightpass horizontally by 9/17/25/43x:
const float bloom_sigma = get_final_bloom_sigma(bloom_sigma_runtime);
const vec3 blurred_brightpass = tex2DblurNfast(Source,
bloom_tex_uv, bloom_dxdy, bloom_sigma);
// Sample the masked scanlines. Alpha contains the auto-dim factor:
const vec3 intensity_dim =
tex2D_linearize(MASKED_SCANLINES, scanline_tex_uv).rgb;
const float auto_dim_factor = levels_autodim_temp;
const float undim_factor = 1.0/auto_dim_factor;
// Calculate the mask dimpass, add it to the blurred brightpass, and
// undim (from scanline auto-dim) and amplify (from mask dim) the result:
const float mask_amplify = get_mask_amplify();
const vec3 brightpass = tex2D_linearize(BRIGHTPASS,
brightpass_tex_uv).rgb;
const vec3 dimpass = intensity_dim - brightpass;
const vec3 phosphor_bloom = (dimpass + blurred_brightpass) *
mask_amplify * undim_factor * levels_contrast;
// Sample the halation texture, and let some light bleed into refractive
// diffusion. Conceptually this occurs before the phosphor bloom, but
// adding it in earlier passes causes black crush in the diffusion colors.
const vec3 diffusion_color = levels_contrast * tex2D_linearize(
HALATION_BLUR, halation_tex_uv).rgb;
const vec3 final_bloom = mix(phosphor_bloom,
diffusion_color, diffusion_weight);
// Encode and output the bloomed image:
FragColor = encode_output(vec4(final_bloom, 1.0));
}
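As a concrete illustration of the "must NOT resize" comment in the vertex shader above, the preset entry driving this pass would pin its scale to the source size. A minimal sketch (the pass index and filename here are placeholders, not taken from the actual preset):

shader10 = crt-royale-bloom-horizontal-reconstitute.slang
scale_type10 = source
scale10 = 1.0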

View file

@@ -0,0 +1,87 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OriginalSize;
vec4 OutputSize;
uint FrameCount;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
////////////////////////////////// INCLUDES //////////////////////////////////
//#include "../../../../include/gamma-management.h"
//#include "bloom-functions.h"
#include "phosphor-mask-resizing.h"
#include "includes.h"
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 tex_uv;
layout(location = 1) out vec2 bloom_dxdy;
layout(location = 2) out float bloom_sigma_runtime;
void main()
{
gl_Position = params.MVP * Position;
tex_uv = TexCoord;
// Get the uv sample distance between output pixels. Calculate dxdy like
// blurs/vertex-shader-blur-fast-vertical.h.
const vec2 dxdy_scale = registers.SourceSize.xy * registers.OutputSize.zw;
const vec2 dxdy = dxdy_scale * registers.SourceSize.zw;
// This blur is vertical-only, so zero out the vertical offset:
bloom_dxdy = vec2(0.0, dxdy.y);
// Calculate a runtime bloom_sigma in case it's needed:
const float mask_tile_size_x = get_resized_mask_tile_size(
registers.OutputSize.xy, registers.OutputSize.xy * mask_resize_viewport_scale, false).x;
bloom_sigma_runtime = get_min_sigma_to_blur_triad(
mask_tile_size_x / mask_triads_per_tile, bloom_diff_thresh);
}
#pragma stage fragment
layout(location = 0) in vec2 tex_uv;
layout(location = 1) in vec2 bloom_dxdy;
layout(location = 2) in float bloom_sigma_runtime;
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
void main()
{
// Blur the brightpass horizontally with a 9/17/25/43x blur:
const float bloom_sigma = get_final_bloom_sigma(bloom_sigma_runtime);
const vec3 color = tex2DblurNfast(Source, tex_uv,
bloom_dxdy, bloom_sigma);
// Encode and output the blurred image:
FragColor = encode_output(vec4(color, 1.0));
}
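// Note on the dxdy math in the vertex shader above: the two factors cancel,
//   dxdy = (SourceSize.xy * OutputSize.zw) * SourceSize.zw = OutputSize.zw,
// i.e. one output pixel expressed in source uv coords, which is exactly the
// "uv sample distance between output pixels" the comment describes.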

View file

@@ -0,0 +1,142 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OriginalSize;
vec4 OutputSize;
uint FrameCount;
vec4 MASKED_SCANLINESSize;
vec4 BLOOM_APPROXSize;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
////////////////////////////////// INCLUDES //////////////////////////////////
//#include "../../../../include/gamma-management.h"
//#include "../../../../include/blur-functions.h"
#include "phosphor-mask-resizing.h"
#include "scanline-functions.h"
#include "bloom-functions.h"
#include "includes.h"
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 video_uv;
layout(location = 1) out vec2 scanline_tex_uv;
layout(location = 2) out float bloom_sigma_runtime;
layout(location = 3) out vec2 blur3x3_tex_uv;
void main()
{
gl_Position = params.MVP * Position;
const vec2 tex_uv = TexCoord;
// Our various input textures use different coords:
video_uv = tex_uv;
scanline_tex_uv = video_uv * registers.MASKED_SCANLINESSize.xy *
registers.MASKED_SCANLINESSize.zw;
blur3x3_tex_uv = video_uv * registers.BLOOM_APPROXSize.xy * registers.BLOOM_APPROXSize.zw;
// Calculate a runtime bloom_sigma in case it's needed:
const float mask_tile_size_x = get_resized_mask_tile_size(
registers.OutputSize.xy, registers.OutputSize.xy * mask_resize_viewport_scale, false).x;
bloom_sigma_runtime = get_min_sigma_to_blur_triad(
mask_tile_size_x / mask_triads_per_tile, bloom_diff_thresh);
}
#pragma stage fragment
layout(location = 0) in vec2 video_uv;
layout(location = 1) in vec2 scanline_tex_uv;
layout(location = 2) in float bloom_sigma_runtime;
layout(location = 3) in vec2 blur3x3_tex_uv;
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
layout(set = 0, binding = 3) uniform sampler2D MASKED_SCANLINES;
layout(set = 0, binding = 4) uniform sampler2D BLOOM_APPROX;
void main()
{
// Sample the masked scanlines:
const vec3 intensity_dim =
tex2D_linearize(MASKED_SCANLINES, scanline_tex_uv).rgb;
// Get the full intensity, including auto-undimming, and mask compensation:
const float auto_dim_factor = levels_autodim_temp;
const float undim_factor = 1.0/auto_dim_factor;
const float mask_amplify = get_mask_amplify();
const vec3 intensity = intensity_dim * undim_factor * mask_amplify *
levels_contrast;
// Sample BLOOM_APPROX to estimate what a straight blur of masked scanlines
// would look like, so we can estimate how much energy we'll receive from
// blooming neighbors:
const vec3 phosphor_blur_approx = levels_contrast * tex2D_linearize(
BLOOM_APPROX, blur3x3_tex_uv).rgb;
// Compute the blur weight for the center texel and the maximum energy we
// expect to receive from neighbors:
const float bloom_sigma = get_final_bloom_sigma(bloom_sigma_runtime);
const float center_weight = get_center_weight(bloom_sigma);
const vec3 max_area_contribution_approx =
max(vec3(0.0), phosphor_blur_approx - center_weight * intensity);
// Assume neighbors will blur 100% of their intensity (blur_ratio = 1.0),
// because it actually gets better results (on top of being very simple),
// but adjust all intensities for the user's desired underestimate factor:
const vec3 area_contrib_underestimate =
bloom_underestimate_levels * max_area_contribution_approx;
const vec3 intensity_underestimate =
bloom_underestimate_levels * intensity;
// Calculate the blur_ratio, the ratio of intensity we want to blur:
#ifdef BRIGHTPASS_AREA_BASED
// This area-based version changes blur_ratio more smoothly and blurs
// more, clipping less but offering less phosphor differentiation:
const vec3 phosphor_blur_underestimate = bloom_underestimate_levels *
phosphor_blur_approx;
const vec3 soft_intensity = max(intensity_underestimate,
phosphor_blur_underestimate * mask_amplify);
const vec3 blur_ratio_temp =
((vec3(1.0) - area_contrib_underestimate) /
soft_intensity - vec3(1.0)) / (center_weight - 1.0);
#else
const vec3 blur_ratio_temp =
((vec3(1.0) - area_contrib_underestimate) /
intensity_underestimate - vec3(1.0)) / (center_weight - 1.0);
#endif
const vec3 blur_ratio = clamp(blur_ratio_temp, 0.0, 1.0);
// Calculate the brightpass based on the auto-dimmed, unamplified, masked
// scanlines, encode if necessary, and return!
const vec3 brightpass = intensity_dim *
mix(blur_ratio, vec3(1.0), bloom_excess);
FragColor = encode_output(vec4(brightpass, 1.0));
}

View file

@@ -0,0 +1,242 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OriginalSize;
vec4 OutputSize;
uint FrameCount;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#define LAST_PASS
#define SIMULATE_CRT_ON_LCD
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
#ifndef RUNTIME_GEOMETRY_TILT
// Create a local-to-global rotation matrix for the CRT's coordinate frame
// and its global-to-local inverse. See the vertex shader for details.
// It's faster to compute these statically if possible.
const vec2 sin_tilt = sin(geom_tilt_angle_static);
const vec2 cos_tilt = cos(geom_tilt_angle_static);
const mat3x3 geom_local_to_global_static = mat3x3(
cos_tilt.x, sin_tilt.y*sin_tilt.x, cos_tilt.y*sin_tilt.x,
0.0, cos_tilt.y, -sin_tilt.y,
-sin_tilt.x, sin_tilt.y*cos_tilt.x, cos_tilt.y*cos_tilt.x);
const mat3x3 geom_global_to_local_static = mat3x3(
cos_tilt.x, 0.0, -sin_tilt.x,
sin_tilt.y*sin_tilt.x, cos_tilt.y, sin_tilt.y*cos_tilt.x,
cos_tilt.y*sin_tilt.x, -sin_tilt.y, cos_tilt.y*cos_tilt.x);
#endif
////////////////////////////////// INCLUDES //////////////////////////////////
//#include "../../../../include/gamma-management.h"
#include "tex2Dantialias.h"
#include "geometry-functions.h"
#include "includes.h"
/////////////////////////////////// HELPERS //////////////////////////////////
mat2x2 mul_scale(vec2 scale, mat2x2 matrix)
{
//mat2x2 scale_matrix = mat2x2(scale.x, 0.0, 0.0, scale.y);
//return (matrix * scale_matrix);
return mat2x2(vec4(matrix[0].xy, matrix[1].xy) * scale.xxyy);
}
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 tex_uv;
layout(location = 1) out vec4 video_and_texture_size_inv;
layout(location = 2) out vec2 output_size_inv;
layout(location = 3) out vec3 eye_pos_local;
layout(location = 4) out vec4 geom_aspect_and_overscan;
#ifdef RUNTIME_GEOMETRY_TILT
layout(location = 5) out vec3 global_to_local_row0;
layout(location = 6) out vec3 global_to_local_row1;
layout(location = 7) out vec3 global_to_local_row2;
#endif
void main()
{
gl_Position = params.MVP * Position;
tex_uv = TexCoord;
video_and_texture_size_inv = vec4(registers.SourceSize.zw, registers.SourceSize.zw);
output_size_inv = registers.OutputSize.zw;
// Get aspect/overscan vectors from scalar parameters (likely uniforms):
const float viewport_aspect_ratio = registers.OutputSize.x * registers.OutputSize.w;
const vec2 geom_aspect = get_aspect_vector(viewport_aspect_ratio);
const vec2 geom_overscan = get_geom_overscan_vector();
geom_aspect_and_overscan = vec4(geom_aspect, geom_overscan);
#ifdef RUNTIME_GEOMETRY_TILT
// Create a local-to-global rotation matrix for the CRT's coordinate
// frame and its global-to-local inverse. Rotate around the x axis
// first (pitch) and then the y axis (yaw) with yucky Euler angles.
// Positive angles go clockwise around the right-vec and up-vec.
// Runtime shader parameters prevent us from computing these globally,
// but we can still combine the pitch/yaw matrices by hand to cut a
// few instructions. Note that cg matrices fill row1 first, then row2,
// etc. (row-major order).
const vec2 geom_tilt_angle = get_geom_tilt_angle_vector();
const vec2 sin_tilt = sin(geom_tilt_angle);
const vec2 cos_tilt = cos(geom_tilt_angle);
// Conceptual breakdown:
// const mat3x3 rot_x_matrix = mat3x3(
// 1.0, 0.0, 0.0,
// 0.0, cos_tilt.y, -sin_tilt.y,
// 0.0, sin_tilt.y, cos_tilt.y);
// const mat3x3 rot_y_matrix = mat3x3(
// cos_tilt.x, 0.0, sin_tilt.x,
// 0.0, 1.0, 0.0,
// -sin_tilt.x, 0.0, cos_tilt.x);
// const mat3x3 local_to_global =
// rot_x_matrix * rot_y_matrix;
// const mat3x3 global_to_local =
// transpose(local_to_global);
mat3x3 local_to_global = mat3x3(
cos_tilt.x, sin_tilt.y*sin_tilt.x, cos_tilt.y*sin_tilt.x,
0.0, cos_tilt.y, -sin_tilt.y,
-sin_tilt.x, sin_tilt.y*cos_tilt.x, cos_tilt.y*cos_tilt.x);
// This is a pure rotation, so transpose = inverse:
mat3x3 global_to_local = transpose(local_to_global);
// Decompose the matrix into 3 vec3's for output:
global_to_local_row0 = vec3(global_to_local[0].xyz);
global_to_local_row1 = vec3(global_to_local[1].xyz);
global_to_local_row2 = vec3(global_to_local[2].xyz);
#else
const mat3x3 global_to_local = geom_global_to_local_static;
const mat3x3 local_to_global = geom_local_to_global_static;
#endif
// Get an optimal eye position based on geom_view_dist, viewport_aspect,
// and CRT radius/rotation:
#ifdef RUNTIME_GEOMETRY_MODE
const float geom_mode = geom_mode_runtime;
#else
const float geom_mode = geom_mode_static;
#endif
const vec3 eye_pos_global = get_ideal_global_eye_pos(local_to_global, geom_aspect, geom_mode);
// Transform the eye position into the local frame; v * M here matches
// Cg's mul(M, v) for these row-major-constructed matrices:
eye_pos_local = eye_pos_global * global_to_local;
}
#pragma stage fragment
layout(location = 0) in vec2 tex_uv;
layout(location = 1) in vec4 video_and_texture_size_inv;
layout(location = 2) in vec2 output_size_inv;
layout(location = 3) in vec3 eye_pos_local;
layout(location = 4) in vec4 geom_aspect_and_overscan;
#ifdef RUNTIME_GEOMETRY_TILT
layout(location = 5) in vec3 global_to_local_row0;
layout(location = 6) in vec3 global_to_local_row1;
layout(location = 7) in vec3 global_to_local_row2;
#endif
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
void main()
{
// Localize some parameters:
const vec2 geom_aspect = geom_aspect_and_overscan.xy;
const vec2 geom_overscan = geom_aspect_and_overscan.zw;
const vec2 video_size_inv = video_and_texture_size_inv.xy;
const vec2 texture_size_inv = video_and_texture_size_inv.zw;
#ifdef RUNTIME_GEOMETRY_TILT
const mat3x3 global_to_local = mat3x3(global_to_local_row0,
global_to_local_row1, global_to_local_row2);
#else
const mat3x3 global_to_local = geom_global_to_local_static;
#endif
#ifdef RUNTIME_GEOMETRY_MODE
const float geom_mode = geom_mode_runtime;
#else
const float geom_mode = geom_mode_static;
#endif
// Get flat and curved texture coords for the current fragment point sample
// and a pixel_to_tangent_video_uv matrix for transforming pixel offsets:
// video_uv = relative position in video frame, mapped to [0.0, 1.0] range
// tex_uv = relative position in padded texture, mapped to [0.0, 1.0] range
const vec2 flat_video_uv = tex_uv * (registers.SourceSize.xy * video_size_inv);
mat2x2 pixel_to_video_uv;
vec2 video_uv_no_geom_overscan;
if(geom_mode > 0.5)
{
video_uv_no_geom_overscan =
get_curved_video_uv_coords_and_tangent_matrix(flat_video_uv,
eye_pos_local, output_size_inv, geom_aspect,
geom_mode, global_to_local, pixel_to_video_uv);
}
else
{
video_uv_no_geom_overscan = flat_video_uv;
pixel_to_video_uv = mat2x2(
output_size_inv.x, 0.0, 0.0, output_size_inv.y);
}
// Correct for overscan here (not in curvature code):
const vec2 video_uv =
(video_uv_no_geom_overscan - vec2(0.5))/geom_overscan + vec2(0.5);
const vec2 tex_uv = video_uv * (registers.SourceSize.xy * texture_size_inv);
// Get a matrix transforming pixel vectors to tex_uv vectors:
const mat2x2 pixel_to_tex_uv =
mul_scale(registers.SourceSize.xy * texture_size_inv /
geom_aspect_and_overscan.zw, pixel_to_video_uv);
// Sample! Skip antialiasing if aa_level < 0.5 or both of these hold:
// 1.) Geometry/curvature isn't used
// 2.) Overscan == vec2(1.0)
// Skipping AA is sharper, but it's only faster with dynamic branches.
const vec2 abs_aa_r_offset = abs(get_aa_subpixel_r_offset());
// The sum of absolute values is nonnegative, so any nonzero component
// means subpixel AA is requested:
const bool need_subpixel_aa = abs_aa_r_offset.x + abs_aa_r_offset.y > 0.0;
vec3 color;
if(aa_level > 0.5 && (geom_mode > 0.5 || any(notEqual(geom_overscan, vec2(1.0)))))
{
// Sample the input with antialiasing (due to sharp phosphors, etc.):
color = tex2Daa(Source, tex_uv, pixel_to_tex_uv, registers.FrameCount);
}
else if(aa_level > 0.5 && need_subpixel_aa)
{
// Sample at each subpixel location:
color = tex2Daa_subpixel_weights_only(
Source, tex_uv, pixel_to_tex_uv);
}
else
{
color = tex2D_linearize(Source, tex_uv).rgb;
}
FragColor = encode_output(vec4(color, 1.0));
}

View file

@ -0,0 +1,131 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OriginalSize;
vec4 OutputSize;
uint FrameCount;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
////////////////////////////////// INCLUDES //////////////////////////////////
#include "phosphor-mask-resizing.h"
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 src_tex_uv_wrap;
layout(location = 1) out vec2 tile_uv_wrap;
layout(location = 2) out vec2 resize_magnification_scale;
layout(location = 3) out vec2 src_dxdy;
layout(location = 4) out vec2 tile_size_uv;
layout(location = 5) out vec2 input_tiles_per_texture;
layout(location = 6) out vec2 tex_uv;
void main()
{
gl_Position = params.MVP * Position;
tex_uv = TexCoord;
// First estimate the viewport size (the user will get the wrong number of
// triads if it's wrong and mask_specify_num_triads is 1.0/true).
const vec2 estimated_viewport_size =
registers.OutputSize.xy / mask_resize_viewport_scale;
// Find the final size of our resized phosphor mask tiles. We probably
// estimated the viewport size and MASK_RESIZE output size differently last
// pass, so do not swear they were the same. ;)
const vec2 mask_resize_tile_size = get_resized_mask_tile_size(
estimated_viewport_size, registers.OutputSize.xy, false);
// We'll render resized tiles until filling the output FBO or meeting a
// limit, so compute [wrapped] tile uv coords based on the output uv coords
// and the number of tiles that will fit in the FBO.
const vec2 output_tiles_this_pass = registers.OutputSize.xy / mask_resize_tile_size;
const vec2 output_video_uv = tex_uv;
// Assign the output directly (a const local would shadow it and leave
// the out variable unwritten):
tile_uv_wrap = output_video_uv * output_tiles_this_pass;
// Get the texel size of an input tile and related values:
const vec2 input_tile_size = vec2(min(
mask_resize_src_lut_size.x, registers.SourceSize.x), mask_resize_tile_size.y);
tile_size_uv = input_tile_size * registers.SourceSize.zw;
input_tiles_per_texture = registers.SourceSize.xy / input_tile_size;
// Derive [wrapped] texture uv coords from [wrapped] tile uv coords and
// the tile size in uv coords, and save frac() for the fragment shader.
src_tex_uv_wrap = tile_uv_wrap * tile_size_uv;
resize_magnification_scale = mask_resize_tile_size / input_tile_size;
src_dxdy = vec2(registers.SourceSize.z, 0.0);
}
#pragma stage fragment
layout(location = 0) in vec2 src_tex_uv_wrap;
layout(location = 1) in vec2 tile_uv_wrap;
layout(location = 2) in vec2 resize_magnification_scale;
layout(location = 3) in vec2 src_dxdy;
layout(location = 4) in vec2 tile_size_uv;
layout(location = 5) in vec2 input_tiles_per_texture;
layout(location = 6) in vec2 tex_uv;
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
void main()
{
// The input contains one mask tile horizontally and a number vertically.
// Resize the tile horizontally to its final screen size and repeat it
// until drawing at least mask_resize_num_tiles, leaving it unchanged
// vertically. Lanczos-resizing the phosphor mask achieves much sharper
// results than mipmapping, and outputting >= mask_resize_num_tiles makes for
// easier tiled sampling later.
#ifdef PHOSPHOR_MASK_MANUALLY_RESIZE
// Discard unneeded fragments in case our profile allows real branches.
if(get_mask_sample_mode() < 0.5 &&
max(tile_uv_wrap.x, tile_uv_wrap.y) <= mask_resize_num_tiles)
{
const vec2 src_tex_uv = fract(src_tex_uv_wrap);
const vec3 pixel_color = downsample_horizontal_sinc_tiled(Source,
src_tex_uv, registers.SourceSize.xy, src_dxdy.x,
resize_magnification_scale.x, tile_size_uv.x);
// The input LUT was linear RGB, and so is our output:
FragColor = vec4(pixel_color, 1.0);
}
else
{
discard;
}
#else
discard;
FragColor = vec4(1.0);
#endif
}

View file

@@ -0,0 +1,293 @@
#version 450
layout(push_constant) uniform Push
{
vec4 SourceSize;
vec4 OutputSize;
vec4 MASK_RESIZESize;
vec4 ORIG_LINEARIZEDSize;
vec4 VERTICAL_SCANLINESSize;
vec4 BLOOM_APPROXSize;
vec4 HALATION_BLURSize;
} registers;
#include "params.inc"
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
////////////////////////////////// INCLUDES //////////////////////////////////
//#include "scanline-functions.h"
#include "phosphor-mask-resizing.h"
//#include "bloom-functions.h"//"bloom-functions.h"
//#include "../../../../include/gamma-management.h"
#include "includes.h"
/////////////////////////////////// HELPERS //////////////////////////////////
vec4 tex2Dtiled_mask_linearize(const sampler2D tex,
const vec2 tex_uv)
{
// If we're manually tiling a texture, anisotropic filtering can get
// confused. One workaround is to just select the lowest mip level:
#ifdef PHOSPHOR_MASK_MANUALLY_RESIZE
#ifdef ANISOTROPIC_TILING_COMPAT_TEX2DLOD
// TODO: Use tex2Dlod_linearize with a calculated mip level.
return tex2Dlod_linearize(tex, vec4(tex_uv, 0.0, 0.0));
#else
#ifdef ANISOTROPIC_TILING_COMPAT_TEX2DBIAS
return tex2Dbias_linearize(tex, vec4(tex_uv, 0.0, -16.0));
#else
return tex2D_linearize(tex, tex_uv);
#endif
#endif
#else
return tex2D_linearize(tex, tex_uv);
#endif
}
#pragma stage vertex
layout(location = 0) in vec4 Position;
layout(location = 1) in vec2 TexCoord;
layout(location = 0) out vec2 video_uv;
layout(location = 1) out vec2 scanline_tex_uv;
layout(location = 2) out vec2 blur3x3_tex_uv;
layout(location = 3) out vec2 halation_tex_uv;
layout(location = 4) out vec2 scanline_texture_size_inv;
layout(location = 5) out vec4 mask_tile_start_uv_and_size;
layout(location = 6) out vec2 mask_tiles_per_screen;
void main()
{
gl_Position = params.MVP * Position;
// Our various input textures use different coords.
video_uv = TexCoord;
// Assign the output directly (a const local would shadow it and leave
// the out variable unwritten):
scanline_texture_size_inv = registers.VERTICAL_SCANLINESSize.zw;
scanline_tex_uv = video_uv * registers.VERTICAL_SCANLINESSize.xy *
scanline_texture_size_inv;
blur3x3_tex_uv = video_uv * registers.BLOOM_APPROXSize.xy *
registers.BLOOM_APPROXSize.zw;
halation_tex_uv = video_uv * registers.HALATION_BLURSize.xy *
registers.HALATION_BLURSize.zw;
// Get a consistent name for the final mask texture size. Sample mode 0
// uses the manually resized mask, but ignore it if we never resized.
#ifdef PHOSPHOR_MASK_MANUALLY_RESIZE
const float mask_sample_mode = get_mask_sample_mode();
vec2 mask_resize_texture_size = registers.MASK_RESIZESize.xy;
if(mask_sample_mode > 0.5) mask_resize_texture_size = mask_texture_large_size;
vec2 mask_resize_video_size = registers.MASK_RESIZESize.xy;
if(mask_sample_mode > 0.5) mask_resize_video_size = mask_texture_large_size;
#else
const vec2 mask_resize_texture_size = mask_texture_large_size;
const vec2 mask_resize_video_size = mask_texture_large_size;
#endif
// Compute mask tile dimensions, starting points, etc.:
mask_tile_start_uv_and_size = vec4(get_mask_sampling_parameters(
mask_resize_texture_size, mask_resize_video_size, registers.OutputSize.xy,
mask_tiles_per_screen)); //TODO/FIXME: is this right? I wrapped in a vec4 because that's what it needs to compile
}
#pragma stage fragment
layout(location = 0) in vec2 video_uv;
layout(location = 1) in vec2 scanline_tex_uv;
layout(location = 2) in vec2 blur3x3_tex_uv;
layout(location = 3) in vec2 halation_tex_uv;
layout(location = 4) in vec2 scanline_texture_size_inv;
layout(location = 5) in vec4 mask_tile_start_uv_and_size;
layout(location = 6) in vec2 mask_tiles_per_screen;
layout(location = 0) out vec4 FragColor;
layout(set = 0, binding = 2) uniform sampler2D Source;
layout(set = 0, binding = 3) uniform sampler2D mask_grille_texture_large;
layout(set = 0, binding = 4) uniform sampler2D mask_slot_texture_large;
layout(set = 0, binding = 5) uniform sampler2D mask_shadow_texture_large;
layout(set = 0, binding = 6) uniform sampler2D VERTICAL_SCANLINES;
layout(set = 0, binding = 7) uniform sampler2D BLOOM_APPROX;
layout(set = 0, binding = 8) uniform sampler2D HALATION_BLUR;
#ifdef PHOSPHOR_MASK_MANUALLY_RESIZE
layout(set = 0, binding = 9) uniform sampler2D MASK_RESIZE;
#endif
void main()
{
// This pass: Sample (misconverged?) scanlines to the final horizontal
// resolution, apply halation (bouncing electrons), and apply the phosphor
// mask. Fake a bloom if requested. Unless we fake a bloom, the output
// will be dim from the scanline auto-dim, mask dimming, and low gamma.
// Horizontally sample the current row (a vertically interpolated scanline)
// and account for horizontal convergence offsets, given in units of texels.
const vec3 scanline_color_dim = sample_rgb_scanline_horizontal(
VERTICAL_SCANLINES, scanline_tex_uv,
registers.VERTICAL_SCANLINESSize.xy, scanline_texture_size_inv);
const float auto_dim_factor = levels_autodim_temp;
// Sample the phosphor mask:
const vec2 tile_uv_wrap = video_uv * mask_tiles_per_screen;
const vec2 mask_tex_uv = convert_phosphor_tile_uv_wrap_to_tex_uv(
tile_uv_wrap, mask_tile_start_uv_and_size);
vec3 phosphor_mask_sample;
#ifdef PHOSPHOR_MASK_MANUALLY_RESIZE
// Sample mode 0 uses the resized mask; modes 1+ sample the originals:
const bool sample_orig_luts = get_mask_sample_mode() > 0.5;
#else
const bool sample_orig_luts = true;
#endif
if(sample_orig_luts)
{
// If mask_type is static, this branch will be resolved statically.
if(mask_type < 0.5)
{
phosphor_mask_sample = tex2D_linearize(
mask_grille_texture_large, mask_tex_uv).rgb;
}
else if(mask_type < 1.5)
{
phosphor_mask_sample = tex2D_linearize(
mask_slot_texture_large, mask_tex_uv).rgb;
}
else
{
phosphor_mask_sample = tex2D_linearize(
mask_shadow_texture_large, mask_tex_uv).rgb;
}
}
else
{
// Sample the resized mask, and avoid tiling artifacts:
phosphor_mask_sample = tex2Dtiled_mask_linearize(
MASK_RESIZE, mask_tex_uv).rgb;
}
// Sample the halation texture (auto-dim to match the scanlines), and
// account for both horizontal and vertical convergence offsets, given
// in units of texels horizontally and same-field scanlines vertically:
const vec3 halation_color = tex2D_linearize(
HALATION_BLUR, halation_tex_uv).rgb;
// Apply halation: Halation models electrons flying around under the glass
// and hitting the wrong phosphors (of any color). It desaturates, so
// average the halation electrons to a scalar. Reduce the local scanline
// intensity accordingly to conserve energy.
const vec3 halation_intensity_dim =
vec3(dot(halation_color, vec3(auto_dim_factor/3.0)));
const vec3 electron_intensity_dim = mix(scanline_color_dim,
halation_intensity_dim, halation_weight);
// Apply the phosphor mask:
const vec3 phosphor_emission_dim = electron_intensity_dim *
phosphor_mask_sample;
#ifdef PHOSPHOR_BLOOM_FAKE
// The BLOOM_APPROX pass approximates a blurred version of a masked
// and scanlined image. It's usually used to compute the brightpass,
// but we can also use it to fake the bloom stage entirely. Caveats:
// 1.) A fake bloom is conceptually different, since we're mixing in a
// fully blurred low-res image, and the biggest implications are:
// 2.) If mask_amplify is incorrect, results deteriorate more quickly.
// 3.) The inaccurate blurring hurts quality in high-contrast areas.
// 4.) The bloom_underestimate_levels parameter seems less sensitive.
// Reverse the auto-dimming and amplify to compensate for mask dimming:
#define PHOSPHOR_BLOOM_FAKE_WITH_SIMPLE_BLEND
#ifdef PHOSPHOR_BLOOM_FAKE_WITH_SIMPLE_BLEND
const float blur_contrast = 1.05;
#else
const float blur_contrast = 1.0;
#endif
const float mask_amplify = get_mask_amplify();
const float undim_factor = 1.0/auto_dim_factor;
const vec3 phosphor_emission =
phosphor_emission_dim * undim_factor * mask_amplify;
// Get a phosphor blur estimate, accounting for convergence offsets:
const vec3 electron_intensity = electron_intensity_dim * undim_factor;
const vec3 phosphor_blur_approx_soft = tex2D_linearize(
BLOOM_APPROX, blur3x3_tex_uv).rgb;
const vec3 phosphor_blur_approx = mix(phosphor_blur_approx_soft,
electron_intensity, 0.1) * blur_contrast;
// We could blend between phosphor_emission and phosphor_blur_approx,
// solving for the minimum blend_ratio that avoids clipping past 1.0:
// 1.0 >= total_intensity
// 1.0 >= phosphor_emission * (1.0 - blend_ratio) +
// phosphor_blur_approx * blend_ratio
// blend_ratio = (phosphor_emission - 1.0)/
// (phosphor_emission - phosphor_blur_approx);
// However, this blurs far more than necessary, because it aims for
// full brightness, not minimal blurring. To fix it, base blend_ratio
// on a max area intensity only so it varies more smoothly:
const vec3 phosphor_blur_underestimate =
phosphor_blur_approx * bloom_underestimate_levels;
const vec3 area_max_underestimate =
phosphor_blur_underestimate * mask_amplify;
#ifdef PHOSPHOR_BLOOM_FAKE_WITH_SIMPLE_BLEND
const vec3 blend_ratio_temp =
(area_max_underestimate - vec3(1.0)) /
(area_max_underestimate - phosphor_blur_underestimate);
#else
// Try doing it like an area-based brightpass. This is nearly
// identical, but it's worth toying with the code in case I ever
// find a way to make it look more like a real bloom. (I've had
// some promising textures from combining an area-based blend ratio
// for the phosphor blur and a more brightpass-like blend-ratio for
// the phosphor emission, but I haven't found a way to make the
// brightness correct across the whole color range, especially with
// different bloom_underestimate_levels values.)
const float desired_triad_size = mix(mask_triad_size_desired,
registers.OutputSize.x/mask_num_triads_desired,
mask_specify_num_triads);
const float bloom_sigma = get_min_sigma_to_blur_triad(
desired_triad_size, bloom_diff_thresh);
const float center_weight = get_center_weight(bloom_sigma);
const vec3 max_area_contribution_approx =
max(vec3(0.0), phosphor_blur_approx -
center_weight * phosphor_emission);
const vec3 area_contrib_underestimate =
bloom_underestimate_levels * max_area_contribution_approx;
const vec3 blend_ratio_temp =
((vec3(1.0) - area_contrib_underestimate) /
area_max_underestimate - vec3(1.0)) / (center_weight - 1.0);
#endif
// Clamp blend_ratio in case it's out-of-range, but be SUPER careful:
// min/max/clamp are BIZARRELY broken with lerp (optimization bug?),
// and this redundant sequence avoids bugs, at least on nVidia cards:
const vec3 blend_ratio_clamped = max(clamp(blend_ratio_temp, 0.0, 1.0), 0.0);
const vec3 blend_ratio = mix(blend_ratio_clamped, vec3(1.0), bloom_excess);
// Blend the blurred and unblurred images:
const vec3 phosphor_emission_unclipped =
mix(phosphor_emission, phosphor_blur_approx, blend_ratio);
// Simulate refractive diffusion by reusing the halation sample.
const vec3 pixel_color = mix(phosphor_emission_unclipped,
halation_color, diffusion_weight);
#else
const vec3 pixel_color = phosphor_emission_dim;
#endif
FragColor = encode_output(vec4(pixel_color, 1.0));
}
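// Restating the clipping-avoidance algebra from the comments above, with
// E = phosphor_emission and B = phosphor_blur_approx: a convex blend stays
// in range when
//   1.0 >= E*(1.0 - blend_ratio) + B*blend_ratio,
// and solving for the smallest such blend gives
//   blend_ratio = (E - 1.0)/(E - B).
// The code deliberately avoids this exact solution (it blurs more than
// necessary) and instead derives blend_ratio from a smoother max-area
// intensity underestimate.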

View file

@@ -0,0 +1,692 @@
#ifndef GEOMETRY_FUNCTIONS_H
#define GEOMETRY_FUNCTIONS_H
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
// crt-royale: A full-featured CRT shader, with cheese.
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along with
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307 USA
////////////////////////////////// INCLUDES //////////////////////////////////
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "bind-shader-params.h"
//////////////////////////// MACROS AND CONSTANTS ////////////////////////////
// Curvature-related constants:
#define MAX_POINT_CLOUD_SIZE 9
///////////////////////////// CURVATURE FUNCTIONS /////////////////////////////
vec2 quadratic_solve(const float a, const float b_over_2, const float c)
{
// Requires: 1.) a, b, and c are quadratic formula coefficients
// 2.) b_over_2 = b/2.0 (simplifies terms to factor 2 out)
// 3.) b_over_2 must be guaranteed < 0.0 (avoids a branch)
// Returns: Returns vec2(first_solution, discriminant), so the caller
// can choose how to handle the "no intersection" case. The
// Kahan or Citardauq formula is used for numerical robustness.
const float discriminant = b_over_2*b_over_2 - a*c;
const float solution0 = c/(-b_over_2 + sqrt(discriminant));
return vec2(solution0, discriminant);
}
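// Illustrative check (hypothetical numbers, not from the source): for
// x^2 - 3x + 2 = 0, call quadratic_solve(1.0, -1.5, 2.0). Then
// discriminant = 2.25 - 2.0 = 0.25, and solution0 = 2.0/(1.5 + 0.5) = 1.0,
// the nearer root. The textbook (-b - sqrt(b^2 - 4ac))/(2a) form can
// cancel catastrophically when b_over_2*b_over_2 >> a*c; dividing c by
// the well-conditioned root sidesteps that.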
vec2 intersect_sphere(const vec3 view_vec, const vec3 eye_pos_vec)
{
// Requires: 1.) view_vec and eye_pos_vec are 3D vectors in the sphere's
// local coordinate frame (eye_pos_vec is a position, i.e.
// a vector from the origin to the eye/camera)
// 2.) geom_radius is a global containing the sphere's radius
// Returns: Cast a ray of direction view_vec from eye_pos_vec at a
// sphere of radius geom_radius, and return the distance to
// the first intersection in units of length(view_vec).
// http://wiki.cgsociety.org/index.php/Ray_Sphere_Intersection
// Quadratic formula coefficients (b_over_2 is guaranteed negative):
const float a = dot(view_vec, view_vec);
const float b_over_2 = dot(view_vec, eye_pos_vec); // * 2.0 factored out
const float c = dot(eye_pos_vec, eye_pos_vec) - geom_radius*geom_radius;
return quadratic_solve(a, b_over_2, c);
}
vec2 intersect_cylinder(const vec3 view_vec, const vec3 eye_pos_vec)
{
// Requires: 1.) view_vec and eye_pos_vec are 3D vectors in the sphere's
// local coordinate frame (eye_pos_vec is a position, i.e.
// a vector from the origin to the eye/camera)
// 2.) geom_radius is a global containing the cylinder's radius
// Returns: Cast a ray of direction view_vec from eye_pos_vec at a
// cylinder of radius geom_radius, and return the distance to
// the first intersection in units of length(view_vec). The
// derivation of the coefficients is in Christer Ericson's
// Real-Time Collision Detection, p. 195-196, and this version
// uses LaGrange's identity to reduce operations.
// Arbitrary "cylinder top" reference point for an infinite cylinder:
const vec3 cylinder_top_vec = vec3(0.0, geom_radius, 0.0);
const vec3 cylinder_axis_vec = vec3(0.0, 1.0, 0.0);//vec3(0.0, 2.0*geom_radius, 0.0);
const vec3 top_to_eye_vec = eye_pos_vec - cylinder_top_vec;
const vec3 axis_x_view = cross(cylinder_axis_vec, view_vec);
const vec3 axis_x_top_to_eye = cross(cylinder_axis_vec, top_to_eye_vec);
// Quadratic formula coefficients (b_over_2 is guaranteed negative):
const float a = dot(axis_x_view, axis_x_view);
const float b_over_2 = dot(axis_x_top_to_eye, axis_x_view);
const float c = dot(axis_x_top_to_eye, axis_x_top_to_eye) -
geom_radius*geom_radius;//*dot(cylinder_axis_vec, cylinder_axis_vec);
return quadratic_solve(a, b_over_2, c);
}
vec2 cylinder_xyz_to_uv(const vec3 intersection_pos_local,
const vec2 geom_aspect)
{
// Requires: An xyz intersection position on a cylinder.
// Returns: video_uv coords mapped to range [-0.5, 0.5]
// Mapping: Define square_uv.x to be the signed arc length in xz-space,
// and define square_uv.y = -intersection_pos_local.y (+v = -y).
// Start with a numerically robust arc length calculation.
const float angle_from_image_center = atan(intersection_pos_local.x,
intersection_pos_local.z);
const float signed_arc_len = angle_from_image_center * geom_radius;
// Get a uv-mapping where [-0.5, 0.5] maps to a "square" area, then divide
// by the aspect ratio to stretch the mapping appropriately:
const vec2 square_uv = vec2(signed_arc_len, -intersection_pos_local.y);
const vec2 video_uv = square_uv / geom_aspect;
return video_uv;
}
vec3 cylinder_uv_to_xyz(const vec2 video_uv, const vec2 geom_aspect)
{
// Requires: video_uv coords mapped to range [-0.5, 0.5]
// Returns: An xyz intersection position on a cylinder. This is the
// inverse of cylinder_xyz_to_uv().
// Expand video_uv by the aspect ratio to get proportionate x/y lengths,
// then calculate an xyz position for the cylindrical mapping above.
const vec2 square_uv = video_uv * geom_aspect;
const float arc_len = square_uv.x;
const float angle_from_image_center = arc_len / geom_radius;
const float x_pos = sin(angle_from_image_center) * geom_radius;
const float z_pos = cos(angle_from_image_center) * geom_radius;
// Or: z = sqrt(geom_radius**2 - x**2)
// Or: z = geom_radius/sqrt(1.0 + tan(angle)**2), x = z * tan(angle)
const vec3 intersection_pos_local = vec3(x_pos, -square_uv.y, z_pos);
return intersection_pos_local;
}
vec2 sphere_xyz_to_uv(const vec3 intersection_pos_local,
const vec2 geom_aspect)
{
// Requires: An xyz intersection position on a sphere.
// Returns: video_uv coords mapped to range [-0.5, 0.5]
// Mapping: First define square_uv.x/square_uv.y ==
// intersection_pos_local.x/intersection_pos_local.y. Then,
// length(square_uv) is the arc length from the image center
// at (0.0, 0.0, geom_radius) along the tangent great circle.
// Credit for this mapping goes to cgwg: I never managed to
// understand his code, but he told me his mapping was based on
// great circle distances when I asked him about it, which
// informed this very similar (almost identical) mapping.
// Start with a numerically robust arc length calculation between the ray-
// sphere intersection point and the image center using a method posted by
// Roger Stafford on comp.soft-sys.matlab:
// https://groups.google.com/d/msg/comp.soft-sys.matlab/zNbUui3bjcA/c0HV_bHSx9cJ
const vec3 image_center_pos_local = vec3(0.0, 0.0, geom_radius);
const float cp_len =
length(cross(intersection_pos_local, image_center_pos_local));
const float dp = dot(intersection_pos_local, image_center_pos_local);
const float angle_from_image_center = atan(cp_len, dp);
const float arc_len = angle_from_image_center * geom_radius;
// Get a uv-mapping where [-0.5, 0.5] maps to a "square" area, then divide
// by the aspect ratio to stretch the mapping appropriately:
const vec2 square_uv_unit = normalize(vec2(intersection_pos_local.x,
-intersection_pos_local.y));
const vec2 square_uv = arc_len * square_uv_unit;
const vec2 video_uv = square_uv / geom_aspect;
return video_uv;
}
vec3 sphere_uv_to_xyz(const vec2 video_uv, const vec2 geom_aspect)
{
// Requires: video_uv coords mapped to range [-0.5, 0.5]
// Returns: An xyz intersection position on a sphere. This is the
// inverse of sphere_xyz_to_uv().
// Expand video_uv by the aspect ratio to get proportionate x/y lengths,
// then calculate an xyz position for the spherical mapping above.
const vec2 square_uv = video_uv * geom_aspect;
// Using length or sqrt here butchers the framerate on my 8800GTS if
// this function is called too many times, and so does taking the max
// component of square_uv/square_uv_unit (program length threshold?).
//float arc_len = length(square_uv);
const vec2 square_uv_unit = normalize(square_uv);
const float arc_len = square_uv.y/square_uv_unit.y;
const float angle_from_image_center = arc_len / geom_radius;
const float xy_dist_from_sphere_center =
sin(angle_from_image_center) * geom_radius;
//vec2 xy_pos = xy_dist_from_sphere_center * (square_uv/FIX_ZERO(arc_len));
const vec2 xy_pos = xy_dist_from_sphere_center * square_uv_unit;
const float z_pos = cos(angle_from_image_center) * geom_radius;
const vec3 intersection_pos_local = vec3(xy_pos.x, -xy_pos.y, z_pos);
return intersection_pos_local;
}
vec2 sphere_alt_xyz_to_uv(const vec3 intersection_pos_local,
const vec2 geom_aspect)
{
// Requires: An xyz intersection position on a cylinder.
// Returns: video_uv coords mapped to range [-0.5, 0.5]
// Mapping: Define square_uv.x to be the signed arc length in xz-space,
// and define square_uv.y == signed arc length in yz-space.
// See cylinder_xyz_to_uv() for implementation details (very similar).
const vec2 angle_from_image_center = atan(
vec2(intersection_pos_local.x, -intersection_pos_local.y),
intersection_pos_local.zz);
const vec2 signed_arc_len = angle_from_image_center * geom_radius;
const vec2 video_uv = signed_arc_len / geom_aspect;
return video_uv;
}
vec3 sphere_alt_uv_to_xyz(const vec2 video_uv, const vec2 geom_aspect)
{
// Requires: video_uv coords mapped to range [-0.5, 0.5]
// Returns: An xyz intersection position on a sphere. This is the
// inverse of sphere_alt_xyz_to_uv().
// See cylinder_uv_to_xyz() for implementation details (very similar).
const vec2 square_uv = video_uv * geom_aspect;
const vec2 arc_len = square_uv;
const vec2 angle_from_image_center = arc_len / geom_radius;
const vec2 xy_pos = sin(angle_from_image_center) * geom_radius;
const float z_pos = sqrt(geom_radius*geom_radius - dot(xy_pos, xy_pos));
return vec3(xy_pos.x, -xy_pos.y, z_pos);
}
vec2 intersect(const vec3 view_vec_local, const vec3 eye_pos_local,
const float geom_mode)
{
return geom_mode < 2.5 ? intersect_sphere(view_vec_local, eye_pos_local) :
intersect_cylinder(view_vec_local, eye_pos_local);
}
vec2 xyz_to_uv(const vec3 intersection_pos_local,
const vec2 geom_aspect, const float geom_mode)
{
return geom_mode < 1.5 ?
sphere_xyz_to_uv(intersection_pos_local, geom_aspect) :
geom_mode < 2.5 ?
sphere_alt_xyz_to_uv(intersection_pos_local, geom_aspect) :
cylinder_xyz_to_uv(intersection_pos_local, geom_aspect);
}
vec3 uv_to_xyz(const vec2 uv, const vec2 geom_aspect,
const float geom_mode)
{
return geom_mode < 1.5 ? sphere_uv_to_xyz(uv, geom_aspect) :
geom_mode < 2.5 ? sphere_alt_uv_to_xyz(uv, geom_aspect) :
cylinder_uv_to_xyz(uv, geom_aspect);
}
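// Example (illustrative): geom_mode = 1.0 selects the ordinary sphere
// mapping, 2.0 the alternate sphere, and 3.0 the cylinder. geom_mode =
// 0.0 (flat) is presumably short-circuited by callers before these
// dispatchers run, since no branch here handles it specially.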
vec2 view_vec_to_uv(const vec3 view_vec_local, const vec3 eye_pos_local,
const vec2 geom_aspect, const float geom_mode, out vec3 intersection_pos)
{
// Get the intersection point on the primitive, given an eye position
// and view vector already in its local coordinate frame:
const vec2 intersect_dist_and_discriminant = intersect(view_vec_local,
eye_pos_local, geom_mode);
const vec3 intersection_pos_local = eye_pos_local +
view_vec_local * intersect_dist_and_discriminant.x;
// Save the intersection position to an output parameter:
intersection_pos = intersection_pos_local;
// Transform into uv coords, but give out-of-range coords if the
// view ray doesn't intersect the primitive in the first place:
return intersect_dist_and_discriminant.y > 0.005 ?
xyz_to_uv(intersection_pos_local, geom_aspect, geom_mode) : vec2(1.0);
}
vec3 get_ideal_global_eye_pos_for_points(vec3 eye_pos,
const vec2 geom_aspect, const vec3 global_coords[MAX_POINT_CLOUD_SIZE],
const int num_points)
{
// Requires: Parameters:
// 1.) Starting eye_pos is a global 3D position at which the
// camera contains all points in global_coords[] in its FOV
// 2.) geom_aspect = get_aspect_vector(
// IN.output_size.x / IN.output_size.y);
// 3.) global_coords is a point cloud containing global xyz
// coords of extreme points on the simulated CRT screen.
// Globals:
// 1.) geom_view_dist must be > 0.0. It controls the "near
// plane" used to interpret flat_video_uv as a view
// vector, which controls the field of view (FOV).
// Eyespace coordinate frame: +x = right, +y = up, +z = back
// Returns: Return an eye position at which the point cloud spans as
// much of the screen as possible (given the FOV controlled by
// geom_view_dist) without being cropped or sheared.
// Algorithm:
// 1.) Move the eye laterally to a point which attempts to maximize the
// amount we can move forward without clipping the CRT screen.
// 2.) Move forward by as much as possible without clipping the CRT.
// Get the allowed movement range by solving for the eye_pos offsets
// that result in each point being projected to a screen edge/corner in
// pseudo-normalized device coords (where xy ranges from [-0.5, 0.5]
// and z = eyespace z):
// pndc_coord = vec3(vec2(eyespace_xyz.x, -eyespace_xyz.y)*
// geom_view_dist / (geom_aspect * -eyespace_xyz.z), eyespace_xyz.z);
// Notes:
// The field of view is controlled by geom_view_dist's magnitude relative to
// the view vector's x and y components:
// view_vec.xy ranges from [-0.5, 0.5] * geom_aspect
// view_vec.z = -geom_view_dist
// But for the purposes of perspective divide, it should be considered:
// view_vec.xy ranges from [-0.5, 0.5] * geom_aspect / geom_view_dist
// view_vec.z = -1.0
const int max_centering_iters = 1; // Keep for easy testing.
for(int iter = 0; iter < max_centering_iters; iter++)
{
// 0.) Get the eyespace coordinates of our point cloud:
vec3 eyespace_coords[MAX_POINT_CLOUD_SIZE];
for(int i = 0; i < num_points; i++)
{
eyespace_coords[i] = global_coords[i] - eye_pos;
}
// 1a.)For each point, find out how far we can move eye_pos in each
// lateral direction without the point clipping the frustum.
// Eyespace +y = up, screenspace +y = down, so flip y after
// applying the eyespace offset (on the way to "clip space").
// Solve for two offsets per point based on:
// (eyespace_xyz.xy - offset_dr) * vec2(1.0, -1.0) *
// geom_view_dist / (geom_aspect * -eyespace_xyz.z) = vec2(-0.5)
// (eyespace_xyz.xy - offset_dr) * vec2(1.0, -1.0) *
// geom_view_dist / (geom_aspect * -eyespace_xyz.z) = vec2(0.5)
// offset_ul and offset_dr represent the farthest we can move the
// eye_pos up-left and down-right. Save the min of all offset_dr's
// and the max of all offset_ul's (since it's negative).
float abs_radius = abs(geom_radius); // In case anyone gets ideas. ;)
vec2 offset_dr_min = vec2(10.0 * abs_radius, 10.0 * abs_radius);
vec2 offset_ul_max = vec2(-10.0 * abs_radius, -10.0 * abs_radius);
for(int i = 0; i < num_points; i++)
{
const vec2 flipy = vec2(1.0, -1.0);
vec3 eyespace_xyz = eyespace_coords[i];
vec2 offset_dr = eyespace_xyz.xy - vec2(-0.5) *
(geom_aspect * -eyespace_xyz.z) / (geom_view_dist * flipy);
vec2 offset_ul = eyespace_xyz.xy - vec2(0.5) *
(geom_aspect * -eyespace_xyz.z) / (geom_view_dist * flipy);
offset_dr_min = min(offset_dr_min, offset_dr);
offset_ul_max = max(offset_ul_max, offset_ul);
}
// 1b.)Update eye_pos: Adding the average of offset_ul_max and
// offset_dr_min gives it equal leeway on the top vs. bottom
// and left vs. right. Recalculate eyespace_coords accordingly.
vec2 center_offset = 0.5 * (offset_ul_max + offset_dr_min);
eye_pos.xy += center_offset;
for(int i = 0; i < num_points; i++)
{
eyespace_coords[i] = global_coords[i] - eye_pos;
}
// 2a.)For each point, find out how far we can move eye_pos forward
// without the point clipping the frustum. Flip the y
// direction in advance (matters for a later step, not here).
// Solve for four offsets per point based on:
// eyespace_xyz_flipy.x * geom_view_dist /
// (geom_aspect.x * (offset_z - eyespace_xyz_flipy.z)) =-0.5
// eyespace_xyz_flipy.y * geom_view_dist /
// (geom_aspect.y * (offset_z - eyespace_xyz_flipy.z)) =-0.5
// eyespace_xyz_flipy.x * geom_view_dist /
// (geom_aspect.x * (offset_z - eyespace_xyz_flipy.z)) = 0.5
// eyespace_xyz_flipy.y * geom_view_dist /
// (geom_aspect.y * (offset_z - eyespace_xyz_flipy.z)) = 0.5
// We'll vectorize the actual computation. Take the maximum of
// these four for a single offset, and continue taking the max
// for every point (use max because offset.z is negative).
float offset_z_max = -10.0 * geom_radius * geom_view_dist;
for(int i = 0; i < num_points; i++)
{
vec3 eyespace_xyz_flipy = eyespace_coords[i] *
vec3(1.0, -1.0, 1.0);
vec4 offset_zzzz = eyespace_xyz_flipy.zzzz +
(eyespace_xyz_flipy.xyxy * geom_view_dist) /
(vec4(-0.5, -0.5, 0.5, 0.5) * vec4(geom_aspect, geom_aspect));
// Ignore offsets that push positive x/y values to opposite
// boundaries, and vice versa, and don't let the camera move
// past a point in the dead center of the screen:
offset_z_max = (eyespace_xyz_flipy.x < 0.0) ?
max(offset_z_max, offset_zzzz.x) : offset_z_max;
offset_z_max = (eyespace_xyz_flipy.y < 0.0) ?
max(offset_z_max, offset_zzzz.y) : offset_z_max;
offset_z_max = (eyespace_xyz_flipy.x > 0.0) ?
max(offset_z_max, offset_zzzz.z) : offset_z_max;
offset_z_max = (eyespace_xyz_flipy.y > 0.0) ?
max(offset_z_max, offset_zzzz.w) : offset_z_max;
offset_z_max = max(offset_z_max, eyespace_xyz_flipy.z);
}
// 2b.)Update eye_pos: Add the maximum (smallest negative) z offset.
eye_pos.z += offset_z_max;
}
return eye_pos;
}
vec3 get_ideal_global_eye_pos(const mat3x3 local_to_global,
const vec2 geom_aspect, const float geom_mode)
{
// Start with an initial eye_pos that includes the entire primitive
// (sphere or cylinder) in its field-of-view:
const vec3 high_view = vec3(0.0, geom_aspect.y, -geom_view_dist);
const vec3 low_view = high_view * vec3(1.0, -1.0, 1.0);
const float len_sq = dot(high_view, high_view);
const float fov = abs(acos(dot(high_view, low_view)/len_sq));
// Trigonometry/similar triangles say distance = geom_radius/sin(fov/2):
const float eye_z_spherical = geom_radius/sin(fov*0.5);
const vec3 eye_pos = geom_mode < 2.5 ?
vec3(0.0, 0.0, eye_z_spherical) :
vec3(0.0, 0.0, max(geom_view_dist, eye_z_spherical));
// Get global xyz coords of extreme sample points on the simulated CRT
// screen. Start with the center, edge centers, and corners of the
// video image. We can't ignore backfacing points: They're occluded
// by closer points on the primitive, but they may NOT be occluded by
// the convex hull of the remaining samples (i.e. the remaining convex
// hull might not envelop points that do occlude a back-facing point.)
const int num_points = MAX_POINT_CLOUD_SIZE;
vec3 global_coords[MAX_POINT_CLOUD_SIZE];
global_coords[0] = (uv_to_xyz(vec2(0.0, 0.0), geom_aspect, geom_mode) * local_to_global);
global_coords[1] = (uv_to_xyz(vec2(0.0, -0.5), geom_aspect, geom_mode) * local_to_global);
global_coords[2] = (uv_to_xyz(vec2(0.0, 0.5), geom_aspect, geom_mode) * local_to_global);
global_coords[3] = (uv_to_xyz(vec2(-0.5, 0.0), geom_aspect, geom_mode) * local_to_global);
global_coords[4] = (uv_to_xyz(vec2(0.5, 0.0), geom_aspect, geom_mode) * local_to_global);
global_coords[5] = (uv_to_xyz(vec2(-0.5, -0.5), geom_aspect, geom_mode) * local_to_global);
global_coords[6] = (uv_to_xyz(vec2(0.5, -0.5), geom_aspect, geom_mode) * local_to_global);
global_coords[7] = (uv_to_xyz(vec2(-0.5, 0.5), geom_aspect, geom_mode) * local_to_global);
global_coords[8] = (uv_to_xyz(vec2(0.5, 0.5), geom_aspect, geom_mode) * local_to_global);
// Adding more inner image points could help in extreme cases, but too many
// points will kill the framerate. For safety, default to the initial
// eye_pos if any z coords are negative:
float num_negative_z_coords = 0.0;
for(int i = 0; i < num_points; i++)
{
num_negative_z_coords += float(global_coords[i].z < 0.0);
}
// Outsource the optimized eye_pos calculation:
return num_negative_z_coords > 0.5 ? eye_pos :
get_ideal_global_eye_pos_for_points(eye_pos, geom_aspect,
global_coords, num_points);
}
mat3x3 get_pixel_to_object_matrix(const mat3x3 global_to_local,
const vec3 eye_pos_local, const vec3 view_vec_global,
const vec3 intersection_pos_local, const vec3 normal,
const vec2 output_size_inv)
{
// Requires: See get_curved_video_uv_coords_and_tangent_matrix for
// descriptions of each parameter.
// Returns: Return a transformation matrix from 2D pixel-space vectors
// (where (+1.0, +1.0) is a vector to one pixel down-right,
// i.e. same directionality as uv texels) to 3D object-space
// vectors in the CRT's local coordinate frame (right-handed)
// ***which are tangent to the CRT surface at the intersection
// position.*** (Basically, we want to convert pixel-space
// vectors to 3D vectors along the CRT's surface, for later
// conversion to uv vectors.)
// Shorthand inputs:
const vec3 pos = intersection_pos_local;
const vec3 eye_pos = eye_pos_local;
// Get a piecewise-linear matrix transforming from "pixelspace" offset
// vectors (1.0 = one pixel) to object space vectors in the tangent
// plane (faster than finding 3 view-object intersections).
// 1.) Get the local view vecs for the pixels to the right and down:
const vec3 view_vec_right_global = view_vec_global +
vec3(output_size_inv.x, 0.0, 0.0);
const vec3 view_vec_down_global = view_vec_global +
vec3(0.0, -output_size_inv.y, 0.0);
const vec3 view_vec_right_local =
(view_vec_right_global * global_to_local);
const vec3 view_vec_down_local =
(view_vec_down_global * global_to_local);
// 2.) Using the true intersection point, intersect the neighboring
// view vectors with the tangent plane:
const vec3 intersection_vec_dot_normal = vec3(dot(pos - eye_pos, normal));
const vec3 right_pos = eye_pos + (intersection_vec_dot_normal /
dot(view_vec_right_local, normal))*view_vec_right_local;
const vec3 down_pos = eye_pos + (intersection_vec_dot_normal /
dot(view_vec_down_local, normal))*view_vec_down_local;
// 3.) Subtract the original intersection pos from its neighbors; the
// resulting vectors are object-space vectors tangent to the plane.
// These vectors are the object-space transformations of (1.0, 0.0)
// and (0.0, 1.0) pixel offsets, so they form the first two basis
// vectors of a pixelspace to object space transformation. This
// transformation is 2D to 3D, so use (0, 0, 0) for the third vector.
const vec3 object_right_vec = right_pos - pos;
const vec3 object_down_vec = down_pos - pos;
const mat3x3 pixel_to_object = mat3x3(
object_right_vec.x, object_down_vec.x, 0.0,
object_right_vec.y, object_down_vec.y, 0.0,
object_right_vec.z, object_down_vec.z, 0.0);
return pixel_to_object;
}
mat3x3 get_object_to_tangent_matrix(const vec3 intersection_pos_local,
const vec3 normal, const vec2 geom_aspect, const float geom_mode)
{
// Requires: See get_curved_video_uv_coords_and_tangent_matrix for
// descriptions of each parameter.
// Returns: Return a transformation matrix from 3D object-space vectors
// in the CRT's local coordinate frame (right-handed, +y = up)
// to 2D video_uv vectors (+v = down).
// Description:
// The TBN matrix formed by the [tangent, bitangent, normal] basis
// vectors transforms ordinary vectors from tangent->object space.
// The cotangent matrix formed by the [cotangent, cobitangent, normal]
// basis vectors transforms normal vectors (covectors) from
// tangent->object space. It's the inverse-transpose of the TBN matrix.
// We want the inverse of the TBN matrix (transpose of the cotangent
// matrix), which transforms ordinary vectors from object->tangent space.
// Start by calculating the relevant basis vectors in accordance with
// Christian Schüler's blog post "Followup: Normal Mapping Without
// Precomputed Tangents": http://www.thetenthplanet.de/archives/1180
// With our particular uv mapping, the scale of the u and v directions
// is determined entirely by the aspect ratio for cylindrical and ordinary
// spherical mappings, and so tangent and bitangent lengths are also
// determined by it (the alternate mapping is more complex). Therefore, we
// must ensure appropriate cotangent and cobitangent lengths as well.
// Base these off the uv<=>xyz mappings for each primitive.
const vec3 pos = intersection_pos_local;
const vec3 x_vec = vec3(1.0, 0.0, 0.0);
const vec3 y_vec = vec3(0.0, 1.0, 0.0);
// The tangent and bitangent vectors correspond with increasing u and v,
// respectively. Mathematically we'd base the cotangent/cobitangent on
// those, but we'll compute the cotangent/cobitangent directly when we can.
vec3 cotangent_unscaled, cobitangent_unscaled;
// geom_mode should be constant-folded without RUNTIME_GEOMETRY_MODE.
if(geom_mode < 1.5)
{
// Sphere:
// tangent = normalize(cross(normal, cross(x_vec, pos))) * geom_aspect.x
// bitangent = normalize(cross(cross(y_vec, pos), normal)) * geom_aspect.y
// inv_determinant = 1.0/length(cross(bitangent, tangent))
// cotangent = cross(normal, bitangent) * inv_determinant
// == normalize(cross(y_vec, pos)) * geom_aspect.y * inv_determinant
// cobitangent = cross(tangent, normal) * inv_determinant
// == normalize(cross(x_vec, pos)) * geom_aspect.x * inv_determinant
// Simplified (scale by inv_determinant below):
cotangent_unscaled = normalize(cross(y_vec, pos)) * geom_aspect.y;
cobitangent_unscaled = normalize(cross(x_vec, pos)) * geom_aspect.x;
}
else if(geom_mode < 2.5)
{
// Sphere, alternate mapping:
// This mapping works a bit like the cylindrical mapping in two
// directions, which makes the lengths and directions more complex.
// Unfortunately, I can't find much of a shortcut:
const vec3 tangent = normalize(
cross(y_vec, vec3(pos.x, 0.0, pos.z))) * geom_aspect.x;
const vec3 bitangent = normalize(
cross(x_vec, vec3(0.0, pos.yz))) * geom_aspect.y;
cotangent_unscaled = cross(normal, bitangent);
cobitangent_unscaled = cross(tangent, normal);
}
else
{
// Cylinder:
// tangent = normalize(cross(y_vec, normal)) * geom_aspect.x;
// bitangent = vec3(0.0, -geom_aspect.y, 0.0);
// inv_determinant = 1.0/length(cross(bitangent, tangent))
// cotangent = cross(normal, bitangent) * inv_determinant
// == normalize(cross(y_vec, pos)) * geom_aspect.y * inv_determinant
// cobitangent = cross(tangent, normal) * inv_determinant
// == vec3(0.0, -geom_aspect.x, 0.0) * inv_determinant
cotangent_unscaled = cross(y_vec, normal) * geom_aspect.y;
cobitangent_unscaled = vec3(0.0, -geom_aspect.x, 0.0);
}
const vec3 computed_normal =
cross(cobitangent_unscaled, cotangent_unscaled);
const float inv_determinant = inversesqrt(dot(computed_normal, computed_normal));
const vec3 cotangent = cotangent_unscaled * inv_determinant;
const vec3 cobitangent = cobitangent_unscaled * inv_determinant;
// The [cotangent, cobitangent, normal] column vecs form the cotangent
// frame, i.e. the inverse-transpose TBN matrix. Get its transpose:
const mat3x3 object_to_tangent = mat3x3(cotangent, cobitangent, normal);
return object_to_tangent;
}
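// Sanity-check sketch (illustrative, using this file's row-vector
// convention): pushing a true tangent vector through the matrix should
// yield ~(1.0, 0.0, 0.0), and the normal should yield ~(0.0, 0.0, 1.0):
// const vec3 check = (tangent * object_to_tangent); // expect ~(1, 0, 0)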
vec2 get_curved_video_uv_coords_and_tangent_matrix(
const vec2 flat_video_uv, const vec3 eye_pos_local,
const vec2 output_size_inv, const vec2 geom_aspect,
const float geom_mode, const mat3x3 global_to_local,
out mat2x2 pixel_to_tangent_video_uv)
{
// Requires: Parameters:
// 1.) flat_video_uv coords are in range [0.0, 1.0], where
// (0.0, 0.0) is the top-left corner of the screen and
// (1.0, 1.0) is the bottom-right corner.
// 2.) eye_pos_local is the 3D camera position in the simulated
// CRT's local coordinate frame. For best results, it must
// be computed based on the same geom_view_dist used here.
// 3.) output_size_inv = vec2(1.0)/IN.output_size
// 4.) geom_aspect = get_aspect_vector(
// IN.output_size.x / IN.output_size.y);
// 5.) geom_mode is a static or runtime mode setting:
// 0 = off, 1 = sphere, 2 = sphere alt., 3 = cylinder
// 6.) global_to_local is a 3x3 matrix transforming (ordinary)
// worldspace vectors to the CRT's local coordinate frame
// Globals:
// 1.) geom_view_dist must be > 0.0. It controls the "near
// plane" used to interpret flat_video_uv as a view
// vector, which controls the field of view (FOV).
// Returns: Return final uv coords in [0.0, 1.0], and return a pixel-
// space to video_uv tangent-space matrix in the out parameter.
// (This matrix assumes pixel-space +y = down, like +v = down.)
// We'll transform flat_video_uv into a view vector, project
// the view vector from the camera/eye, intersect with a sphere
// or cylinder representing the simulated CRT, and convert the
// intersection position into final uv coords and a local
// transformation matrix.
// First get the 3D view vector (geom_aspect and geom_view_dist are globals):
// 1.) Center uv around (0.0, 0.0) and make (-0.5, -0.5) and (0.5, 0.5)
// correspond to the top-left/bottom-right output screen corners.
// 2.) Multiply by geom_aspect to preemptively "undo" Retroarch's screen-
// space 2D aspect correction. We'll reapply it in uv-space.
// 3.) (x, y) = (u, -v), because +v is down in 2D screenspace, but +y
// is up in 3D worldspace (enforce a right-handed system).
// 4.) The view vector z controls the "near plane" distance and FOV.
// For the effect of "looking through a window" at a CRT, it should be
// set equal to the user's distance from their physical screen, in
// units of the viewport's physical diagonal size.
const vec2 view_uv = (flat_video_uv - vec2(0.5)) * geom_aspect;
const vec3 view_vec_global =
vec3(view_uv.x, -view_uv.y, -geom_view_dist);
// Transform the view vector into the CRT's local coordinate frame, convert
// to video_uv coords, and get the local 3D intersection position:
const vec3 view_vec_local = (view_vec_global * global_to_local);
vec3 pos;
const vec2 centered_uv = view_vec_to_uv(
view_vec_local, eye_pos_local, geom_aspect, geom_mode, pos);
const vec2 video_uv = centered_uv + vec2(0.5);
// Get a pixel-to-tangent-video-uv matrix. The caller could deal with
// all but one of these cases, but that would be more complicated.
#ifdef DRIVERS_ALLOW_DERIVATIVES
// Derivatives obtain a matrix very fast, but the direction of pixel-
// space +y seems to depend on the pass. Enforce the correct direction
// on a best-effort basis (but it shouldn't matter for antialiasing).
const vec2 duv_dx = ddx(video_uv);
const vec2 duv_dy = ddy(video_uv);
#ifdef LAST_PASS
pixel_to_tangent_video_uv = mat2x2(
duv_dx.x, duv_dy.x,
-duv_dx.y, -duv_dy.y);
#else
pixel_to_tangent_video_uv = mat2x2(
duv_dx.x, duv_dy.x,
duv_dx.y, duv_dy.y);
#endif
#else
// Manually define a transformation matrix. We'll assume pixel-space
// +y = down, just like +v = down.
if(geom_force_correct_tangent_matrix)
{
// Get the surface normal based on the local intersection position:
const vec3 normal_base = geom_mode < 2.5 ? pos :
vec3(pos.x, 0.0, pos.z);
const vec3 normal = normalize(normal_base);
// Get pixel-to-object and object-to-tangent matrices and combine
// them into a 2x2 pixel-to-tangent matrix for video_uv offsets:
const mat3x3 pixel_to_object = get_pixel_to_object_matrix(
global_to_local, eye_pos_local, view_vec_global, pos, normal,
output_size_inv);
const mat3x3 object_to_tangent = get_object_to_tangent_matrix(
pos, normal, geom_aspect, geom_mode);
const mat3x3 pixel_to_tangent3x3 =
(pixel_to_object * object_to_tangent);
// Keep the upper-left 2x2 block, which holds the video_uv offsets:
pixel_to_tangent_video_uv = mat2x2(
pixel_to_tangent3x3[0].xy, pixel_to_tangent3x3[1].xy);
}
else
{
// Ignore curvature, and just consider flat scaling. The
// difference is only apparent with strong curvature:
pixel_to_tangent_video_uv = mat2x2(
output_size_inv.x, 0.0, 0.0, output_size_inv.y);
}
#endif
return video_uv;
}
float get_border_dim_factor(const vec2 video_uv, const vec2 geom_aspect)
{
// COPYRIGHT NOTE FOR THIS FUNCTION:
// Copyright (C) 2010-2012 cgwg, 2014 TroggleMonkey
// This function uses an algorithm first coded in several of cgwg's GPL-
// licensed lines in crt-geom-curved.cg and its ancestors. The line
// between algorithm and code is nearly indistinguishable here, so it's
// unclear whether I could even release this project under a non-GPL
// license with this function included.
// Calculate border_dim_factor from the proximity to uv-space image
// borders; geom_aspect/border_size/border_darkness/border_compress are globals:
const vec2 edge_dists = min(video_uv, vec2(1.0) - video_uv) *
geom_aspect;
const vec2 border_penetration =
max(vec2(border_size) - edge_dists, vec2(0.0));
const float penetration_ratio = length(border_penetration)/border_size;
const float border_escape_ratio = max(1.0 - penetration_ratio, 0.0);
const float border_dim_factor =
pow(border_escape_ratio, border_darkness) * max(1.0, border_compress);
return min(border_dim_factor, 1.0);
}
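// Worked example with hypothetical values (not from the source): with
// border_size = 0.015 and an aspect-scaled edge distance of 0.005,
// border_penetration = 0.010, penetration_ratio = 0.010/0.015 = 2/3, and
// border_escape_ratio = 1/3, so border_dim_factor = pow(1/3, border_darkness),
// scaled by max(1.0, border_compress) and finally clamped to <= 1.0.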
#endif // GEOMETRY_FUNCTIONS_H

View file

@@ -32,7 +32,6 @@
// Curvature-related constants:
#define MAX_POINT_CLOUD_SIZE 9
///////////////////////////// CURVATURE FUNCTIONS /////////////////////////////
vec2 quadratic_solve(const float a, const float b_over_2, const float c)
@@ -99,8 +98,8 @@ vec2 cylinder_xyz_to_uv(const vec3 intersection_pos_local,
// Mapping: Define square_uv.x to be the signed arc length in xz-space,
// and define square_uv.y = -intersection_pos_local.y (+v = -y).
// Start with a numerically robust arc length calculation.
-const float angle_from_image_center = atan2(intersection_pos_local.x,
-intersection_pos_local.z);
+const float angle_from_image_center = atan(intersection_pos_local.z,
+intersection_pos_local.x);
const float signed_arc_len = angle_from_image_center * geom_radius;
// Get a uv-mapping where [-0.5, 0.5] maps to a "square" area, then divide
// by the aspect ratio to stretch the mapping appropriately:
@@ -148,7 +147,7 @@ vec2 sphere_xyz_to_uv(const vec3 intersection_pos_local,
const float cp_len =
length(cross(intersection_pos_local, image_center_pos_local));
const float dp = dot(intersection_pos_local, image_center_pos_local);
-const float angle_from_image_center = atan2(cp_len, dp);
+const float angle_from_image_center = atan(dp, cp_len);
const float arc_len = angle_from_image_center * geom_radius;
// Get a uv-mapping where [-0.5, 0.5] maps to a "square" area, then divide
// by the aspect ratio to stretch the mapping appropriately:
@@ -191,9 +190,8 @@ vec2 sphere_alt_xyz_to_uv(const vec3 intersection_pos_local,
// Mapping: Define square_uv.x to be the signed arc length in xz-space,
// and define square_uv.y == signed arc length in yz-space.
// See cylinder_xyz_to_uv() for implementation details (very similar).
-const vec2 angle_from_image_center = atan2(
-vec2(intersection_pos_local.x, -intersection_pos_local.y),
-intersection_pos_local.zz);
+const vec2 angle_from_image_center = atan((intersection_pos_local.zz),
+vec2(intersection_pos_local.x, -intersection_pos_local.y));
const vec2 signed_arc_len = angle_from_image_center * geom_radius;
const vec2 video_uv = signed_arc_len / geom_aspect;
return video_uv;
@@ -213,29 +211,27 @@ vec3 sphere_alt_uv_to_xyz(const vec2 video_uv, const vec2 geom_aspect)
return vec3(xy_pos.x, -xy_pos.y, z_pos);
}
-inline vec2 intersect(const vec3 view_vec_local, const vec3 eye_pos_local,
+vec2 intersect(const vec3 view_vec_local, const vec3 eye_pos_local,
const float geom_mode)
{
-return geom_mode < 2.5 ? intersect_sphere(view_vec_local, eye_pos_local) :
-intersect_cylinder(view_vec_local, eye_pos_local);
+if (geom_mode < 2.5) return intersect_sphere(view_vec_local, eye_pos_local);
+else return intersect_cylinder(view_vec_local, eye_pos_local);
}
-inline vec2 xyz_to_uv(const vec3 intersection_pos_local,
+vec2 xyz_to_uv(const vec3 intersection_pos_local,
const vec2 geom_aspect, const float geom_mode)
{
-return geom_mode < 1.5 ?
-sphere_xyz_to_uv(intersection_pos_local, geom_aspect) :
-geom_mode < 2.5 ?
-sphere_alt_xyz_to_uv(intersection_pos_local, geom_aspect) :
-cylinder_xyz_to_uv(intersection_pos_local, geom_aspect);
+if (geom_mode < 1.5) return sphere_xyz_to_uv(intersection_pos_local, geom_aspect);
+else if (geom_mode < 2.5) return sphere_alt_xyz_to_uv(intersection_pos_local, geom_aspect);
+else return cylinder_xyz_to_uv(intersection_pos_local, geom_aspect);
}
-inline vec3 uv_to_xyz(const vec2 uv, const vec2 geom_aspect,
+vec3 uv_to_xyz(const vec2 uv, const vec2 geom_aspect,
const float geom_mode)
{
-return geom_mode < 1.5 ? sphere_uv_to_xyz(uv, geom_aspect) :
-geom_mode < 2.5 ? sphere_alt_uv_to_xyz(uv, geom_aspect) :
-cylinder_uv_to_xyz(uv, geom_aspect);
+if (geom_mode < 1.5) return sphere_uv_to_xyz(uv, geom_aspect);
+else if (geom_mode < 2.5) return sphere_alt_uv_to_xyz(uv, geom_aspect);
+else return cylinder_uv_to_xyz(uv, geom_aspect);
}
vec2 view_vec_to_uv(const vec3 view_vec_local, const vec3 eye_pos_local,
@@ -251,8 +247,8 @@ vec2 view_vec_to_uv(const vec3 view_vec_local, const vec3 eye_pos_local,
intersection_pos = intersection_pos_local;
// Transform into uv coords, but give out-of-range coords if the
// view ray doesn't intersect the primitive in the first place:
-return intersect_dist_and_discriminant.y > 0.005 ?
-xyz_to_uv(intersection_pos_local, geom_aspect, geom_mode) : vec2(1.0);
+if (intersect_dist_and_discriminant.y > 0.005) return xyz_to_uv(intersection_pos_local, geom_aspect, geom_mode);
+else return vec2(1.0);
}
vec3 get_ideal_global_eye_pos_for_points(vec3 eye_pos,
@@ -378,7 +374,7 @@ vec3 get_ideal_global_eye_pos_for_points(vec3 eye_pos,
return eye_pos;
}
-vec3 get_ideal_global_eye_pos(const vec3x3 local_to_global,
+vec3 get_ideal_global_eye_pos(const mat3x3 local_to_global,
const vec2 geom_aspect, const float geom_mode)
{
// Start with an initial eye_pos that includes the entire primitive
@@ -389,9 +385,8 @@ vec3 get_ideal_global_eye_pos(const vec3x3 local_to_global,
const float fov = abs(acos(dot(high_view, low_view)/len_sq));
// Trigonometry/similar triangles say distance = geom_radius/sin(fov/2):
const float eye_z_spherical = geom_radius/sin(fov*0.5);
-const vec3 eye_pos = geom_mode < 2.5 ?
-vec3(0.0, 0.0, eye_z_spherical) :
-vec3(0.0, 0.0, max(geom_view_dist, eye_z_spherical));
+vec3 eye_pos = vec3(0.0, 0.0, eye_z_spherical);
+if (geom_mode < 2.5) eye_pos = vec3(0.0, 0.0, max(geom_view_dist, eye_z_spherical));
// Get global xyz coords of extreme sample points on the simulated CRT
// screen. Start with the center, edge centers, and corners of the
@@ -401,30 +396,32 @@ vec3 get_ideal_global_eye_pos(const vec3x3 local_to_global,
// hull might not envelope points that do occlude a back-facing point.)
const int num_points = MAX_POINT_CLOUD_SIZE;
vec3 global_coords[MAX_POINT_CLOUD_SIZE];
-global_coords[0] = mul(local_to_global, uv_to_xyz(vec2(0.0, 0.0), geom_aspect, geom_mode));
-global_coords[1] = mul(local_to_global, uv_to_xyz(vec2(0.0, -0.5), geom_aspect, geom_mode));
-global_coords[2] = mul(local_to_global, uv_to_xyz(vec2(0.0, 0.5), geom_aspect, geom_mode));
-global_coords[3] = mul(local_to_global, uv_to_xyz(vec2(-0.5, 0.0), geom_aspect, geom_mode));
-global_coords[4] = mul(local_to_global, uv_to_xyz(vec2(0.5, 0.0), geom_aspect, geom_mode));
-global_coords[5] = mul(local_to_global, uv_to_xyz(vec2(-0.5, -0.5), geom_aspect, geom_mode));
-global_coords[6] = mul(local_to_global, uv_to_xyz(vec2(0.5, -0.5), geom_aspect, geom_mode));
-global_coords[7] = mul(local_to_global, uv_to_xyz(vec2(-0.5, 0.5), geom_aspect, geom_mode));
-global_coords[8] = mul(local_to_global, uv_to_xyz(vec2(0.5, 0.5), geom_aspect, geom_mode));
+global_coords[0] = (uv_to_xyz(vec2(0.0, 0.0), geom_aspect, geom_mode) * local_to_global);
+global_coords[1] = (uv_to_xyz(vec2(0.0, -0.5), geom_aspect, geom_mode) * local_to_global);
+global_coords[2] = (uv_to_xyz(vec2(0.0, 0.5), geom_aspect, geom_mode) * local_to_global);
+global_coords[3] = (uv_to_xyz(vec2(-0.5, 0.0), geom_aspect, geom_mode) * local_to_global);
+global_coords[4] = (uv_to_xyz(vec2(0.5, 0.0), geom_aspect, geom_mode) * local_to_global);
+global_coords[5] = (uv_to_xyz(vec2(-0.5, -0.5), geom_aspect, geom_mode) * local_to_global);
+global_coords[6] = (uv_to_xyz(vec2(0.5, -0.5), geom_aspect, geom_mode) * local_to_global);
+global_coords[7] = (uv_to_xyz(vec2(-0.5, 0.5), geom_aspect, geom_mode) * local_to_global);
+global_coords[8] = (uv_to_xyz(vec2(0.5, 0.5), geom_aspect, geom_mode) * local_to_global);
// Adding more inner image points could help in extreme cases, but too many
// points will kille the framerate. For safety, default to the initial
// eye_pos if any z coords are negative:
float num_negative_z_coords = 0.0;
for(int i = 0; i < num_points; i++)
{
-num_negative_z_coords += float(global_coords[0].z < 0.0);
+if (global_coords[0].z < 0.0)
+{num_negative_z_coords += float(global_coords[0].z);}
}
// Outsource the optimized eye_pos calculation:
-return num_negative_z_coords > 0.5 ? eye_pos :
-get_ideal_global_eye_pos_for_points(eye_pos, geom_aspect,
-global_coords, num_points);
+if (num_negative_z_coords > 0.5)
+return eye_pos;
+else
+return get_ideal_global_eye_pos_for_points(eye_pos, geom_aspect, global_coords, num_points);
}
-vec3x3 get_pixel_to_object_matrix(const vec3x3 global_to_local,
+mat3x3 get_pixel_to_object_matrix(const mat3x3 global_to_local,
const vec3 eye_pos_local, const vec3 view_vec_global,
const vec3 intersection_pos_local, const vec3 normal,
const vec2 output_size_inv)
@@ -451,12 +448,12 @@ vec3x3 get_pixel_to_object_matrix(const vec3x3 global_to_local,
const vec3 view_vec_down_global = view_vec_global +
vec3(0.0, -output_size_inv.y, 0.0);
const vec3 view_vec_right_local =
-mul(global_to_local, view_vec_right_global);
+(view_vec_right_global * global_to_local);
const vec3 view_vec_down_local =
-mul(global_to_local, view_vec_down_global);
+(view_vec_down_global * global_to_local);
// 2.) Using the true intersection point, intersect the neighboring
// view vectors with the tangent plane:
-const vec3 intersection_vec_dot_normal = dot(pos - eye_pos, normal);
+const vec3 intersection_vec_dot_normal = vec3(dot(pos - eye_pos, normal));
const vec3 right_pos = eye_pos + (intersection_vec_dot_normal /
dot(view_vec_right_local, normal))*view_vec_right_local;
const vec3 down_pos = eye_pos + (intersection_vec_dot_normal /
@@ -469,14 +466,14 @@ vec3x3 get_pixel_to_object_matrix(const vec3x3 global_to_local,
// transformation is 2D to 3D, so use (0, 0, 0) for the third vector.
const vec3 object_right_vec = right_pos - pos;
const vec3 object_down_vec = down_pos - pos;
-const vec3x3 pixel_to_object = vec3x3(
+const mat3x3 pixel_to_object = mat3x3(
object_right_vec.x, object_down_vec.x, 0.0,
object_right_vec.y, object_down_vec.y, 0.0,
object_right_vec.z, object_down_vec.z, 0.0);
return pixel_to_object;
}
-vec3x3 get_object_to_tangent_matrix(const vec3 intersection_pos_local,
+mat3x3 get_object_to_tangent_matrix(const vec3 intersection_pos_local,
const vec3 normal, const vec2 geom_aspect, const float geom_mode)
{
// Requires: See get_curved_video_uv_coords_and_tangent_matrix for
@@ -493,7 +490,7 @@ vec3x3 get_object_to_tangent_matrix(const vec3 intersection_pos_local,
// We want the inverse of the TBN matrix (transpose of the cotangent
// matrix), which transforms ordinary vectors from object->tangent space.
// Start by calculating the relevant basis vectors in accordance with
-// Christian SchÃ¼ler's blog post "Followup: Normal Mapping Without
+// Christian Schüler's blog post "Followup: Normal Mapping Without
// Precomputed Tangents": http://www.thetenthplanet.de/archives/1180
// With our particular uv mapping, the scale of the u and v directions
// is determined entirely by the aspect ratio for cylindrical and ordinary
@@ -551,20 +548,20 @@ vec3x3 get_object_to_tangent_matrix(const vec3 intersection_pos_local,
}
const vec3 computed_normal =
cross(cobitangent_unscaled, cotangent_unscaled);
-const float inv_determinant = rsqrt(dot(computed_normal, computed_normal));
+const float inv_determinant = inversesqrt(dot(computed_normal, computed_normal));
const vec3 cotangent = cotangent_unscaled * inv_determinant;
const vec3 cobitangent = cobitangent_unscaled * inv_determinant;
// The [cotangent, cobitangent, normal] column vecs form the cotangent
// frame, i.e. the inverse-transpose TBN matrix. Get its transpose:
-const vec3x3 object_to_tangent = vec3x3(cotangent, cobitangent, normal);
+const mat3x3 object_to_tangent = mat3x3(cotangent, cobitangent, normal);
return object_to_tangent;
}
vec2 get_curved_video_uv_coords_and_tangent_matrix(
const vec2 flat_video_uv, const vec3 eye_pos_local,
const vec2 output_size_inv, const vec2 geom_aspect,
-const float geom_mode, const vec3x3 global_to_local,
-out vec2x2 pixel_to_tangent_video_uv)
+const float geom_mode, const mat3x3 global_to_local,
+out mat2x2 pixel_to_tangent_video_uv)
{
// Requires: Parameters:
// 1.) flat_video_uv coords are in range [0.0, 1.0], where
@@ -608,7 +605,7 @@ vec2 get_curved_video_uv_coords_and_tangent_matrix(
vec3(view_uv.x, -view_uv.y, -geom_view_dist);
// Transform the view vector into the CRT's local coordinate frame, convert
// to video_uv coords, and get the local 3D intersection position:
-const vec3 view_vec_local = mul(global_to_local, view_vec_global);
+const vec3 view_vec_local = (view_vec_global * global_to_local);
vec3 pos;
const vec2 centered_uv = view_vec_to_uv(
view_vec_local, eye_pos_local, geom_aspect, geom_mode, pos);
@@ -622,11 +619,11 @@ vec2 get_curved_video_uv_coords_and_tangent_matrix(
const vec2 duv_dx = ddx(video_uv);
const vec2 duv_dy = ddy(video_uv);
#ifdef LAST_PASS
-pixel_to_tangent_video_uv = vec2x2(
+pixel_to_tangent_video_uv = mat2x2(
duv_dx.x, duv_dy.x,
-duv_dx.y, -duv_dy.y);
#else
-pixel_to_tangent_video_uv = vec2x2(
+pixel_to_tangent_video_uv = mat2x2(
duv_dx.x, duv_dy.x,
duv_dx.y, duv_dy.y);
#endif
@@ -636,58 +633,30 @@ vec2 get_curved_video_uv_coords_and_tangent_matrix(
if(geom_force_correct_tangent_matrix)
{
// Get the surface normal based on the local intersection position:
-const vec3 normal_base = geom_mode < 2.5 ? pos :
-vec3(pos.x, 0.0, pos.z);
+vec3 normal_base = pos;
+if (geom_mode > 2.5) normal_base = vec3(pos.x, 0.0, pos.z);
const vec3 normal = normalize(normal_base);
// Get pixel-to-object and object-to-tangent matrices and combine
// them into a 2x2 pixel-to-tangent matrix for video_uv offsets:
-const vec3x3 pixel_to_object = get_pixel_to_object_matrix(
+const mat3x3 pixel_to_object = get_pixel_to_object_matrix(
global_to_local, eye_pos_local, view_vec_global, pos, normal,
output_size_inv);
-const vec3x3 object_to_tangent = get_object_to_tangent_matrix(
+const mat3x3 object_to_tangent = get_object_to_tangent_matrix(
pos, normal, geom_aspect, geom_mode);
-const vec3x3 pixel_to_tangent3x3 =
-mul(object_to_tangent, pixel_to_object);
-pixel_to_tangent_video_uv = vec2x2(
-pixel_to_tangent3x3._m00_m01_m10_m11);
+const mat3x3 pixel_to_tangent3x3 =
+(pixel_to_object * object_to_tangent);
+pixel_to_tangent_video_uv = mat2x2(
+pixel_to_tangent3x3[0].xyz, pixel_to_tangent3x3[1].x);
}
else
{
// Ignore curvature, and just consider flat scaling. The
// difference is only apparent with strong curvature:
-pixel_to_tangent_video_uv = vec2x2(
+pixel_to_tangent_video_uv = mat2x2(
output_size_inv.x, 0.0, 0.0, output_size_inv.y);
}
#endif
return video_uv;
}
-float get_border_dim_factor(const vec2 video_uv, const vec2 geom_aspect)
-{
-// COPYRIGHT NOTE FOR THIS FUNCTION:
-// Copyright (C) 2010-2012 cgwg, 2014 TroggleMonkey
-// This function uses an algorithm first coded in several of cgwg's GPL-
-// licensed lines in crt-geom-curved.cg and its ancestors. The line
-// between algorithm and code is nearly indistinguishable here, so it's
-// unclear whether I could even release this project under a non-GPL
-// license with this function included.
-// Calculate border_dim_factor from the proximity to uv-space image
-// borders; geom_aspect/border_size/border/darkness/border_compress are globals:
-const vec2 edge_dists = min(video_uv, vec2(1.0) - video_uv) *
-geom_aspect;
-const vec2 border_penetration =
-max(vec2(border_size) - edge_dists, vec2(0.0));
-const float penetration_ratio = length(border_penetration)/border_size;
-const float border_escape_ratio = max(1.0 - penetration_ratio, 0.0);
-const float border_dim_factor =
-pow(border_escape_ratio, border_darkness) * max(1.0, border_compress);
-return min(border_dim_factor, 1.0);
-}
#endif // GEOMETRY_FUNCTIONS_H

View file

@@ -80,6 +80,18 @@
const vec4 weights = min(sin(pi_dist)/pi_dist, vec4(1.0));
#endif
#define HORIZONTAL_SINC_RESAMPLE_LOOP_BODY \
CALCULATE_R_COORD_FOR_4_SAMPLES; \
const vec3 new_sample0 = tex2Dlod0try(texture, \
vec2(tex_uv_r.x, tex_uv.y)).rgb; \
const vec3 new_sample1 = tex2Dlod0try(texture, \
vec2(tex_uv_r.y, tex_uv.y)).rgb; \
const vec3 new_sample2 = tex2Dlod0try(texture, \
vec2(tex_uv_r.z, tex_uv.y)).rgb; \
const vec3 new_sample3 = tex2Dlod0try(texture, \
vec2(tex_uv_r.w, tex_uv.y)).rgb; \
UPDATE_COLOR_AND_WEIGHT_SUMS;
////////////////////////////////// CONSTANTS /////////////////////////////////
// The larger the resized tile, the fewer samples we'll need for downsizing.
@@ -282,6 +294,69 @@ vec2 get_resized_mask_tile_size(const vec2 estimated_viewport_size,
return final_resized_tile_size;
}
///////////////////////// FINAL MASK SAMPLING HELPERS ////////////////////////
vec4 get_mask_sampling_parameters(const vec2 mask_resize_texture_size,
const vec2 mask_resize_video_size, const vec2 true_viewport_size,
out vec2 mask_tiles_per_screen)
{
// Requires: 1.) Requirements of get_resized_mask_tile_size() must be
// met, particularly regarding global constants.
// The function parameters must be defined as follows:
// 1.) mask_resize_texture_size == MASK_RESIZE.texture_size
// if get_mask_sample_mode() is 0 (otherwise anything)
// 2.) mask_resize_video_size == MASK_RESIZE.video_size
// if get_mask_sample_mode() is 0 (otherwise anything)
// 3.) true_viewport_size == IN.output_size for a pass set to
// 1.0 viewport scale (i.e. it must be correct)
// Returns: Return a vec4 containing:
// xy: tex_uv coords for the start of the mask tile
// zw: tex_uv size of the mask tile from start to end
// mask_tiles_per_screen is an out parameter containing the
// number of mask tiles that will fit on the screen.
// First get the final resized tile size. The viewport size and mask
// resize viewport scale must be correct, but don't solemnly swear they
// were correct in both mask resize passes unless you know it's true.
// (We can better ensure a correct tile aspect ratio if the parameters are
// guaranteed correct in all passes...but if we lie, we'll get inconsistent
// sizes across passes, resulting in broken texture coordinates.)
const float mask_sample_mode = get_mask_sample_mode();
const vec2 mask_resize_tile_size = get_resized_mask_tile_size(
true_viewport_size, mask_resize_video_size, false);
if(mask_sample_mode < 0.5)
{
// Sample MASK_RESIZE: The resized tile is a fraction of the texture
// size and starts at a nonzero offset to allow for border texels:
const vec2 mask_tile_uv_size = mask_resize_tile_size /
mask_resize_texture_size;
const vec2 skipped_tiles = mask_start_texels/mask_resize_tile_size;
const vec2 mask_tile_start_uv = skipped_tiles * mask_tile_uv_size;
// mask_tiles_per_screen must be based on the *true* viewport size:
mask_tiles_per_screen = true_viewport_size / mask_resize_tile_size;
return vec4(mask_tile_start_uv, mask_tile_uv_size);
}
else
{
// If we're tiling at the original size (1:1 pixel:texel), redefine a
// "tile" to be the full texture containing many triads. Otherwise,
// we're hardware-resampling an LUT, and the texture truly contains a
// single unresized phosphor mask tile anyway.
const vec2 mask_tile_uv_size = vec2(1.0);
const vec2 mask_tile_start_uv = vec2(0.0);
if(mask_sample_mode > 1.5)
{
// Repeat the full LUT at a 1:1 pixel:texel ratio without resizing:
mask_tiles_per_screen = true_viewport_size/mask_texture_large_size;
}
else
{
// Hardware-resize the original LUT:
mask_tiles_per_screen = true_viewport_size / mask_resize_tile_size;
}
return vec4(mask_tile_start_uv, mask_tile_uv_size);
}
}
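// Worked example for the mode-0 branch (hypothetical numbers, not from any
// preset): with a 64.0x48.0 resized tile inside a 320x256 MASK_RESIZE
// texture, a 1920x1080 viewport, and mask_start_texels = vec2(8.0):
//     mask_tile_uv_size     = (64, 48)/(320, 256)   = (0.2, 0.1875)
//     skipped_tiles         = (8, 8)/(64, 48)       = (0.125, ~0.1667)
//     mask_tile_start_uv    = (0.125, 0.1667) * (0.2, 0.1875) ~= (0.025, 0.03125)
//     mask_tiles_per_screen = (1920, 1080)/(64, 48) = (30.0, 22.5)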
//////////////////////////// RESAMPLING FUNCTIONS ////////////////////////////
vec3 downsample_vertical_sinc_tiled(const sampler2D texture,
@ -392,5 +467,159 @@ vec3 downsample_vertical_sinc_tiled(const sampler2D texture,
return (pixel_color/scalar_weight_sum);
}
vec3 downsample_horizontal_sinc_tiled(const sampler2D tex,
const vec2 tex_uv, const vec2 texture_size, const float dr,
const float magnification_scale, const float tile_size_uv_r)
{
// Differences from downsample_vertical_sinc_tiled:
// 1.) The dr and tile_size_uv_r parameters are not static consts.
// 2.) The "vertical" parameter to get_first_texel_tile_uv_and_dist is
// set to false instead of true.
// 3.) The horizontal version of the loop body is used.
// TODO: If we can get guaranteed compile-time dead code elimination,
// we can combine the vertical/horizontal downsampling functions by:
// 1.) Add an extra static const bool parameter called "vertical."
// 2.) Supply it with the result of get_first_texel_tile_uv_and_dist().
// 3.) Use a conditional assignment in the loop body macro. This is the
// tricky part: We DO NOT want to incur the extra conditional
// assignment in the inner loop at runtime!
// The "r" in "dr," "tile_size_uv_r," etc. refers to the dimension
// we're resizing along, e.g. "dx" in this case.
#ifdef USE_SINGLE_STATIC_LOOP
// If we have to load all samples, we might as well use them.
const int samples = int(max_sinc_resize_samples_m4);
#else
const int samples = int(get_dynamic_loop_size(magnification_scale));
#endif
// Get the first sample location (scalar tile uv coord along resized
// dimension) and distance from the output location (in texels):
const float input_tiles_per_texture_r = 1.0/tile_size_uv_r;
// false = horizontal resize:
const vec2 first_texel_tile_r_and_dist = get_first_texel_tile_uv_and_dist(
tex_uv, texture_size, dr, input_tiles_per_texture_r, samples, false);
const vec4 first_texel_tile_uv_rrrr = first_texel_tile_r_and_dist.xxxx;
const vec4 first_dist_unscaled = first_texel_tile_r_and_dist.yyyy;
// Get the tile sample offset:
const float tile_dr = dr * input_tiles_per_texture_r;
// Sum up each weight and weighted sample color, varying the looping
// strategy based on our expected dynamic loop capabilities. See the
// loop body macros above.
int i_base = 0;
vec4 weight_sum = vec4(0.0);
vec3 pixel_color = vec3(0.0);
const int i_step = 4;
#ifdef BREAK_LOOPS_INTO_PIECES
if(samples - i_base >= 64)
{
for(int i = 0; i < 64; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
i_base += 64;
}
if(samples - i_base >= 32)
{
for(int i = 0; i < 32; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
i_base += 32;
}
if(samples - i_base >= 16)
{
for(int i = 0; i < 16; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
i_base += 16;
}
if(samples - i_base >= 8)
{
for(int i = 0; i < 8; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
i_base += 8;
}
if(samples - i_base >= 4)
{
for(int i = 0; i < 4; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
i_base += 4;
}
// Do another 4-sample block for a total of 128 max samples.
if(samples - i_base > 0)
{
for(int i = 0; i < 4; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
}
#else
for(int i = 0; i < samples; i += i_step)
{
HORIZONTAL_SINC_RESAMPLE_LOOP_BODY;
}
#endif
// Normalize so the weight_sum == 1.0, and return:
const vec2 weight_sum_reduce = weight_sum.xy + weight_sum.zw;
const vec3 scalar_weight_sum = vec3(weight_sum_reduce.x +
weight_sum_reduce.y);
return (pixel_color/scalar_weight_sum);
}
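// A concrete trace of the BREAK_LOOPS_INTO_PIECES ladder above: with
// samples == 44 (always a multiple of i_step == 4 here), the 64-block is
// skipped, the 32-block runs (i_base -> 32), the 16-block is skipped
// (44 - 32 < 16), then the 8- and 4-blocks run (i_base -> 40 -> 44), and the
// trailing block sees samples - i_base == 0 and does nothing: exactly 44
// samples are taken. Worst case is 64 + 32 + 16 + 8 + 4 + 4 == 128 samples.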
vec2 convert_phosphor_tile_uv_wrap_to_tex_uv(const vec2 tile_uv_wrap,
const vec4 mask_tile_start_uv_and_size)
{
// Requires: 1.) tile_uv_wrap contains tile-relative uv coords, where the
// tile spans from [0, 1], such that (0.5, 0.5) is at the
// tile center. The input coords can range from [0, inf],
// and their fractional parts map to a repeated tile.
// ("Tile" can mean texture, the video embedded in the
// texture, or some other "tile" embedded in a texture.)
// 2.) mask_tile_start_uv_and_size.xy contains tex_uv coords
// for the start of the embedded tile in the full texture.
// 3.) mask_tile_start_uv_and_size.zw contains the [fractional]
// tex_uv size of the embedded tile in the full texture.
// Returns: Return tex_uv coords (used for texture sampling)
// corresponding to tile_uv_wrap.
if(get_mask_sample_mode() < 0.5)
{
// Manually repeat the resized mask tile to fill the screen:
// First get fractional tile_uv coords. Using fract/fmod on coords
// confuses anisotropic filtering; fix it as user options dictate.
// derived-settings-and-constants.h disables incompatible options.
#ifdef ANISOTROPIC_TILING_COMPAT_TILE_FLAT_TWICE
vec2 tile_uv = fract(tile_uv_wrap * 0.5) * 2.0;
#else
vec2 tile_uv = fract(tile_uv_wrap);
#endif
#ifdef ANISOTROPIC_TILING_COMPAT_FIX_DISCONTINUITIES
const vec2 tile_uv_dx = dFdx(tile_uv);
const vec2 tile_uv_dy = dFdy(tile_uv);
tile_uv = fix_tiling_discontinuities_normalized(tile_uv,
tile_uv_dx, tile_uv_dy);
#endif
// The tile is embedded in a padded FBO, and it may start at a
// nonzero offset if border texels are used to avoid artifacts:
const vec2 mask_tex_uv = mask_tile_start_uv_and_size.xy +
tile_uv * mask_tile_start_uv_and_size.zw;
return mask_tex_uv;
}
else
{
// Sample from the input phosphor mask texture with hardware tiling.
// If we're tiling at the original size (mode 2), the "tile" is the
// whole texture, and it contains a large number of triads mapped with
// a 1:1 pixel:texel ratio. OTHERWISE, the texture contains a single
// unresized tile. tile_uv_wrap already has correct coords for both!
return tile_uv_wrap;
}
}
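// Putting the two mask helpers together, a consuming pass looks roughly like
// this sketch (video_uv, mask_sampler, and the size arguments stand in for
// the caller's actual uniforms):
//     vec2 mask_tiles_per_screen;
//     const vec4 mask_tile_start_uv_and_size = get_mask_sampling_parameters(
//         mask_resize_texture_size, mask_resize_video_size,
//         true_viewport_size, mask_tiles_per_screen);
//     // Scale [0, 1] screen uv into tile-relative coords, then map the
//     // wrapped coords back into the mask texture:
//     const vec2 tile_uv_wrap = video_uv * mask_tiles_per_screen;
//     const vec2 mask_tex_uv = convert_phosphor_tile_uv_wrap_to_tex_uv(
//         tile_uv_wrap, mask_tile_start_uv_and_size);
//     const vec3 phosphor_mask = texture(mask_sampler, mask_tex_uv).rgb;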
#endif // PHOSPHOR_MASK_RESIZING_H

View file

@ -29,6 +29,188 @@
///////////////////////////// SCANLINE FUNCTIONS /////////////////////////////
vec3 get_raw_interpolated_color(const vec3 color0,
const vec3 color1, const vec3 color2, const vec3 color3,
const vec4 weights)
{
// Use max to avoid bizarre artifacts from negative colors:
return max(mat4x3(color0, color1, color2, color3) * weights, 0.0);
}
vec3 get_interpolated_linear_color(const vec3 color0, const vec3 color1,
const vec3 color2, const vec3 color3, const vec4 weights)
{
// Requires: 1.) Requirements of include/gamma-management.h must be met:
// intermediate_gamma must be globally defined, and input
// colors are interpreted as linear RGB unless you #define
// GAMMA_ENCODE_EVERY_FBO (in which case they are
// interpreted as gamma-encoded with intermediate_gamma).
// 2.) color0-3 are colors sampled from a texture with tex2D().
// They are interpreted as defined in requirement 1.
// 3.) weights contains weights for each color, summing to 1.0.
// 4.) beam_horiz_linear_rgb_weight must be defined as a global
// float in [0.0, 1.0] describing how much blending should
// be done in linear RGB (rest is gamma-corrected RGB).
// 5.) RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE must be #defined
// if beam_horiz_linear_rgb_weight is anything other than a
// static constant, or we may try branching at runtime
// without dynamic branches allowed (slow).
// Returns: Return an interpolated color lookup between the four input
// colors based on the weights in weights. The final color will
// be a linear RGB value, but the blending will be done as
// indicated above.
const float intermediate_gamma = get_intermediate_gamma();
// Branch if beam_horiz_linear_rgb_weight is static (for free) or if the
// profile allows dynamic branches (faster than computing extra pows):
#ifndef RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE
#define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
#else
#ifdef DRIVERS_ALLOW_DYNAMIC_BRANCHES
#define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
#endif
#endif
#ifdef SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
// beam_horiz_linear_rgb_weight is static, so we can branch:
#ifdef GAMMA_ENCODE_EVERY_FBO
const vec3 gamma_mixed_color = pow(get_raw_interpolated_color(
color0, color1, color2, color3, weights), vec3(intermediate_gamma));
if(beam_horiz_linear_rgb_weight > 0.0)
{
const vec3 linear_mixed_color = get_raw_interpolated_color(
pow(color0, vec3(intermediate_gamma)),
pow(color1, vec3(intermediate_gamma)),
pow(color2, vec3(intermediate_gamma)),
pow(color3, vec3(intermediate_gamma)),
weights);
return mix(gamma_mixed_color, linear_mixed_color,
beam_horiz_linear_rgb_weight);
}
else
{
return gamma_mixed_color;
}
#else
const vec3 linear_mixed_color = get_raw_interpolated_color(
color0, color1, color2, color3, weights);
if(beam_horiz_linear_rgb_weight < 1.0)
{
const vec3 gamma_mixed_color = get_raw_interpolated_color(
pow(color0, vec3(1.0/intermediate_gamma)),
pow(color1, vec3(1.0/intermediate_gamma)),
pow(color2, vec3(1.0/intermediate_gamma)),
pow(color3, vec3(1.0/intermediate_gamma)),
weights);
return mix(gamma_mixed_color, linear_mixed_color,
beam_horiz_linear_rgb_weight);
}
else
{
return linear_mixed_color;
}
#endif // GAMMA_ENCODE_EVERY_FBO
#else
#ifdef GAMMA_ENCODE_EVERY_FBO
// Inputs: color0-3 are colors in gamma-encoded RGB.
const vec3 gamma_mixed_color = pow(get_raw_interpolated_color(
color0, color1, color2, color3, weights), vec3(intermediate_gamma));
const vec3 linear_mixed_color = get_raw_interpolated_color(
pow(color0, vec3(intermediate_gamma)),
pow(color1, vec3(intermediate_gamma)),
pow(color2, vec3(intermediate_gamma)),
pow(color3, vec3(intermediate_gamma)),
weights);
return mix(gamma_mixed_color, linear_mixed_color,
beam_horiz_linear_rgb_weight);
#else
// Inputs: color0-3 are colors in linear RGB.
const vec3 linear_mixed_color = get_raw_interpolated_color(
color0, color1, color2, color3, weights);
const vec3 gamma_mixed_color = get_raw_interpolated_color(
pow(color0, vec3(1.0/intermediate_gamma)),
pow(color1, vec3(1.0/intermediate_gamma)),
pow(color2, vec3(1.0/intermediate_gamma)),
pow(color3, vec3(1.0/intermediate_gamma)),
weights);
return mix(gamma_mixed_color, linear_mixed_color,
beam_horiz_linear_rgb_weight);
#endif // GAMMA_ENCODE_EVERY_FBO
#endif // SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
}
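// The linear/gamma split is not cosmetic: blending before vs. after gamma
// decoding gives different answers. Worked example, intermediate_gamma = 2.2,
// 50/50 mix of black and white:
//     blend encoded, then decode: pow(0.5*0.0 + 0.5*1.0, 2.2) ~= 0.218 linear
//     decode, then blend linear:  0.5*pow(0.0, 2.2) + 0.5*pow(1.0, 2.2) = 0.5
// beam_horiz_linear_rgb_weight interpolates between those two behaviors.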
vec3 get_scanline_color(const sampler2D tex, const vec2 scanline_uv,
const vec2 uv_step_x, const vec4 weights)
{
// Requires: 1.) scanline_uv must be vertically snapped to the caller's
// desired line or scanline and horizontally snapped to the
// texel just left of the output pixel (color1)
// 2.) uv_step_x must contain the horizontal uv distance
// between texels.
// 3.) weights must contain interpolation filter weights for
// color0, color1, color2, and color3, where color1 is just
// left of the output pixel.
// Returns: Return a horizontally interpolated texture lookup using 2-4
// nearby texels, according to weights and the conventions of
// get_interpolated_linear_color().
// We can ignore the outside texture lookups for Quilez resampling.
const vec3 color1 = texture(tex, scanline_uv).rgb;
const vec3 color2 = texture(tex, scanline_uv + uv_step_x).rgb;
vec3 color0 = vec3(0.0);
vec3 color3 = vec3(0.0);
if(beam_horiz_filter > 0.5)
{
color0 = texture(tex, scanline_uv - uv_step_x).rgb;
color3 = texture(tex, scanline_uv + 2.0 * uv_step_x).rgb;
}
// Sample the texture as-is, whether it's linear or gamma-encoded:
// get_interpolated_linear_color() will handle the difference.
return get_interpolated_linear_color(color0, color1, color2, color3, weights);
}
vec3 sample_single_scanline_horizontal(const sampler2D tex,
const vec2 tex_uv, const vec2 texture_size,
const vec2 texture_size_inv)
{
// TODO: Add function requirements.
// Snap to the previous texel and get sample dists from 2/4 nearby texels:
const vec2 curr_texel = tex_uv * texture_size;
// Use under_half to fix a rounding bug right around exact texel locations.
const vec2 prev_texel =
floor(curr_texel - vec2(under_half)) + vec2(0.5);
const vec2 prev_texel_hor = vec2(prev_texel.x, curr_texel.y);
const vec2 prev_texel_hor_uv = prev_texel_hor * texture_size_inv;
const float prev_dist = curr_texel.x - prev_texel_hor.x;
const vec4 sample_dists = vec4(1.0 + prev_dist, prev_dist,
1.0 - prev_dist, 2.0 - prev_dist);
// Get Quilez, Lanczos2, or Gaussian resize weights for 2/4 nearby texels:
vec4 weights;
if(beam_horiz_filter < 0.5)
{
// Quilez:
const float x = sample_dists.y;
const float w2 = x*x*x*(x*(x*6.0 - 15.0) + 10.0);
weights = vec4(0.0, 1.0 - w2, w2, 0.0);
}
else if(beam_horiz_filter < 1.5)
{
// Gaussian:
float inner_denom_inv = 1.0/(2.0*beam_horiz_sigma*beam_horiz_sigma);
weights = exp(-(sample_dists*sample_dists)*inner_denom_inv);
}
else
{
// Lanczos2:
const vec4 pi_dists = FIX_ZERO(sample_dists * pi);
weights = 2.0 * sin(pi_dists) * sin(pi_dists * 0.5) /
(pi_dists * pi_dists);
}
// Ensure the weight sum == 1.0:
const vec4 final_weights = weights/dot(weights, vec4(1.0));
// Get the interpolated horizontal scanline color:
const vec2 uv_step_x = vec2(texture_size_inv.x, 0.0);
return get_scanline_color(
tex, prev_texel_hor_uv, uv_step_x, final_weights);
}
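// For intuition about the weights above (illustrative numbers): if the
// output point lies a quarter texel right of color1's center, prev_dist =
// 0.25 and sample_dists = (1.25, 0.25, 0.75, 1.75). The Gaussian branch with
// beam_horiz_sigma = 0.35 gives raw weights
//     exp(-sample_dists*sample_dists/(2.0*0.35*0.35)) ~= (0.0017, 0.7749, 0.1006, 0.0000)
// summing to ~0.877; after normalization the two nearest texels carry about
// 99.8% of the weight.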
bool is_interlaced(float num_lines)
{
// Detect interlacing based on the number of lines in the source.
@ -64,6 +246,36 @@ bool is_interlaced(float num_lines)
}
}
vec3 sample_rgb_scanline_horizontal(const sampler2D tex,
const vec2 tex_uv, const vec2 texture_size,
const vec2 texture_size_inv)
{
// TODO: Add function requirements.
// Rely on a helper to make convergence easier.
if(beam_misconvergence)
{
const vec3 convergence_offsets_rgb =
get_convergence_offsets_x_vector();
const vec3 offset_u_rgb =
convergence_offsets_rgb * texture_size_inv.xxx;
const vec2 scanline_uv_r = tex_uv - vec2(offset_u_rgb.r, 0.0);
const vec2 scanline_uv_g = tex_uv - vec2(offset_u_rgb.g, 0.0);
const vec2 scanline_uv_b = tex_uv - vec2(offset_u_rgb.b, 0.0);
const vec3 sample_r = sample_single_scanline_horizontal(
tex, scanline_uv_r, texture_size, texture_size_inv);
const vec3 sample_g = sample_single_scanline_horizontal(
tex, scanline_uv_g, texture_size, texture_size_inv);
const vec3 sample_b = sample_single_scanline_horizontal(
tex, scanline_uv_b, texture_size, texture_size_inv);
return vec3(sample_r.r, sample_g.g, sample_b.b);
}
else
{
return sample_single_scanline_horizontal(tex, tex_uv, texture_size,
texture_size_inv);
}
}
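// Schematically, the misconvergence path above simulates a static horizontal
// convergence error without blurring: each channel's lookup uv is shifted by
// its own offset (note the subtraction), three full RGB lookups are taken,
// and one channel survives from each:
//     result = vec3(sample_r.r, sample_g.g, sample_b.b);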
vec2 get_last_scanline_uv(const vec2 tex_uv, const vec2 texture_size,
const vec2 texture_size_inv, const vec2 il_step_multiple,
const float frame_count, out float dist)

File diff suppressed because it is too large.

View file

@ -29,7 +29,7 @@
// space offsets to texture uv offsets. You can get this with:
// const vec2 duv_dx = ddx(tex_uv);
// const vec2 duv_dy = ddy(tex_uv);
// const vec2x2 pixel_to_tex_uv = vec2x2( // const mat2x2 pixel_to_tex_uv = mat2x2(
// duv_dx.x, duv_dy.x,
// duv_dx.y, duv_dy.y);
// This is left to the user in case the current Cg profile
@ -38,7 +38,7 @@
// If not, a simple flat mapping can be obtained with:
// const vec2 xy_to_uv_scale = IN.output_size *
// IN.video_size/IN.texture_size;
// const vec2x2 pixel_to_tex_uv = vec2x2( // const mat2x2 pixel_to_tex_uv = mat2x2(
// xy_to_uv_scale.x, 0.0,
// 0.0, xy_to_uv_scale.y);
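// Porting note: this commit consistently rewrites Cg's mul(matrix, vector)
// as GLSL's vector * matrix (see the tex2Daa changes below), yet the
// constructors above keep the same scalar order. That works because GLSL's
// mat2x2 constructor is column-major, so it stores the transpose of Cg's
// row-major matrix, and left-multiplying by the vector undoes the transpose:
//     const mat2x2 pixel_to_tex_uv = mat2x2(
//         duv_dx.x, duv_dy.x,
//         duv_dx.y, duv_dy.y);
//     // offset * pixel_to_tex_uv == offset.x*duv_dx + offset.y*duv_dy,
//     // matching what Cg's mul(pixel_to_tex_uv, offset) produced.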
// Optional: To set basic AA settings, #define ANTIALIAS_OVERRIDE_BASICS and:
@ -154,7 +154,6 @@
// exploit temporal AA better, but it would require a dynamic branch or a lot
// of conditional moves, so it's prohibitively slow for the minor benefit.
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
#ifndef ANTIALIAS_OVERRIDE_BASICS
@ -176,7 +175,7 @@
// only, and use only the first lobe vertically or a box filter, over a
// correspondingly smaller range. This compensates for the sparse sampling
// grid's typically large positive/negative x/y covariance.
const vec2 aa_xy_axis_importance = vec2 aa_xy_axis_importance =
aa_filter < 5.5 ? vec2(1.0) : // Box, tent, Gaussian
aa_filter < 8.5 ? vec2(1.0, 0.0) : // Cubic and Lanczos sinc
aa_filter < 9.5 ? vec2(1.0, 1.0/aa_lanczos_lobes) : // Lanczos jinc
@ -200,11 +199,10 @@
}
#endif
////////////////////////////////// INCLUDES //////////////////////////////////
#include "../../../../include/gamma-management.h" //#include "../../../../include/gamma-management.h"
#include "gamma-management.h"
////////////////////////////////// CONSTANTS /////////////////////////////////
@ -216,14 +214,14 @@ const float aa_cubic_support = 2.0;
// We'll want to define these only once per fragment at most.
#ifdef RUNTIME_ANTIALIAS_WEIGHTS
uniform float aa_cubic_b; float aa_cubic_b;
uniform float cubic_branch1_x3_coeff; float cubic_branch1_x3_coeff;
uniform float cubic_branch1_x2_coeff; float cubic_branch1_x2_coeff;
uniform float cubic_branch1_x0_coeff; float cubic_branch1_x0_coeff;
uniform float cubic_branch2_x3_coeff; float cubic_branch2_x3_coeff;
uniform float cubic_branch2_x2_coeff; float cubic_branch2_x2_coeff;
uniform float cubic_branch2_x1_coeff; float cubic_branch2_x1_coeff;
uniform float cubic_branch2_x0_coeff; float cubic_branch2_x0_coeff;
#endif
@ -252,13 +250,13 @@ void assign_aa_cubic_constants()
vec4 get_subpixel_support_diam_and_final_axis_importance()
{
// Statically select the base support radius:
const float base_support_radius = float base_support_radius;
aa_filter < 1.5 ? aa_box_support : if(aa_filter < 1.5) base_support_radius = aa_box_support;
aa_filter < 3.5 ? aa_tent_support : else if(aa_filter < 3.5) base_support_radius = aa_tent_support;
aa_filter < 5.5 ? aa_gauss_support : else if(aa_filter < 5.5) base_support_radius = aa_gauss_support;
aa_filter < 7.5 ? aa_cubic_support : else if(aa_filter < 7.5) base_support_radius = aa_cubic_support;
aa_filter < 9.5 ? aa_lanczos_lobes : else if(aa_filter < 9.5) base_support_radius = aa_lanczos_lobes;
aa_box_support; // Default to box else base_support_radius = aa_box_support; // Default to box
// Expand the filter support for subpixel filtering.
const vec2 subpixel_support_radius_raw =
vec2(base_support_radius) + abs(get_aa_subpixel_r_offset());
@ -286,17 +284,18 @@ vec4 get_subpixel_support_diam_and_final_axis_importance()
}
}
/////////////////////////// FILTER WEIGHT FUNCTIONS //////////////////////////
float eval_box_filter(const float dist)
{
return float(abs(dist) <= aa_box_support); if(abs(dist) <= aa_box_support) return 1.0;//abs(dist);
else return 0.0;
}
float eval_separable_box_filter(const vec2 offset)
{
return float(all(abs(offset) <= vec2(aa_box_support))); if(all(lessThanEqual(abs(offset) , vec2(aa_box_support)))) return 1.0;//float(abs(offset));
else return 0.0;
}
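// The rewrite above works around GLSL's lack of componentwise <= on vectors
// (hence lessThanEqual + all). Where a branch is undesirable, an equivalent
// branch-free sketch (hypothetical helper, not part of this file) casts the
// folded comparison straight back to a weight:
//     float eval_separable_box_filter_branchless(const vec2 offset)
//     {
//         // all(lessThanEqual(...)) folds the per-component tests into one
//         // bool; float() turns it back into the filter's 0.0/1.0 weight.
//         return float(all(lessThanEqual(abs(offset), vec2(aa_box_support))));
//     }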
float eval_tent_filter(const float dist)
@ -485,7 +484,6 @@ vec3 eval_unorm_rgb_weights(const vec2 offset,
}
}
////////////////////////////// HELPER FUNCTIONS //////////////////////////////
vec4 tex2Daa_tiled_linearize(const sampler2D samp, const vec2 s)
@ -506,10 +504,12 @@ vec2 get_frame_sign(const float frame)
{
// Mirror the sampling pattern for odd frames in a direction that
// lets us keep the same subpixel sample weights:
const float frame_odd = float(fmod(frame, 2.0) > 0.5); float frame_odd = float(mod(frame, 2.0) > 0.5);
const vec2 aa_r_offset = get_aa_subpixel_r_offset();
const vec2 mirror = -vec2(abs(aa_r_offset) < vec2(FIX_ZERO(0.0))); vec2 mirror = vec2(FIX_ZERO(0.0));
return mirror; if ( abs(aa_r_offset.x) < FIX_ZERO(0.0)) mirror.x = abs(aa_r_offset.x);
if ( abs(aa_r_offset.y) < FIX_ZERO(0.0)) mirror.y = abs(aa_r_offset.y);
return vec2(-1.0) * mirror;
}
else
{
@ -517,26 +517,25 @@ vec2 get_frame_sign(const float frame)
}
}
///////////////////////// ANTIALIASED TEXTURE LOOKUPS ////////////////////////
vec3 tex2Daa_subpixel_weights_only(const sampler2D texture, vec3 tex2Daa_subpixel_weights_only(const sampler2D tex,
const vec2 tex_uv, const vec2x2 pixel_to_tex_uv) const vec2 tex_uv, const mat2x2 pixel_to_tex_uv)
{
// This function is unlike the others: Just perform a single independent
// lookup for each subpixel. It may be very aliased.
const vec2 aa_r_offset = get_aa_subpixel_r_offset();
const vec2 aa_r_offset_uv_offset = mul(pixel_to_tex_uv, aa_r_offset); const vec2 aa_r_offset_uv_offset = (aa_r_offset * pixel_to_tex_uv);
const float color_g = tex2D_linearize(texture, tex_uv).g; const float color_g = tex2D_linearize(tex, tex_uv).g;
const float color_r = tex2D_linearize(texture, tex_uv + aa_r_offset_uv_offset).r; const float color_r = tex2D_linearize(tex, tex_uv + aa_r_offset_uv_offset).r;
const float color_b = tex2D_linearize(texture, tex_uv - aa_r_offset_uv_offset).b; const float color_b = tex2D_linearize(tex, tex_uv - aa_r_offset_uv_offset).b;
return vec3(color_r, color_g, color_b);
}
// The tex2Daa* functions compile very slowly due to all the macros and
// compile-time math, so only include the ones we'll actually use!
vec3 tex2Daa4x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa4x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use an RGMS4 pattern (4-queens):
// . . Q . : off =(-1.5, -1.5)/4 + (2.0, 0.0)/4
@ -564,25 +563,25 @@ vec3 tex2Daa4x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (w0 * sample0 + w1 * sample1 +
w2 * sample2 + w3 * sample3);
}
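// Note on the w_sum = half_sum + half_sum.bgr trick used by the tex2Daa*
// functions: diagonal symmetry pairs each sample with a mirrored partner
// whose red and blue subpixel weights swap while green matches. Illustration
// with hypothetical weights:
//     half_sum = w0 + w1 = vec3(0.30, 0.25, 0.20)
//     w_sum    = vec3(0.30, 0.25, 0.20) + vec3(0.20, 0.25, 0.30) = vec3(0.5)
// so every channel still normalizes to 1.0 after multiplying by w_sum_inv.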
vec3 tex2Daa5x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa5x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 5-queens pattern:
// . Q . . . : off =(-2.0, -2.0)/5 + (1.0, 0.0)/5
@ -611,26 +610,26 @@ vec3 tex2Daa5x(const sampler2D texture, const vec2 tex_uv,
// Get the weight sum to normalize the total to 1.0 later:
const vec3 w_sum_inv = vec3(1.0)/(w0 + w1 + w2 + w3 + w4);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (w0 * sample0 + w1 * sample1 +
w2 * sample2 + w3 * sample3 + w4 * sample4);
}
vec3 tex2Daa6x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa6x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 6-queens pattern with a stronger horizontal
// than vertical slant:
@ -664,28 +663,28 @@ vec3 tex2Daa6x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (w0 * sample0 + w1 * sample1 + w2 * sample2 +
w3 * sample3 + w4 * sample4 + w5 * sample5);
}
vec3 tex2Daa7x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa7x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 7-queens pattern with a queen in the center:
// . Q . . . . . : off =(-3.0, -3.0)/7 + (1.0, 0.0)/7
@ -720,30 +719,30 @@ vec3 tex2Daa7x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr + w3;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample6 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (
w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
w4 * sample4 + w5 * sample5 + w6 * sample6);
}
vec3 tex2Daa8x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa8x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 8-queens pattern.
// . . Q . . . . . : off =(-3.5, -3.5)/8 + (2.0, 0.0)/8
@ -780,31 +779,31 @@ vec3 tex2Daa8x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, and mirror on odd frames if directed:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset3 = mul(true_pixel_to_tex_uv, xy_offset3 * frame_sign); const vec2 uv_offset3 = (xy_offset3 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset3).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset3).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset3).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset3).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample6 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample7 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample7 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (
w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
w4 * sample4 + w5 * sample5 + w6 * sample6 + w7 * sample7);
}
vec3 tex2Daa12x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa12x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 12-superqueens pattern where no 3 points are
// exactly collinear.
@ -852,30 +851,30 @@ vec3 tex2Daa12x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/w_sum;
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset3 = mul(true_pixel_to_tex_uv, xy_offset3 * frame_sign); const vec2 uv_offset3 = (xy_offset3 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset4 = mul(true_pixel_to_tex_uv, xy_offset4 * frame_sign); const vec2 uv_offset4 = (xy_offset4 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset5 = mul(true_pixel_to_tex_uv, xy_offset5 * frame_sign); const vec2 uv_offset5 = (xy_offset5 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset3).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset3).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset4).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset4).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset5).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset5).rgb;
const vec3 sample6 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset5).rgb; const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset5).rgb;
const vec3 sample7 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset4).rgb; const vec3 sample7 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset4).rgb;
const vec3 sample8 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset3).rgb; const vec3 sample8 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset3).rgb;
const vec3 sample9 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample9 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample10 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample10 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample11 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample11 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (
w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
@ -883,8 +882,8 @@ vec3 tex2Daa12x(const sampler2D texture, const vec2 tex_uv,
w8 * sample8 + w9 * sample9 + w10 * sample10 + w11 * sample11);
}
vec3 tex2Daa16x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa16x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 16-superqueens pattern where no 3 points are
// exactly collinear.
@ -942,36 +941,36 @@ vec3 tex2Daa16x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset3 = mul(true_pixel_to_tex_uv, xy_offset3 * frame_sign); const vec2 uv_offset3 = (xy_offset3 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset4 = mul(true_pixel_to_tex_uv, xy_offset4 * frame_sign); const vec2 uv_offset4 = (xy_offset4 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset5 = mul(true_pixel_to_tex_uv, xy_offset5 * frame_sign); const vec2 uv_offset5 = (xy_offset5 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset6 = mul(true_pixel_to_tex_uv, xy_offset6 * frame_sign); const vec2 uv_offset6 = (xy_offset6 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset7 = mul(true_pixel_to_tex_uv, xy_offset7 * frame_sign); const vec2 uv_offset7 = (xy_offset7 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset3).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset3).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset4).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset4).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset5).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset5).rgb;
const vec3 sample6 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset6).rgb; const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset6).rgb;
const vec3 sample7 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset7).rgb; const vec3 sample7 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset7).rgb;
const vec3 sample8 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset7).rgb; const vec3 sample8 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset7).rgb;
const vec3 sample9 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset6).rgb; const vec3 sample9 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset6).rgb;
const vec3 sample10 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset5).rgb; const vec3 sample10 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset5).rgb;
const vec3 sample11 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset4).rgb; const vec3 sample11 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset4).rgb;
const vec3 sample12 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset3).rgb; const vec3 sample12 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset3).rgb;
const vec3 sample13 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample13 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample14 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample14 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample15 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample15 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (
w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
@ -980,8 +979,8 @@ vec3 tex2Daa16x(const sampler2D texture, const vec2 tex_uv,
w12 * sample12 + w13 * sample13 + w14 * sample14 + w15 * sample15);
}
vec3 tex2Daa20x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa20x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 20-superqueens pattern where no 3 points are
// exactly collinear and superqueens have a squared attack radius of 13.
@ -1049,42 +1048,42 @@ vec3 tex2Daa20x(const sampler2D texture, const vec2 tex_uv,
const vec3 w_sum = half_sum + half_sum.bgr;
const vec3 w_sum_inv = vec3(1.0)/(w_sum);
// Scale the pixel-space to texture offset matrix by the pixel diameter.
const vec2x2 true_pixel_to_tex_uv = const mat2x2 true_pixel_to_tex_uv =
vec2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter)); mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
// Get uv sample offsets, mirror on odd frames if directed, and exploit
// diagonal symmetry:
const vec2 frame_sign = get_frame_sign(frame);
const vec2 uv_offset0 = mul(true_pixel_to_tex_uv, xy_offset0 * frame_sign); const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset1 = mul(true_pixel_to_tex_uv, xy_offset1 * frame_sign); const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset2 = mul(true_pixel_to_tex_uv, xy_offset2 * frame_sign); const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset3 = mul(true_pixel_to_tex_uv, xy_offset3 * frame_sign); const vec2 uv_offset3 = (xy_offset3 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset4 = mul(true_pixel_to_tex_uv, xy_offset4 * frame_sign); const vec2 uv_offset4 = (xy_offset4 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset5 = mul(true_pixel_to_tex_uv, xy_offset5 * frame_sign); const vec2 uv_offset5 = (xy_offset5 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset6 = mul(true_pixel_to_tex_uv, xy_offset6 * frame_sign); const vec2 uv_offset6 = (xy_offset6 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset7 = mul(true_pixel_to_tex_uv, xy_offset7 * frame_sign); const vec2 uv_offset7 = (xy_offset7 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset8 = mul(true_pixel_to_tex_uv, xy_offset8 * frame_sign); const vec2 uv_offset8 = (xy_offset8 * frame_sign * true_pixel_to_tex_uv);
const vec2 uv_offset9 = mul(true_pixel_to_tex_uv, xy_offset9 * frame_sign); const vec2 uv_offset9 = (xy_offset9 * frame_sign * true_pixel_to_tex_uv);
// Load samples, linearizing if necessary, etc.:
const vec3 sample0 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset0).rgb; const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
const vec3 sample1 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset1).rgb; const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
const vec3 sample2 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset2).rgb; const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
const vec3 sample3 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset3).rgb; const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset3).rgb;
const vec3 sample4 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset4).rgb; const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset4).rgb;
const vec3 sample5 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset5).rgb; const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset5).rgb;
const vec3 sample6 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset6).rgb; const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset6).rgb;
const vec3 sample7 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset7).rgb; const vec3 sample7 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset7).rgb;
const vec3 sample8 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset8).rgb; const vec3 sample8 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset8).rgb;
const vec3 sample9 = tex2Daa_tiled_linearize(texture, tex_uv + uv_offset9).rgb; const vec3 sample9 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset9).rgb;
const vec3 sample10 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset9).rgb; const vec3 sample10 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset9).rgb;
const vec3 sample11 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset8).rgb; const vec3 sample11 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset8).rgb;
const vec3 sample12 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset7).rgb; const vec3 sample12 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset7).rgb;
const vec3 sample13 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset6).rgb; const vec3 sample13 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset6).rgb;
const vec3 sample14 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset5).rgb; const vec3 sample14 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset5).rgb;
const vec3 sample15 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset4).rgb; const vec3 sample15 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset4).rgb;
const vec3 sample16 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset3).rgb; const vec3 sample16 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset3).rgb;
const vec3 sample17 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset2).rgb; const vec3 sample17 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
const vec3 sample18 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset1).rgb; const vec3 sample18 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
const vec3 sample19 = tex2Daa_tiled_linearize(texture, tex_uv - uv_offset0).rgb; const vec3 sample19 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
// Sum weighted samples (weight sum must equal 1.0 for each channel):
return w_sum_inv * (
w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
@ -1094,8 +1093,8 @@ vec3 tex2Daa20x(const sampler2D texture, const vec2 tex_uv,
w16 * sample16 + w17 * sample17 + w18 * sample18 + w19 * sample19);
}
vec3 tex2Daa24x(const sampler2D texture, const vec2 tex_uv, vec3 tex2Daa24x(const sampler2D tex, const vec2 tex_uv,
const vec2x2 pixel_to_tex_uv, const float frame) const mat2x2 pixel_to_tex_uv, const float frame)
{
// Use a diagonally symmetric 24-superqueens pattern where no 3 points are
// exactly collinear and superqueens have a squared attack radius of 13.
@@ -1174,48 +1173,48 @@ vec3 tex2Daa24x(const sampler2D tex, const vec2 tex_uv,
    const vec3 w_sum = half_sum + half_sum.bgr;
    const vec3 w_sum_inv = vec3(1.0)/(w_sum);
    // Scale the pixel-space to texture offset matrix by the pixel diameter.
    const mat2x2 true_pixel_to_tex_uv =
        mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
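    // Note: the offset math below multiplies row vectors on the left
    // (offset * matrix), the GLSL idiom standing in for the HLSL-style
    // mul(matrix, offset) this port was translated from.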
    // Get uv sample offsets, mirror on odd frames if directed, and exploit
    // diagonal symmetry:
    const vec2 frame_sign = get_frame_sign(frame);
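    // get_frame_sign() flips the offsets on odd frames (when directed) for a
    // temporally alternating pattern, and diagonal symmetry reuses each
    // offset for two samples: tex_uv + offset and tex_uv - offset.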
    const vec2 uv_offset0 = (xy_offset0 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset1 = (xy_offset1 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset2 = (xy_offset2 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset3 = (xy_offset3 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset4 = (xy_offset4 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset5 = (xy_offset5 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset6 = (xy_offset6 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset7 = (xy_offset7 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset8 = (xy_offset8 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset9 = (xy_offset9 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset10 = (xy_offset10 * frame_sign * true_pixel_to_tex_uv);
    const vec2 uv_offset11 = (xy_offset11 * frame_sign * true_pixel_to_tex_uv);
    // Load samples, linearizing if necessary, etc.:
    const vec3 sample0 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset0).rgb;
    const vec3 sample1 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset1).rgb;
    const vec3 sample2 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset2).rgb;
    const vec3 sample3 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset3).rgb;
    const vec3 sample4 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset4).rgb;
    const vec3 sample5 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset5).rgb;
    const vec3 sample6 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset6).rgb;
    const vec3 sample7 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset7).rgb;
    const vec3 sample8 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset8).rgb;
    const vec3 sample9 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset9).rgb;
    const vec3 sample10 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset10).rgb;
    const vec3 sample11 = tex2Daa_tiled_linearize(tex, tex_uv + uv_offset11).rgb;
    const vec3 sample12 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset11).rgb;
    const vec3 sample13 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset10).rgb;
    const vec3 sample14 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset9).rgb;
    const vec3 sample15 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset8).rgb;
    const vec3 sample16 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset7).rgb;
    const vec3 sample17 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset6).rgb;
    const vec3 sample18 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset5).rgb;
    const vec3 sample19 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset4).rgb;
    const vec3 sample20 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset3).rgb;
    const vec3 sample21 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset2).rgb;
    const vec3 sample22 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset1).rgb;
    const vec3 sample23 = tex2Daa_tiled_linearize(tex, tex_uv - uv_offset0).rgb;
    // Sum weighted samples (weight sum must equal 1.0 for each channel):
    return w_sum_inv * (
        w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
@@ -1226,8 +1225,8 @@ vec3 tex2Daa24x(const sampler2D tex, const vec2 tex_uv,
        w20 * sample20 + w21 * sample21 + w22 * sample22 + w23 * sample23);
}

vec3 tex2Daa_debug_16x_regular(const sampler2D tex, const vec2 tex_uv,
    const mat2x2 pixel_to_tex_uv, const float frame)
{
    // Sample on a regular 4x4 grid. This is mainly for testing.
    const float grid_size = 4.0;
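    // A regular grid aligns samples in rows and columns, so it tends to
    // resolve near-horizontal and near-vertical edges worse than the
    // queens-based patterns above; it exists as a simple testing reference.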
@@ -1271,33 +1270,33 @@ vec3 tex2Daa_debug_16x_regular(const sampler2D tex, const vec2 tex_uv,
    const vec3 w_sum = half_sum + half_sum.bgr;
    const vec3 w_sum_inv = vec3(1.0)/(w_sum);
    // Scale the pixel-space to texture offset matrix by the pixel diameter.
    const mat2x2 true_pixel_to_tex_uv =
        mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
    // Get uv sample offsets, taking advantage of row alignment:
    const vec2 uv_step_x = (vec2(xy_step.x, 0.0) * true_pixel_to_tex_uv);
    const vec2 uv_step_y = (vec2(0.0, xy_step.y) * true_pixel_to_tex_uv);
    const vec2 uv_offset0 = -1.5 * (uv_step_x + uv_step_y);
    const vec2 sample0_uv = tex_uv + uv_offset0;
    const vec2 sample4_uv = sample0_uv + uv_step_y;
    const vec2 sample8_uv = sample0_uv + uv_step_y * 2.0;
    const vec2 sample12_uv = sample0_uv + uv_step_y * 3.0;
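    // Row alignment means only the four row-base uv's above need full offset
    // math; the other twelve samples just add small multiples of uv_step_x.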
    // Load samples, linearizing if necessary, etc.:
    const vec3 sample0 = tex2Daa_tiled_linearize(tex, sample0_uv).rgb;
    const vec3 sample1 = tex2Daa_tiled_linearize(tex, sample0_uv + uv_step_x).rgb;
    const vec3 sample2 = tex2Daa_tiled_linearize(tex, sample0_uv + uv_step_x * 2.0).rgb;
    const vec3 sample3 = tex2Daa_tiled_linearize(tex, sample0_uv + uv_step_x * 3.0).rgb;
    const vec3 sample4 = tex2Daa_tiled_linearize(tex, sample4_uv).rgb;
    const vec3 sample5 = tex2Daa_tiled_linearize(tex, sample4_uv + uv_step_x).rgb;
    const vec3 sample6 = tex2Daa_tiled_linearize(tex, sample4_uv + uv_step_x * 2.0).rgb;
    const vec3 sample7 = tex2Daa_tiled_linearize(tex, sample4_uv + uv_step_x * 3.0).rgb;
    const vec3 sample8 = tex2Daa_tiled_linearize(tex, sample8_uv).rgb;
    const vec3 sample9 = tex2Daa_tiled_linearize(tex, sample8_uv + uv_step_x).rgb;
    const vec3 sample10 = tex2Daa_tiled_linearize(tex, sample8_uv + uv_step_x * 2.0).rgb;
    const vec3 sample11 = tex2Daa_tiled_linearize(tex, sample8_uv + uv_step_x * 3.0).rgb;
    const vec3 sample12 = tex2Daa_tiled_linearize(tex, sample12_uv).rgb;
    const vec3 sample13 = tex2Daa_tiled_linearize(tex, sample12_uv + uv_step_x).rgb;
    const vec3 sample14 = tex2Daa_tiled_linearize(tex, sample12_uv + uv_step_x * 2.0).rgb;
    const vec3 sample15 = tex2Daa_tiled_linearize(tex, sample12_uv + uv_step_x * 3.0).rgb;
    // Sum weighted samples (weight sum must equal 1.0 for each channel):
    return w_sum_inv * (
        w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
@@ -1306,8 +1305,8 @@ vec3 tex2Daa_debug_16x_regular(const sampler2D tex, const vec2 tex_uv,
        w12 * sample12 + w13 * sample13 + w14 * sample14 + w15 * sample15);
}

vec3 tex2Daa_debug_dynamic(const sampler2D tex, const vec2 tex_uv,
    const mat2x2 pixel_to_tex_uv, const float frame)
{
    // This function is for testing only: Use an NxN grid with dynamic weights.
    const int grid_size = 8;
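    // Unlike the tuned patterns above, weights here are computed at runtime
    // in the loop below, and the full 8x8 grid costs 64 taps, so this path
    // is only suitable for testing.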
@@ -1322,7 +1321,7 @@ vec3 tex2Daa_debug_dynamic(const sampler2D tex, const vec2 tex_uv,
        -grid_radius_in_samples * filter_space_offset_step;
    // Compute xy sample offsets and subpixel weights:
    vec3 weights[grid_size * grid_size];
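    // GLSL has no implicit scalar-to-vector conversion, so the accumulator
    // below must be initialized with an explicit vec3(0.0) constructor.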
    vec3 weight_sum = vec3(0.0);
    for(int i = 0; i < grid_size; ++i)
    {
        for(int j = 0; j < grid_size; ++j)
@@ -1336,19 +1335,17 @@ vec3 tex2Daa_debug_dynamic(const sampler2D tex, const vec2 tex_uv,
        }
    }
    // Get uv offset vectors along x and y directions:
    const mat2x2 true_pixel_to_tex_uv =
        mat2x2(vec4(pixel_to_tex_uv * aa_pixel_diameter));
    const vec2 uv_offset_step_x = (vec2(filter_space_offset_step.x, 0.0) * true_pixel_to_tex_uv);
    const vec2 uv_offset_step_y = (vec2(0.0, filter_space_offset_step.y) * true_pixel_to_tex_uv);
    // Get a starting sample location:
    const vec2 sample0_uv_offset = -grid_radius_in_samples *
        (uv_offset_step_x + uv_offset_step_y);
    const vec2 sample0_uv = tex_uv + sample0_uv_offset;
    // Load, weight, and sum [linearized] samples:
    vec3 sum = vec3(0.0);
    const vec3 weight_sum_inv = vec3(1.0)/weight_sum;
    for(int i = 0; i < grid_size; ++i)
    {
        const vec2 row_i_first_sample_uv =
@@ -1358,36 +1355,32 @@ vec3 tex2Daa_debug_dynamic(const sampler2D tex, const vec2 tex_uv,
            const vec2 sample_uv =
                row_i_first_sample_uv + j * uv_offset_step_x;
            sum += weights[i*grid_size + j] *
                tex2Daa_tiled_linearize(tex, sample_uv).rgb;
        }
    }
    return sum * weight_sum_inv;
}

/////////////////////// ANTIALIASING CODEPATH SELECTION //////////////////////

vec3 tex2Daa(const sampler2D tex, const vec2 tex_uv,
    const mat2x2 pixel_to_tex_uv, const float frame)
{
    // Statically switch between antialiasing modes/levels:
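    // Rough level map: 0 = none; 1-3 = subpixel weights only; 4, 5, 6, 7 =
    // matching sample count; 8-11 = 8x; 12-15 = 12x; 16-19 = 16x;
    // 20-23 = 20x; 24-253 = 24x; 254 = debug 16x regular grid;
    // 255+ = debug dynamic.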
    if(aa_level < 0.5) return tex2D_linearize(tex, tex_uv).rgb;
    else if(aa_level < 3.5) return tex2Daa_subpixel_weights_only(
        tex, tex_uv, pixel_to_tex_uv);
    else if(aa_level < 4.5) return tex2Daa4x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 5.5) return tex2Daa5x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 6.5) return tex2Daa6x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 7.5) return tex2Daa7x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 11.5) return tex2Daa8x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 15.5) return tex2Daa12x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 19.5) return tex2Daa16x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 23.5) return tex2Daa20x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 253.5) return tex2Daa24x(tex, tex_uv, pixel_to_tex_uv, frame);
    else if(aa_level < 254.5) return tex2Daa_debug_16x_regular(
        tex, tex_uv, pixel_to_tex_uv, frame);
    else return tex2Daa_debug_dynamic(tex, tex_uv, pixel_to_tex_uv, frame);
}
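
// Usage sketch (illustrative only; the sampler and uniform names here are
// hypothetical, not identifiers defined by this header): a fragment pass
// with a pixel-space-to-uv offset matrix would call, e.g.,
//     const vec3 aa_rgb = tex2Daa(input_sampler, tex_uv, pixel_to_tex_uv,
//         frame_count);
// and rely on the odd-frame mirroring above for cheap temporal smoothing.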

#endif // TEX2DANTIALIAS_H