mirror of
https://github.com/italicsjenga/slang-shaders.git
synced 2024-11-22 15:51:30 +11:00
more work on royale, long way to go
This commit is contained in:
parent
382bd43f71
commit
13b985aaf9
214
crt/shaders/crt-royale/src/bloom-functions.h
Normal file
214
crt/shaders/crt-royale/src/bloom-functions.h
Normal file
|
@ -0,0 +1,214 @@
|
|||
#define BLOOM_FUNCTIONS

/////////////////////////////// BLOOM CONSTANTS //////////////////////////////

//  Compute constants with manual inlines of the functions below:
//  Max per-pixel difference allowed when deciding a phosphor triad is
//  "fully blurred" -- one 8-bit quantization step:
const float bloom_diff_thresh = 1.0/256.0;

//  Assume an extremely large viewport size for asymptotic results:
//  (1080 * 1024 * 4/3 = 1474560 horizontal "pixels")
const float max_viewport_size_x = 1080.0*1024.0*(4.0/3.0);
|
||||
|
||||
/////////////////////////////////// HELPERS //////////////////////////////////
|
||||
|
||||
float get_min_sigma_to_blur_triad(const float triad_size,
    const float thresh)
{
    //  Requires:   1.) triad_size is the final phosphor triad size in pixels
    //              2.) thresh is the max desired pixel difference in the
    //                  blurred triad (e.g. 1.0/256.0).
    //  Returns:    The minimum Gaussian sigma that fully blurs a phosphor
    //              triad on the screen to an even color, within thresh.
    //  This closed-form expression was obtained by curve-fitting data.
    //  Estimate: max error = ~0.086036, mean sq. error = ~0.0013387:
    const float linear_term = 0.6113*triad_size;
    const float thresh_term = 1.122*triad_size*sqrt(0.000416 + thresh);
    return -0.05168 + linear_term - thresh_term;
    //  A cruder fit, kept for reference:
    //  Estimate: max error = ~0.16486, mean sq. error = ~0.0041041:
    //return 0.5985*triad_size - triad_size*sqrt(thresh)
}
|
||||
|
||||
float get_absolute_scale_blur_sigma(const float thresh)
{
    //  Requires:   1.) min_allowed_viewport_triads must be a global; the
    //                  number of horizontal phosphor triads in the final
    //                  image must be >= min_allowed_viewport_triads.x for
    //                  realistic results.
    //              2.) bloom_approx_scale_x must be a global float equal to
    //                  the absolute horizontal scale of BLOOM_APPROX.
    //              3.) bloom_approx_scale_x/min_allowed_viewport_triads.x
    //                  should be <= 1.1658025090 to keep the final result
    //                  < 0.62666015625 (the largest sigma ensuring the
    //                  largest unused texel weight stays < 1.0/256.0 for a
    //                  3x3 blur).
    //              4.) thresh is the max desired pixel difference in the
    //                  blurred triad (e.g. 1.0/256.0).
    //  Returns:    The minimum Gaussian sigma that blurs this pass's output
    //              as much as it would take to blur away
    //              bloom_approx_scale_x horizontal phosphor triads.
    //  BLOOM_APPROX should look like a downscaled phosphor blur.  We can't
    //  know the viewport size in this pass, so instead of scaling the true
    //  phosphor sigma by (bloom_approx_scale_x/viewport_size_x), blur as
    //  much as it would take to blur away min_allowed_viewport_triads.x.
    //  Blurring "more than necessary" when the user has more triads isn't
    //  terrible: blurring a constant fraction of the viewport may resemble
    //  a true optical bloom even better (the viewport covers about the same
    //  fraction of each player's field of view regardless of resolution).
    //  Use an extremely large viewport size for asymptotic results:
    const float asymptotic_triad_size =
        max_viewport_size_x/min_allowed_viewport_triads.x;
    const float asymptotic_sigma =
        get_min_sigma_to_blur_triad(asymptotic_triad_size, thresh);
    return bloom_approx_scale_x/max_viewport_size_x * asymptotic_sigma;
}
|
||||
|
||||
float get_center_weight(const float sigma)
{
    //  Given a Gaussian blur sigma, get the blur weight for the center texel.
    //  NOTE(review): the runtime branch returns the 1D weight-sum inverse,
    //  while the static branch below squares it (separable 2D center
    //  weight) -- presumably intentional, but worth confirming upstream.
    #ifdef RUNTIME_PHOSPHOR_BLOOM_SIGMA
        return get_fast_gaussian_weight_sum_inv(sigma);
    #else
        //  Statically evaluate unnormalized Gaussian weights for texel
        //  offsets 0..21 (exponents are squared offsets: 1, 4, 9, ...):
        const float denom_inv = 0.5/(sigma*sigma);
        const float w0 = 1.0;
        const float w1 = exp(-1.0 * denom_inv);
        const float w2 = exp(-4.0 * denom_inv);
        const float w3 = exp(-9.0 * denom_inv);
        const float w4 = exp(-16.0 * denom_inv);
        const float w5 = exp(-25.0 * denom_inv);
        const float w6 = exp(-36.0 * denom_inv);
        const float w7 = exp(-49.0 * denom_inv);
        const float w8 = exp(-64.0 * denom_inv);
        const float w9 = exp(-81.0 * denom_inv);
        const float w10 = exp(-100.0 * denom_inv);
        const float w11 = exp(-121.0 * denom_inv);
        const float w12 = exp(-144.0 * denom_inv);
        const float w13 = exp(-169.0 * denom_inv);
        const float w14 = exp(-196.0 * denom_inv);
        const float w15 = exp(-225.0 * denom_inv);
        const float w16 = exp(-256.0 * denom_inv);
        const float w17 = exp(-289.0 * denom_inv);
        const float w18 = exp(-324.0 * denom_inv);
        const float w19 = exp(-361.0 * denom_inv);
        const float w20 = exp(-400.0 * denom_inv);
        const float w21 = exp(-441.0 * denom_inv);
        //  Note: If the implementation uses a smaller blur than the max allowed,
        //  the worst case scenario is that the center weight will be overestimated,
        //  so we'll put a bit more energy into the brightpass...no huge deal.
        //  Then again, if the implementation uses a larger blur than the max
        //  "allowed" because of dynamic branching, the center weight could be
        //  underestimated, which is more of a problem...consider always using
        //  (NOTE(review): this comment is truncated in the source -- it
        //  presumably continues "the runtime path"; confirm upstream.)
        //  Pick the normalization matching the compile-time blur size:
        #ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_12_PIXELS
            //  43x blur:
            const float weight_sum_inv = 1.0 /
                (w0 + 2.0 * (w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8 + w9 + w10 +
                w11 + w12 + w13 + w14 + w15 + w16 + w17 + w18 + w19 + w20 + w21));
        #else
        #ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_9_PIXELS
            //  31x blur:
            const float weight_sum_inv = 1.0 /
                (w0 + 2.0 * (w1 + w2 + w3 + w4 + w5 + w6 + w7 +
                w8 + w9 + w10 + w11 + w12 + w13 + w14 + w15));
        #else
        #ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_6_PIXELS
            //  25x blur:
            const float weight_sum_inv = 1.0 / (w0 + 2.0 * (
                w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8 + w9 + w10 + w11 + w12));
        #else
        #ifdef PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_3_PIXELS
            //  17x blur:
            const float weight_sum_inv = 1.0 / (w0 + 2.0 * (
                w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8));
        #else
            //  9x blur:
            const float weight_sum_inv = 1.0 / (w0 + 2.0 * (w1 + w2 + w3 + w4));
        #endif  //  PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_3_PIXELS
        #endif  //  PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_6_PIXELS
        #endif  //  PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_9_PIXELS
        #endif  //  PHOSPHOR_BLOOM_TRIADS_LARGER_THAN_12_PIXELS
        //  Square the normalized 1D center weight (w0 == 1.0) to get the
        //  center weight of the separable 2D blur:
        const float center_weight = weight_sum_inv * weight_sum_inv;
        return center_weight;
    #endif
}
|
||||
|
||||
float get_bloom_approx_sigma(const float output_size_x_runtime,
    const float estimated_viewport_size_x)
{
    //  Requires:   1.) output_size_x_runtime == BLOOM_APPROX.output_size.x.
    //                  This is included for dynamic codepaths just in case the
    //                  following two globals are incorrect:
    //              2.) bloom_approx_size_x_for_fake should == the same
    //                  if PHOSPHOR_BLOOM_FAKE is #defined
    //                  (NOTE(review): the original comment said
    //                  "bloom_approx_size_x_for_skip," but the code below
    //                  reads bloom_approx_size_x_for_fake -- confirm name.)
    //              3.) bloom_approx_size_x should == the same otherwise
    //  Returns:    For gaussian4x4, return a dynamic small bloom sigma that's
    //              as close to optimal as possible given available information.
    //              For blur3x3, return a static small bloom sigma that
    //              works well for typical cases.  Otherwise, we're using
    //              simple bilinear filtering, so use static calculations.
    //  Assume the default static value.  This is a compromise that ensures
    //  typical triads are blurred, even if unusually large ones aren't.
    const float mask_num_triads_static =
        max(min_allowed_viewport_triads.x, mask_num_triads_desired_static);
    const float mask_num_triads_from_size =
        estimated_viewport_size_x/mask_triad_size_desired;
    const float mask_num_triads_runtime = max(min_allowed_viewport_triads.x,
        mix(mask_num_triads_from_size, mask_num_triads_desired,
            mask_specify_num_triads));
    //  Assume an extremely large viewport size for asymptotic results:
    //  (shadows the file-level constant of the same name and value)
    const float max_viewport_size_x = 1080.0*1024.0*(4.0/3.0);
    if(bloom_approx_filter > 1.5)   //  4x4 true Gaussian resize
    {
        //  Use the runtime num triads and output size:
        const float asymptotic_triad_size =
            max_viewport_size_x/mask_num_triads_runtime;
        const float asymptotic_sigma = get_min_sigma_to_blur_triad(
            asymptotic_triad_size, bloom_diff_thresh);
        const float bloom_approx_sigma =
            asymptotic_sigma * output_size_x_runtime/max_viewport_size_x;
        //  The BLOOM_APPROX input has to be ORIG_LINEARIZED to avoid moire, but
        //  account for the Gaussian scanline sigma from the last pass too.
        //  The bloom will be too wide horizontally but tall enough vertically.
        return length(vec2(bloom_approx_sigma, beam_max_sigma));
    }
    else    //  3x3 blur resize (the bilinear resize doesn't need a sigma)
    {
        //  We're either using blur3x3 or bilinear filtering.  The biggest
        //  reason to choose blur3x3 is to avoid dynamic weights, so use a
        //  static calculation.
        #ifdef PHOSPHOR_BLOOM_FAKE
            const float output_size_x_static =
                bloom_approx_size_x_for_fake;
        #else
            const float output_size_x_static = bloom_approx_size_x;
        #endif
        const float asymptotic_triad_size =
            max_viewport_size_x/mask_num_triads_static;
        const float asymptotic_sigma = get_min_sigma_to_blur_triad(
            asymptotic_triad_size, bloom_diff_thresh);
        const float bloom_approx_sigma =
            asymptotic_sigma * output_size_x_static/max_viewport_size_x;
        //  The BLOOM_APPROX input has to be ORIG_LINEARIZED to avoid moire, but
        //  try accounting for the Gaussian scanline sigma from the last pass
        //  too; use the static default value:
        return length(vec2(bloom_approx_sigma, beam_max_sigma_static));
    }
}
|
||||
|
||||
float get_final_bloom_sigma(const float bloom_sigma_runtime)
{
    //  Requires:   1.) bloom_sigma_runtime is a precalculated sigma that's
    //                  optimal for the [known] triad size.
    //              2.) Call this from a fragment shader (not a vertex
    //                  shader), or blurring with static sigmas won't be
    //                  constant-folded.
    //  Returns:    The optimistic static sigma if the triad size is known at
    //              compile time; otherwise the optimal runtime sigma (10%
    //              slower) or an implementation-specific compromise between
    //              an optimistic and a pessimistic static sigma.
    //  Optimistic static sigma: assume average-size triads, not worst-case
    //  huge ones, because overblurring looks as bad as underblurring:
    const float static_sigma = get_min_sigma_to_blur_triad(
        mask_triad_size_desired_static, bloom_diff_thresh);
    #ifdef RUNTIME_PHOSPHOR_BLOOM_SIGMA
        return bloom_sigma_runtime;
    #else
        return static_sigma;
    #endif
}
|
1916
crt/shaders/crt-royale/src/blur-functions-old.h
Normal file
1916
crt/shaders/crt-royale/src/blur-functions-old.h
Normal file
File diff suppressed because it is too large
Load diff
281
crt/shaders/crt-royale/src/blur-functions.h
Normal file
281
crt/shaders/crt-royale/src/blur-functions.h
Normal file
|
@ -0,0 +1,281 @@
|
|||
#define BLUR_FUNCTIONS
|
||||
|
||||
///////////////////////////////// MIT LICENSE ////////////////////////////////
|
||||
|
||||
// Copyright (C) 2014 TroggleMonkey
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
|
||||
///////////////////////////////// DESCRIPTION ////////////////////////////////
|
||||
|
||||
// This file provides reusable one-pass and separable (two-pass) blurs.
|
||||
// Requires: All blurs share these requirements (dxdy requirement is split):
|
||||
// 1.) All requirements of gamma-management.h must be satisfied!
|
||||
// 2.) filter_linearN must == "true" in your .cgp preset unless
|
||||
// you're using tex2DblurNresize at 1x scale.
|
||||
// 3.) mipmap_inputN must == "true" in your .cgp preset if
|
||||
// IN.output_size < IN.video_size.
|
||||
// 4.) IN.output_size == IN.video_size / pow(2, M), where M is some
|
||||
// positive integer. tex2Dblur*resize can resize arbitrarily
|
||||
// (and the blur will be done after resizing), but arbitrary
|
||||
// resizes "fail" with other blurs due to the way they mix
|
||||
// static weights with bilinear sample exploitation.
|
||||
// 5.) In general, dxdy should contain the uv pixel spacing:
|
||||
// dxdy = (IN.video_size/IN.output_size)/IN.texture_size
|
||||
// 6.) For separable blurs (tex2DblurNresize and tex2DblurNfast),
|
||||
// zero out the dxdy component in the unblurred dimension:
|
||||
// dxdy = vec2(dxdy.x, 0.0) or vec2(0.0, dxdy.y)
|
||||
// Many blurs share these requirements:
|
||||
// 1.) One-pass blurs require scale_xN == scale_yN or scales > 1.0,
|
||||
// or they will blur more in the lower-scaled dimension.
|
||||
// 2.) One-pass shared sample blurs require ddx(), ddy(), and
|
||||
// tex2Dlod() to be supported by the current Cg profile, and
|
||||
// the drivers must support high-quality derivatives.
|
||||
// 3.) One-pass shared sample blurs require:
|
||||
// tex_uv.w == log2(IN.video_size/IN.output_size).y;
|
||||
// Non-wrapper blurs share this requirement:
|
||||
// 1.) sigma is the intended standard deviation of the blur
|
||||
// Wrapper blurs share this requirement, which is automatically
|
||||
// met (unless OVERRIDE_BLUR_STD_DEVS is #defined; see below):
|
||||
// 1.) blurN_std_dev must be global static const float values
|
||||
// specifying standard deviations for Nx blurs in units
|
||||
// of destination pixels
|
||||
// Optional: 1.) The including file (or an earlier included file) may
|
||||
// optionally #define USE_BINOMIAL_BLUR_STD_DEVS to replace
|
||||
// default standard deviations with those matching a binomial
|
||||
// distribution. (See below for details/properties.)
|
||||
// 2.) The including file (or an earlier included file) may
|
||||
// optionally #define OVERRIDE_BLUR_STD_DEVS and override:
|
||||
// static const float blur3_std_dev
|
||||
// static const float blur4_std_dev
|
||||
// static const float blur5_std_dev
|
||||
// static const float blur6_std_dev
|
||||
// static const float blur7_std_dev
|
||||
// static const float blur8_std_dev
|
||||
// static const float blur9_std_dev
|
||||
// static const float blur10_std_dev
|
||||
// static const float blur11_std_dev
|
||||
// static const float blur12_std_dev
|
||||
// static const float blur17_std_dev
|
||||
// static const float blur25_std_dev
|
||||
// static const float blur31_std_dev
|
||||
// static const float blur43_std_dev
|
||||
// 3.) The including file (or an earlier included file) may
|
||||
// optionally #define OVERRIDE_ERROR_BLURRING and override:
|
||||
// static const float error_blurring
|
||||
// This tuning value helps mitigate weighting errors from one-
|
||||
// pass shared-sample blurs sharing bilinear samples between
|
||||
// fragments. Values closer to 0.0 have "correct" blurriness
|
||||
// but allow more artifacts, and values closer to 1.0 blur away
|
||||
// artifacts by sampling closer to halfway between texels.
|
||||
// UPDATE 6/21/14: The above static constants may now be overridden
|
||||
// by non-static uniform constants. This permits exposing blur
|
||||
// standard deviations as runtime GUI shader parameters. However,
|
||||
// using them keeps weights from being statically computed, and the
|
||||
// speed hit depends on the blur: On my machine, uniforms kill over
|
||||
// 53% of the framerate with tex2Dblur12x12shared, but they only
|
||||
// drop the framerate by about 18% with tex2Dblur11fast.
|
||||
// Quality and Performance Comparisons:
|
||||
// For the purposes of the following discussion, "no sRGB" means
|
||||
// GAMMA_ENCODE_EVERY_FBO is #defined, and "sRGB" means it isn't.
|
||||
// 1.) tex2DblurNfast is always faster than tex2DblurNresize.
|
||||
// 2.) tex2DblurNresize functions are the only ones that can arbitrarily resize
|
||||
// well, because they're the only ones that don't exploit bilinear samples.
|
||||
// This also means they're the only functions which can be truly gamma-
|
||||
// correct without linear (or sRGB FBO) input, but only at 1x scale.
|
||||
// 3.) One-pass shared sample blurs only have a speed advantage without sRGB.
|
||||
// They also have some inaccuracies due to their shared-[bilinear-]sample
|
||||
// design, which grow increasingly bothersome for smaller blurs and higher-
|
||||
// frequency source images (relative to their resolution). I had high
|
||||
// hopes for them, but their most realistic use case is limited to quickly
|
||||
// reblurring an already blurred input at full resolution. Otherwise:
|
||||
// a.) If you're blurring a low-resolution source, you want a better blur.
|
||||
// b.) If you're blurring a lower mipmap, you want a better blur.
|
||||
// c.) If you're blurring a high-resolution, high-frequency source, you
|
||||
// want a better blur.
|
||||
// 4.) The one-pass blurs without shared samples grow slower for larger blurs,
|
||||
// but they're competitive with separable blurs at 5x5 and smaller, and
|
||||
// even tex2Dblur7x7 isn't bad if you're wanting to conserve passes.
|
||||
// Here are some framerates from a GeForce 8800GTS. The first pass resizes to
|
||||
// viewport size (4x in this test) and linearizes for sRGB codepaths, and the
|
||||
// remaining passes perform 6 full blurs. Mipmapped tests are performed at the
|
||||
// same scale, so they just measure the cost of mipmapping each FBO (only every
|
||||
// other FBO is mipmapped for separable blurs, to mimic realistic usage).
|
||||
// Mipmap Neither sRGB+Mipmap sRGB Function
|
||||
// 76.0 92.3 131.3 193.7 tex2Dblur3fast
|
||||
// 63.2 74.4 122.4 175.5 tex2Dblur3resize
|
||||
// 93.7 121.2 159.3 263.2 tex2Dblur3x3
|
||||
// 59.7 68.7 115.4 162.1 tex2Dblur3x3resize
|
||||
// 63.2 74.4 122.4 175.5 tex2Dblur5fast
|
||||
// 49.3 54.8 100.0 132.7 tex2Dblur5resize
|
||||
// 59.7 68.7 115.4 162.1 tex2Dblur5x5
|
||||
// 64.9 77.2 99.1 137.2 tex2Dblur6x6shared
|
||||
// 55.8 63.7 110.4 151.8 tex2Dblur7fast
|
||||
// 39.8 43.9 83.9 105.8 tex2Dblur7resize
|
||||
// 40.0 44.2 83.2 104.9 tex2Dblur7x7
|
||||
// 56.4 65.5 71.9 87.9 tex2Dblur8x8shared
|
||||
// 49.3 55.1 99.9 132.5 tex2Dblur9fast
|
||||
// 33.3 36.2 72.4 88.0 tex2Dblur9resize
|
||||
// 27.8 29.7 61.3 72.2 tex2Dblur9x9
|
||||
// 37.2 41.1 52.6 60.2 tex2Dblur10x10shared
|
||||
// 44.4 49.5 91.3 117.8 tex2Dblur11fast
|
||||
// 28.8 30.8 63.6 75.4 tex2Dblur11resize
|
||||
// 33.6 36.5 40.9 45.5 tex2Dblur12x12shared
|
||||
// TODO: Fill in benchmarks for new untested blurs.
|
||||
// tex2Dblur17fast
|
||||
// tex2Dblur25fast
|
||||
// tex2Dblur31fast
|
||||
// tex2Dblur43fast
|
||||
// tex2Dblur3x3resize
|
||||
|
||||
///////////////////////////// SETTINGS MANAGEMENT ////////////////////////////
|
||||
|
||||
// Set static standard deviations, but allow users to override them with their
|
||||
// own constants (even non-static uniforms if they're okay with the speed hit):
|
||||
//  Set static standard deviations, but allow users to override them with
//  their own constants (even non-static uniforms, accepting the speed hit):
#ifndef OVERRIDE_BLUR_STD_DEVS
    //  blurN_std_dev values are specified in terms of dxdy strides.
    #ifdef USE_BINOMIAL_BLUR_STD_DEVS
        //  By request, we can define standard deviations corresponding to a
        //  binomial distribution with p = 0.5 (related to Pascal's triangle).
        //  This distribution works such that blurring multiple times should
        //  have the same result as a single larger blur.  These values are
        //  larger than default for blurs up to 6x and smaller thereafter.
        const float blur3_std_dev = 0.84931640625;
        const float blur4_std_dev = 0.84931640625;
        const float blur5_std_dev = 1.0595703125;
        const float blur6_std_dev = 1.06591796875;
        const float blur7_std_dev = 1.17041015625;
        const float blur8_std_dev = 1.1720703125;
        const float blur9_std_dev = 1.2259765625;
        const float blur10_std_dev = 1.21982421875;
        const float blur11_std_dev = 1.25361328125;
        const float blur12_std_dev = 1.2423828125;
        const float blur17_std_dev = 1.27783203125;
        const float blur25_std_dev = 1.2810546875;
        const float blur31_std_dev = 1.28125;
        const float blur43_std_dev = 1.28125;
    #else
        //  The defaults are the largest values that keep the largest unused
        //  blur term on each side <= 1.0/256.0.  (We could get away with more
        //  or be more conservative, but this compromise is pretty reasonable.)
        const float blur3_std_dev = 0.62666015625;
        const float blur4_std_dev = 0.66171875;
        const float blur5_std_dev = 0.9845703125;
        const float blur6_std_dev = 1.02626953125;
        const float blur7_std_dev = 1.36103515625;
        const float blur8_std_dev = 1.4080078125;
        const float blur9_std_dev = 1.7533203125;
        const float blur10_std_dev = 1.80478515625;
        const float blur11_std_dev = 2.15986328125;
        const float blur12_std_dev = 2.215234375;
        const float blur17_std_dev = 3.45535583496;
        const float blur25_std_dev = 5.3409576416;
        const float blur31_std_dev = 6.86488037109;
        const float blur43_std_dev = 10.1852050781;
    #endif  //  USE_BINOMIAL_BLUR_STD_DEVS
#endif  //  OVERRIDE_BLUR_STD_DEVS

#ifndef OVERRIDE_ERROR_BLURRING
    //  error_blurring should be in [0.0, 1.0].  Higher values reduce ringing
    //  in shared-sample blurs but increase blurring and feature shifting.
    const float error_blurring = 0.5;
#endif
|
||||
|
||||
//  Make a length squared helper macro (for usage with static constants):
//  (This macro and its comment were previously defined twice back-to-back;
//  the redundant second copy has been removed.)
#define LENGTH_SQ(vec) (dot(vec, vec))

/////////////////////////////////// HELPERS //////////////////////////////////

vec4 uv2_to_uv4(vec2 tex_uv)
{
    //  Make a vec2 uv offset safe for adding to vec4 tex2Dlod coords:
    //  (pads with zero z/w so the lod-bias components are unaffected)
    return vec4(tex_uv, 0.0, 0.0);
}
|
||||
|
||||
float get_fast_gaussian_weight_sum_inv(const float sigma)
{
    //  Returns a fast approximation of the normalized Gaussian center-texel
    //  weight for a blur of standard deviation sigma.  Since the
    //  unnormalized center weight is 1.0, the normalized weight equals the
    //  weight-sum inverse.  The exact asymptotic value (for large blurs,
    //  9+ texels) comes from the Gaussian integral:
    //      center_weight = 0.5 *
    //          (erf(0.5/(sigma*sqrt(2.0))) - erf(-0.5/(sigma*sqrt(2.0))))
    //  and since erf(-x) == -erf(x), that's 0.5 * (2.0 * erf(...)).
    //  Curve-fitting is even faster and closer in practice: the fit was
    //  built from 64 blur sizes in [3, 131) and 255 equally-spaced sigmas
    //  in (0, blurN_std_dev), so results for smaller sigmas are biased
    //  toward smaller blurs.  Max error: 0.0031793913.
    //  Relative FPS: 134.3 with erf, 135.8 with curve-fitting.
    //static const float temp = 0.5/sqrt(2.0);
    //return erf(temp/sigma);
    const float fit_term = exp(exp(0.348348412457428/
        (sigma - 0.0860587260734721)));
    const float bound_term = 0.399334576340352/sigma;
    return min(fit_term, bound_term);
}
|
||||
|
||||
//////////////////// ARBITRARILY RESIZABLE ONE-PASS BLURS ////////////////////
|
||||
|
||||
vec3 tex2Dblur3x3resize(const sampler2D tex, const vec2 tex_uv,
    const vec2 dxdy, const float sigma)
{
    //  Requires:   Global requirements must be met (see file description).
    //  Returns:    A 3x3 Gaussian blurred mipmapped texture lookup of the
    //              resized input.
    //  This is the only arbitrarily resizable one-pass blur; tex2Dblur5x5resize
    //  would perform like tex2Dblur9x9, MUCH slower than tex2Dblur5resize.
    const float denom_inv = 0.5/(sigma*sigma);
    //  Load all 3x3 samples individually.  Quad-pixel communication wouldn't
    //  help: this should perform like tex2Dblur5x5, whereas sharing a 4x4
    //  sample field would perform more like tex2Dblur8x8shared (worse).
    const vec2 dx = vec2(dxdy.x, 0.0);
    const vec2 dy = vec2(0.0, dxdy.y);
    const vec2 top_uv = tex_uv - dy;
    const vec2 bottom_uv = tex_uv + dy;
    const vec3 s0 = tex2D_linearize(tex, top_uv - dx).rgb;
    const vec3 s1 = tex2D_linearize(tex, top_uv).rgb;
    const vec3 s2 = tex2D_linearize(tex, top_uv + dx).rgb;
    const vec3 s3 = tex2D_linearize(tex, tex_uv - dx).rgb;
    const vec3 s4 = tex2D_linearize(tex, tex_uv).rgb;
    const vec3 s5 = tex2D_linearize(tex, tex_uv + dx).rgb;
    const vec3 s6 = tex2D_linearize(tex, bottom_uv - dx).rgb;
    const vec3 s7 = tex2D_linearize(tex, bottom_uv).rgb;
    const vec3 s8 = tex2D_linearize(tex, bottom_uv + dx).rgb;
    //  Statically compute Gaussian sample weights (center, edge, corner):
    const float w_center = 1.0;
    const float w_edge = exp(-LENGTH_SQ(vec2(1.0, 0.0)) * denom_inv);
    const float w_corner = exp(-LENGTH_SQ(vec2(1.0, 1.0)) * denom_inv);
    const float weight_sum_inv = 1.0/(w_center + 4.0 * (w_edge + w_corner));
    //  Weight and sum the samples:
    const vec3 weighted_sum = w_center * s4 +
        w_edge * (s1 + s3 + s5 + s7) +
        w_corner * (s0 + s2 + s6 + s8);
    return weighted_sum * weight_sum_inv;
}
|
||||
|
||||
// Resizable one-pass blurs:
|
||||
//  Wrapper: 3x3 resize blur using the global default standard deviation.
//  The sampler parameter was renamed from "texture" to "tex": in GLSL 4.x
//  (#version 450), texture() is a built-in function, and shadowing it with a
//  parameter name is confusing and error-prone.
vec3 tex2Dblur3x3resize(const sampler2D tex, const vec2 tex_uv,
    const vec2 dxdy)
{
    return tex2Dblur3x3resize(tex, tex_uv, dxdy, blur3_std_dev);
}
|
349
crt/shaders/crt-royale/src/crt-royale-bloom-approx.slang
Normal file
349
crt/shaders/crt-royale/src/crt-royale-bloom-approx.slang
Normal file
|
@ -0,0 +1,349 @@
|
|||
#version 450

//  Per-pass constants supplied by the slang shader runtime via push
//  constants (sizes are vec4: width, height, 1/width, 1/height -- per the
//  libretro slang convention; TODO confirm against the preset):
layout(push_constant) uniform Push
{
    vec4 SourceSize;
    vec4 OriginalSize;
    vec4 OutputSize;
    uint FrameCount;
    //  Size of the ORIG_LINEARIZED pass output (aliased pass feedback):
    vec4 ORIG_LINEARIZEDSize;
} registers;

#include "params.inc"
|
||||
|
||||
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
|
||||
|
||||
// crt-royale: A full-featured CRT shader, with cheese.
|
||||
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation; either version 2 of the License, or any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along with
|
||||
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
|
||||
// Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
|
||||
////////////////////////////////// INCLUDES //////////////////////////////////
|
||||
|
||||
#include "includes.h"
|
||||
|
||||
/////////////////////////////////// HELPERS //////////////////////////////////
|
||||
|
||||
/////////////////////////////////// HELPERS //////////////////////////////////
|
||||
|
||||
vec3 tex2Dresize_gaussian4x4(const sampler2D tex, const vec2 tex_uv,
|
||||
const vec2 dxdy, const vec2 texture_size, const vec2 texture_size_inv,
|
||||
const vec2 tex_uv_to_pixel_scale, const float sigma)
|
||||
{
|
||||
// Requires: 1.) All requirements of gamma-management.h must be satisfied!
|
||||
// 2.) filter_linearN must == "true" in your .cgp preset.
|
||||
// 3.) mipmap_inputN must == "true" in your .cgp preset if
|
||||
// IN.output_size << SRC.video_size.
|
||||
// 4.) dxdy should contain the uv pixel spacing:
|
||||
// dxdy = max(vec2(1.0),
|
||||
// SRC.video_size/IN.output_size)/SRC.texture_size;
|
||||
// 5.) texture_size == SRC.texture_size
|
||||
// 6.) texture_size_inv == vec2(1.0)/SRC.texture_size
|
||||
// 7.) tex_uv_to_pixel_scale == IN.output_size *
|
||||
// SRC.texture_size / SRC.video_size;
|
||||
// 8.) sigma is the desired Gaussian standard deviation, in
|
||||
// terms of output pixels. It should be < ~0.66171875 to
|
||||
// ensure the first unused sample (outside the 4x4 box) has
|
||||
// a weight < 1.0/256.0.
|
||||
// Returns: A true 4x4 Gaussian resize of the input.
|
||||
// Description:
|
||||
// Given correct inputs, this Gaussian resizer samples 4 pixel locations
|
||||
// along each downsized dimension and/or 4 texel locations along each
|
||||
// upsized dimension. It computes dynamic weights based on the pixel-space
|
||||
// distance of each sample from the destination pixel. It is arbitrarily
|
||||
// resizable and higher quality than tex2Dblur3x3_resize, but it's slower.
|
||||
// TODO: Move this to a more suitable file once there are others like it.
|
||||
const float denom_inv = 0.5/(sigma*sigma);
|
||||
// We're taking 4x4 samples, and we're snapping to texels for upsizing.
|
||||
// Find texture coords for sample 5 (second row, second column):
|
||||
const vec2 curr_texel = tex_uv * texture_size;
|
||||
const vec2 prev_texel =
|
||||
floor(curr_texel - vec2(under_half)) + vec2(0.5);
|
||||
const vec2 prev_texel_uv = prev_texel * texture_size_inv;
|
||||
const bvec2 snap = lessThanEqual(dxdy , texture_size_inv);
|
||||
const vec2 sample5_downsize_uv = tex_uv - 0.5 * dxdy;
|
||||
const vec2 sample5_uv = mix(sample5_downsize_uv, prev_texel_uv, snap);
|
||||
// Compute texture coords for other samples:
|
||||
const vec2 dx = vec2(dxdy.x, 0.0);
|
||||
const vec2 sample0_uv = sample5_uv - dxdy;
|
||||
const vec2 sample10_uv = sample5_uv + dxdy;
|
||||
const vec2 sample15_uv = sample5_uv + 2.0 * dxdy;
|
||||
const vec2 sample1_uv = sample0_uv + dx;
|
||||
const vec2 sample2_uv = sample0_uv + 2.0 * dx;
|
||||
const vec2 sample3_uv = sample0_uv + 3.0 * dx;
|
||||
const vec2 sample4_uv = sample5_uv - dx;
|
||||
const vec2 sample6_uv = sample5_uv + dx;
|
||||
const vec2 sample7_uv = sample5_uv + 2.0 * dx;
|
||||
const vec2 sample8_uv = sample10_uv - 2.0 * dx;
|
||||
const vec2 sample9_uv = sample10_uv - dx;
|
||||
const vec2 sample11_uv = sample10_uv + dx;
|
||||
const vec2 sample12_uv = sample15_uv - 3.0 * dx;
|
||||
const vec2 sample13_uv = sample15_uv - 2.0 * dx;
|
||||
const vec2 sample14_uv = sample15_uv - dx;
|
||||
// Load each sample:
|
||||
const vec3 sample0 = tex2D_linearize(tex, sample0_uv).rgb;
|
||||
const vec3 sample1 = tex2D_linearize(tex, sample1_uv).rgb;
|
||||
const vec3 sample2 = tex2D_linearize(tex, sample2_uv).rgb;
|
||||
const vec3 sample3 = tex2D_linearize(tex, sample3_uv).rgb;
|
||||
const vec3 sample4 = tex2D_linearize(tex, sample4_uv).rgb;
|
||||
const vec3 sample5 = tex2D_linearize(tex, sample5_uv).rgb;
|
||||
const vec3 sample6 = tex2D_linearize(tex, sample6_uv).rgb;
|
||||
const vec3 sample7 = tex2D_linearize(tex, sample7_uv).rgb;
|
||||
const vec3 sample8 = tex2D_linearize(tex, sample8_uv).rgb;
|
||||
const vec3 sample9 = tex2D_linearize(tex, sample9_uv).rgb;
|
||||
const vec3 sample10 = tex2D_linearize(tex, sample10_uv).rgb;
|
||||
const vec3 sample11 = tex2D_linearize(tex, sample11_uv).rgb;
|
||||
const vec3 sample12 = tex2D_linearize(tex, sample12_uv).rgb;
|
||||
const vec3 sample13 = tex2D_linearize(tex, sample13_uv).rgb;
|
||||
const vec3 sample14 = tex2D_linearize(tex, sample14_uv).rgb;
|
||||
const vec3 sample15 = tex2D_linearize(tex, sample15_uv).rgb;
|
||||
// Compute destination pixel offsets for each sample:
|
||||
const vec2 dest_pixel = tex_uv * tex_uv_to_pixel_scale;
|
||||
const vec2 sample0_offset = sample0_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample1_offset = sample1_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample2_offset = sample2_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample3_offset = sample3_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample4_offset = sample4_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample5_offset = sample5_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample6_offset = sample6_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample7_offset = sample7_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample8_offset = sample8_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample9_offset = sample9_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample10_offset = sample10_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample11_offset = sample11_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample12_offset = sample12_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample13_offset = sample13_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample14_offset = sample14_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
const vec2 sample15_offset = sample15_uv * tex_uv_to_pixel_scale - dest_pixel;
|
||||
// Compute Gaussian sample weights:
|
||||
const float w0 = exp(-LENGTH_SQ(sample0_offset) * denom_inv);
|
||||
const float w1 = exp(-LENGTH_SQ(sample1_offset) * denom_inv);
|
||||
const float w2 = exp(-LENGTH_SQ(sample2_offset) * denom_inv);
|
||||
const float w3 = exp(-LENGTH_SQ(sample3_offset) * denom_inv);
|
||||
const float w4 = exp(-LENGTH_SQ(sample4_offset) * denom_inv);
|
||||
const float w5 = exp(-LENGTH_SQ(sample5_offset) * denom_inv);
|
||||
const float w6 = exp(-LENGTH_SQ(sample6_offset) * denom_inv);
|
||||
const float w7 = exp(-LENGTH_SQ(sample7_offset) * denom_inv);
|
||||
const float w8 = exp(-LENGTH_SQ(sample8_offset) * denom_inv);
|
||||
const float w9 = exp(-LENGTH_SQ(sample9_offset) * denom_inv);
|
||||
const float w10 = exp(-LENGTH_SQ(sample10_offset) * denom_inv);
|
||||
const float w11 = exp(-LENGTH_SQ(sample11_offset) * denom_inv);
|
||||
const float w12 = exp(-LENGTH_SQ(sample12_offset) * denom_inv);
|
||||
const float w13 = exp(-LENGTH_SQ(sample13_offset) * denom_inv);
|
||||
const float w14 = exp(-LENGTH_SQ(sample14_offset) * denom_inv);
|
||||
const float w15 = exp(-LENGTH_SQ(sample15_offset) * denom_inv);
|
||||
const float weight_sum_inv = 1.0/(
|
||||
w0 + w1 + w2 + w3 + w4 + w5 + w6 + w7 +
|
||||
w8 +w9 + w10 + w11 + w12 + w13 + w14 + w15);
|
||||
// Weight and sum the samples:
|
||||
const vec3 sum = w0 * sample0 + w1 * sample1 + w2 * sample2 + w3 * sample3 +
|
||||
w4 * sample4 + w5 * sample5 + w6 * sample6 + w7 * sample7 +
|
||||
w8 * sample8 + w9 * sample9 + w10 * sample10 + w11 * sample11 +
|
||||
w12 * sample12 + w13 * sample13 + w14 * sample14 + w15 * sample15;
|
||||
return sum * weight_sum_inv;
|
||||
}
|
||||
|
||||
#pragma stage vertex
|
||||
layout(location = 0) in vec4 Position;
|
||||
layout(location = 1) in vec2 TexCoord;
|
||||
layout(location = 0) out vec2 tex_uv;
|
||||
layout(location = 1) out float estimated_viewport_size_x;
|
||||
layout(location = 2) out vec2 blur_dxdy;
|
||||
layout(location = 3) out vec2 uv_scanline_step;
|
||||
layout(location = 4) out vec2 texture_size_inv;
|
||||
layout(location = 5) out vec2 tex_uv_to_pixel_scale;
|
||||
|
||||
void main()
{
    // Vertex stage for the bloom-approx pass.  Mirrors
    // blurs/vertex-shader-blur-one-pass-resize.h, except we read from a
    // different source image (ORIG_LINEARIZED).
    gl_Position = params.MVP * Position;
    tex_uv = TexCoord;

    // The last pass (vertical scanlines) had a viewport y scale, so derive
    // an estimated viewport width from its height and the runtime aspect
    // ratio; the fragment stage uses it to pick a better blur sigma.
    estimated_viewport_size_x = registers.SourceSize.y *
        params.geom_aspect_ratio_x / params.geom_aspect_ratio_y;

    // Get the uv sample distance between output pixels.  We're using a
    // resize blur, so arbitrary upsizing is fine if filter_linearN = "true,"
    // and arbitrary downsizing is fine if mipmap_inputN = "true" too.  The
    // 4x4 true Gaussian resize snaps to texels when upsizing, so never let
    // its sample scale drop below one texel; the cheaper filters sample
    // between texels and use the raw scale.
    const vec2 downsize_scale =
        registers.ORIG_LINEARIZEDSize.xy / registers.OutputSize.xy;
    texture_size_inv = registers.ORIG_LINEARIZEDSize.zw;
    const vec2 sample_scale = (bloom_approx_filter > 1.5) ?
        max(downsize_scale, vec2(1.0)) :    // 4x4 true Gaussian resize
        downsize_scale;                     // 3x3 resize blur or bilinear
    blur_dxdy = sample_scale * texture_size_inv;

    tex_uv_to_pixel_scale = registers.OutputSize.xy;

    // Detecting interlacing again here lets us apply convergence offsets in
    // this pass.  il_step_multiple holds the (texel, scanline) step
    // multiple: 1 for progressive sources, 2 for interlaced ones.
    const float y_step =
        is_interlaced(registers.ORIG_LINEARIZEDSize.y) ? 2.0 : 1.0;
    const vec2 il_step_multiple = vec2(1.0, y_step);
    // Get the uv distance between (texels, same-field scanlines):
    uv_scanline_step = il_step_multiple * registers.ORIG_LINEARIZEDSize.zw;
}
|
||||
|
||||
#pragma stage fragment
|
||||
layout(location = 0) in vec2 tex_uv;
|
||||
layout(location = 1) in float estimated_viewport_size_x;
|
||||
layout(location = 2) in vec2 blur_dxdy;
|
||||
layout(location = 3) in vec2 uv_scanline_step;
|
||||
layout(location = 4) in vec2 texture_size_inv;
|
||||
layout(location = 5) in vec2 tex_uv_to_pixel_scale;
|
||||
layout(location = 0) out vec4 FragColor;
|
||||
layout(set = 0, binding = 2) uniform sampler2D Source;
|
||||
layout(set = 0, binding = 3) uniform sampler2D ORIG_LINEARIZED;
|
||||
|
||||
void main()
{
    // Fragment stage for the bloom-approx pass: resize and blur
    // ORIG_LINEARIZED into a small approximation used by later bloom /
    // halation passes, applying per-channel convergence offsets here so
    // those passes don't have to sample three times each.
    //
    // Design note — would a viewport-relative size work better?  (No.)
    // PROS: a viewport scale in user-cgp-constants.h would let us compute
    //   an optimal dynamic sigma no matter how the dot pitch is specified.
    // CONS: texel smearing worsens at small viewports and performance
    //   worsens at large ones, so there's no good scale; we'd lose the
    //   constant-size blur and face the real phosphor bloom's static-#ifdef
    //   sizing problems; and we already overblur (down to ~341.3 triads)
    //   beyond what 20/20 vision resolves (~600-800 triads at recommended
    //   viewing angles), so optical accuracy wouldn't improve.  Bilinear
    //   sampling here also only limits bandwidth at a small constant scale.

    // Get the constants we need to sample:
    const vec2 texture_size = registers.ORIG_LINEARIZEDSize.xy;
    vec2 tex_uv_r, tex_uv_g, tex_uv_b;

    if(beam_misconvergence)
    {
        // Offsets are in (texel, same-field scanline) units;
        // uv_scanline_step converts them to uv.  (The unused
        // get_convergence_offsets_*_vector() calls that previously sat here
        // were dead work — the runtime params are read directly.)
        tex_uv_r = tex_uv - vec2(params.convergence_offset_x_r, params.convergence_offset_y_r) * uv_scanline_step;
        tex_uv_g = tex_uv - vec2(params.convergence_offset_x_g, params.convergence_offset_y_g) * uv_scanline_step;
        tex_uv_b = tex_uv - vec2(params.convergence_offset_x_b, params.convergence_offset_y_b) * uv_scanline_step;
    }
    // Get the blur sigma:
    const float bloom_approx_sigma = get_bloom_approx_sigma(registers.OutputSize.x, estimated_viewport_size_x);

    // Sample the resized and blurred texture, and apply convergence offsets
    // if necessary.  Applying convergence offsets here triples our samples
    // from 16/9/1 to 48/27/3, but it's faster and easier than sampling
    // BLOOM_APPROX and HALATION_BLUR 3 times at full resolution every time
    // they're used.  (color_r/g/b are only read on the misconvergence path,
    // and color is always written before use on the other path.)
    vec3 color_r, color_g, color_b, color;
    if(bloom_approx_filter > 1.5)
    {
        // Use a 4x4 Gaussian resize.  This is slower but technically correct.
        if(beam_misconvergence)
        {
            color_r = tex2Dresize_gaussian4x4(ORIG_LINEARIZED, tex_uv_r,
                blur_dxdy, texture_size, texture_size_inv,
                tex_uv_to_pixel_scale, bloom_approx_sigma);
            color_g = tex2Dresize_gaussian4x4(ORIG_LINEARIZED, tex_uv_g,
                blur_dxdy, texture_size, texture_size_inv,
                tex_uv_to_pixel_scale, bloom_approx_sigma);
            color_b = tex2Dresize_gaussian4x4(ORIG_LINEARIZED, tex_uv_b,
                blur_dxdy, texture_size, texture_size_inv,
                tex_uv_to_pixel_scale, bloom_approx_sigma);
        }
        else
        {
            color = tex2Dresize_gaussian4x4(ORIG_LINEARIZED, tex_uv,
                blur_dxdy, texture_size, texture_size_inv,
                tex_uv_to_pixel_scale, bloom_approx_sigma);
        }
    }
    else if(bloom_approx_filter > 0.5)
    {
        // Use a 3x3 resize blur.  This is the softest option, because we're
        // blurring already blurry bilinear samples.  It doesn't play quite
        // as nicely with convergence offsets, but it has its charms.
        if(beam_misconvergence)
        {
            color_r = tex2Dblur3x3resize(ORIG_LINEARIZED, tex_uv_r,
                blur_dxdy, bloom_approx_sigma);
            color_g = tex2Dblur3x3resize(ORIG_LINEARIZED, tex_uv_g,
                blur_dxdy, bloom_approx_sigma);
            color_b = tex2Dblur3x3resize(ORIG_LINEARIZED, tex_uv_b,
                blur_dxdy, bloom_approx_sigma);
        }
        else
        {
            // NOTE(review): unlike the branch above, this call omits
            // bloom_approx_sigma — confirm a sigma-less overload of
            // tex2Dblur3x3resize exists, or pass the sigma explicitly.
            color = tex2Dblur3x3resize(ORIG_LINEARIZED, tex_uv, blur_dxdy);
        }
    }
    else
    {
        // Use bilinear sampling.  This approximates a 4x4 Gaussian resize
        // MUCH better than tex2Dblur3x3_resize for the very small sigmas
        // we're likely to use at small output resolutions.  (This estimate
        // becomes too sharp above ~400x300, but the blurs break down above
        // that resolution too, unless min_allowed_viewport_triads is high
        // enough to keep
        // bloom_approx_scale_x/min_allowed_viewport_triads < ~1.1658025.)
        if(beam_misconvergence)
        {
            color_r = tex2D_linearize(ORIG_LINEARIZED, tex_uv_r).rgb;
            color_g = tex2D_linearize(ORIG_LINEARIZED, tex_uv_g).rgb;
            color_b = tex2D_linearize(ORIG_LINEARIZED, tex_uv_b).rgb;
        }
        else
        {
            color = tex2D_linearize(ORIG_LINEARIZED, tex_uv).rgb;
        }
    }
    // Pack the colors from the red/green/blue beams into a single vector:
    if(beam_misconvergence)
    {
        color = vec3(color_r.r, color_g.g, color_b.b);
    }
    // Output the blurred image (alpha forced opaque):
    FragColor = vec4(color, 1.0);
}
|
|
@ -6,56 +6,6 @@ layout(push_constant) uniform Push
|
|||
uint FrameCount;
|
||||
} registers;
|
||||
|
||||
layout(std140, set = 0, binding = 0) uniform UBO
|
||||
{
|
||||
mat4 MVP;
|
||||
float crt_gamma;
|
||||
float lcd_gamma;
|
||||
float levels_contrast;
|
||||
float halation_weight;
|
||||
float diffusion_weight;
|
||||
float bloom_underestimate_levels;
|
||||
float bloom_excess;
|
||||
float beam_min_sigma;
|
||||
float beam_max_sigma;
|
||||
float beam_spot_power;
|
||||
float beam_min_shape;
|
||||
float beam_max_shape;
|
||||
float beam_shape_power;
|
||||
float beam_horiz_filter;
|
||||
float beam_horiz_sigma;
|
||||
float beam_horiz_linear_rgb_weight;
|
||||
float convergence_offset_x_r;
|
||||
float convergence_offset_x_g;
|
||||
float convergence_offset_x_b;
|
||||
float convergence_offset_y_r;
|
||||
float convergence_offset_y_g;
|
||||
float convergence_offset_y_b;
|
||||
float mask_type;
|
||||
float mask_sample_mode_desired;
|
||||
float mask_specify_num_triads;
|
||||
float mask_triad_size_desired;
|
||||
float mask_num_triads_desired;
|
||||
float aa_subpixel_r_offset_x_runtime;
|
||||
float aa_subpixel_r_offset_y_runtime;
|
||||
float aa_cubic_c;
|
||||
float aa_gauss_sigma;
|
||||
float geom_mode_runtime;
|
||||
float geom_radius;
|
||||
float geom_view_dist;
|
||||
float geom_tilt_angle_x;
|
||||
float geom_tilt_angle_y;
|
||||
float geom_aspect_ratio_x;
|
||||
float geom_aspect_ratio_y;
|
||||
float geom_overscan_x;
|
||||
float geom_overscan_y;
|
||||
float border_size;
|
||||
float border_darkness;
|
||||
float border_compress;
|
||||
float interlace_bff;
|
||||
float interlace_1080i;
|
||||
} params;
|
||||
|
||||
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
|
||||
|
||||
// crt-royale: A full-featured CRT shader, with cheese.
|
||||
|
@ -88,211 +38,7 @@ layout(std140, set = 0, binding = 0) uniform UBO
|
|||
|
||||
////////////////////////////////// INCLUDES //////////////////////////////////
|
||||
|
||||
#include "../user-settings.h"
|
||||
#include "bind-shader-params.h"
|
||||
//#include "../../../../include/gamma-management.h"
|
||||
//#include "scanline-functions.h"
|
||||
|
||||
// from scanline-functions.h //
|
||||
bool is_interlaced(float num_lines)
{
    // Heuristically decide whether a source with num_lines active lines is
    // interlaced.  Reference line counts:
    //   NTSC: 525 lines, 262.5/field; 486 active (2 half-lines), 243/field
    //   NTSC emulators: typically 224 or 240 lines
    //   PAL: 625 lines, 312.5/field; 576 active (typical), 288/field
    //   ATSC: 720p, 1080i, 1080p
    // Assumptions: we only care about active lines; > 288 and <= 576 lines
    // is probably interlaced; > 576 probably isn't... except 1080 lines,
    // which is a crapshoot (user decision elsewhere).  Thresholds are
    // nudged by 0.5 in case the main program uses calculated video sizes.
    if(!interlace_detect)
    {
        return false;
    }
    const bool sd_interlace = (num_lines > 288.5) && (num_lines < 576.5);
    const bool hd_interlace = (num_lines > 1079.5) && (num_lines < 1080.5);
    return sd_interlace || hd_interlace;
}
|
||||
// end scanline-functions.h //
|
||||
|
||||
// from gamma-management.h //
|
||||
/////////////////////////////// BASE CONSTANTS ///////////////////////////////
|
||||
|
||||
// Set standard gamma constants, but allow users to override them:
|
||||
#ifndef OVERRIDE_STANDARD_GAMMA
|
||||
// Standard encoding gammas:
|
||||
const float ntsc_gamma = 2.2; // Best to use NTSC for PAL too?
|
||||
const float pal_gamma = 2.8; // Never actually 2.8 in practice
|
||||
// Typical device decoding gammas (only use for emulating devices):
|
||||
// CRT/LCD reference gammas are higher than NTSC and Rec.709 video standard
|
||||
// gammas: The standards purposely undercorrected for an analog CRT's
|
||||
// assumed 2.5 reference display gamma to maintain contrast in assumed
|
||||
// [dark] viewing conditions: http://www.poynton.com/PDFs/GammaFAQ.pdf
|
||||
// These unstated assumptions about display gamma and perceptual rendering
|
||||
// intent caused a lot of confusion, and more modern CRT's seemed to target
|
||||
// NTSC 2.2 gamma with circuitry. LCD displays seem to have followed suit
|
||||
// (they struggle near black with 2.5 gamma anyway), especially PC/laptop
|
||||
// displays designed to view sRGB in bright environments. (Standards are
|
||||
// also in flux again with BT.1886, but it's underspecified for displays.)
|
||||
const float crt_reference_gamma_high = 2.5; // In (2.35, 2.55)
|
||||
const float crt_reference_gamma_low = 2.35; // In (2.35, 2.55)
|
||||
const float lcd_reference_gamma = 2.5; // To match CRT
|
||||
const float crt_office_gamma = 2.2; // Circuitry-adjusted for NTSC
|
||||
const float lcd_office_gamma = 2.2; // Approximates sRGB
|
||||
#endif // OVERRIDE_STANDARD_GAMMA
|
||||
|
||||
// Assuming alpha == 1.0 might make it easier for users to avoid some bugs,
|
||||
// but only if they're aware of it.
|
||||
#ifndef OVERRIDE_ALPHA_ASSUMPTIONS
|
||||
const bool assume_opaque_alpha = false;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////// DERIVED CONSTANTS AS FUNCTIONS ///////////////////////
|
||||
|
||||
// gamma-management.h should be compatible with overriding gamma values with
|
||||
// runtime user parameters, but we can only define other global constants in
|
||||
// terms of static constants, not uniform user parameters. To get around this
|
||||
// limitation, we need to define derived constants using functions.
|
||||
|
||||
// Set device gamma constants, but allow users to override them:
|
||||
#ifdef OVERRIDE_DEVICE_GAMMA
|
||||
// The user promises to globally define the appropriate constants:
|
||||
float get_crt_gamma() { return crt_gamma; }
|
||||
float get_gba_gamma() { return gba_gamma; }
|
||||
float get_lcd_gamma() { return lcd_gamma; }
|
||||
#else
|
||||
float get_crt_gamma() { return crt_reference_gamma_high; }
|
||||
float get_gba_gamma() { return 3.5; } // Game Boy Advance; in (3.0, 4.0)
|
||||
float get_lcd_gamma() { return lcd_office_gamma; }
|
||||
#endif // OVERRIDE_DEVICE_GAMMA
|
||||
|
||||
// Set decoding/encoding gammas for the first/lass passes, but allow overrides:
|
||||
#ifdef OVERRIDE_FINAL_GAMMA
|
||||
// The user promises to globally define the appropriate constants:
|
||||
float get_intermediate_gamma() { return intermediate_gamma; }
|
||||
float get_input_gamma() { return input_gamma; }
|
||||
float get_output_gamma() { return output_gamma; }
|
||||
#else
|
||||
// If we gamma-correct every pass, always use ntsc_gamma between passes to
|
||||
// ensure middle passes don't need to care if anything is being simulated:
|
||||
float get_intermediate_gamma() { return ntsc_gamma; }
|
||||
#ifdef SIMULATE_CRT_ON_LCD
|
||||
float get_input_gamma() { return get_crt_gamma(); }
|
||||
float get_output_gamma() { return get_lcd_gamma(); }
|
||||
#else
|
||||
#ifdef SIMULATE_GBA_ON_LCD
|
||||
float get_input_gamma() { return get_gba_gamma(); }
|
||||
float get_output_gamma() { return get_lcd_gamma(); }
|
||||
#else
|
||||
#ifdef SIMULATE_LCD_ON_CRT
|
||||
float get_input_gamma() { return get_lcd_gamma(); }
|
||||
float get_output_gamma() { return get_crt_gamma(); }
|
||||
#else
|
||||
#ifdef SIMULATE_GBA_ON_CRT
|
||||
float get_input_gamma() { return get_gba_gamma(); }
|
||||
float get_output_gamma() { return get_crt_gamma(); }
|
||||
#else // Don't simulate anything:
|
||||
float get_input_gamma() { return ntsc_gamma; }
|
||||
float get_output_gamma() { return ntsc_gamma; }
|
||||
#endif // SIMULATE_GBA_ON_CRT
|
||||
#endif // SIMULATE_LCD_ON_CRT
|
||||
#endif // SIMULATE_GBA_ON_LCD
|
||||
#endif // SIMULATE_CRT_ON_LCD
|
||||
#endif // OVERRIDE_FINAL_GAMMA
|
||||
|
||||
#ifndef GAMMA_ENCODE_EVERY_FBO
|
||||
#ifdef FIRST_PASS
|
||||
const bool linearize_input = true;
|
||||
float get_pass_input_gamma() { return get_input_gamma(); }
|
||||
#else
|
||||
const bool linearize_input = false;
|
||||
float get_pass_input_gamma() { return 1.0; }
|
||||
#endif
|
||||
#ifdef LAST_PASS
|
||||
const bool gamma_encode_output = true;
|
||||
float get_pass_output_gamma() { return get_output_gamma(); }
|
||||
#else
|
||||
const bool gamma_encode_output = false;
|
||||
float get_pass_output_gamma() { return 1.0; }
|
||||
#endif
|
||||
#else
|
||||
const bool linearize_input = true;
|
||||
const bool gamma_encode_output = true;
|
||||
#ifdef FIRST_PASS
|
||||
float get_pass_input_gamma() { return get_input_gamma(); }
|
||||
#else
|
||||
float get_pass_input_gamma() { return get_intermediate_gamma(); }
|
||||
#endif
|
||||
#ifdef LAST_PASS
|
||||
float get_pass_output_gamma() { return get_output_gamma(); }
|
||||
#else
|
||||
float get_pass_output_gamma() { return get_intermediate_gamma(); }
|
||||
#endif
|
||||
#endif
|
||||
|
||||
vec4 decode_input(const vec4 color)
{
    // Linearize an input color: raise rgb to get_pass_input_gamma() when
    // this pass linearizes its input, otherwise pass the color through
    // unchanged.  Alpha is forced to 1.0 when assume_opaque_alpha is set.
    if(!linearize_input)
    {
        return color;
    }
    const vec3 linear_rgb = pow(color.rgb, vec3(get_pass_input_gamma()));
    const float alpha = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(linear_rgb, alpha);
}
|
||||
|
||||
vec4 encode_output(const vec4 color)
{
    // Gamma-encode an output color: raise rgb to the inverse of
    // get_pass_output_gamma() when this pass encodes its output, otherwise
    // pass the color through unchanged.  Alpha is forced to 1.0 when
    // assume_opaque_alpha is set.
    if(!gamma_encode_output)
    {
        return color;
    }
    const vec3 encoded_rgb = pow(color.rgb, vec3(1.0/get_pass_output_gamma()));
    const float alpha = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(encoded_rgb, alpha);
}
|
||||
|
||||
#define tex2D_linearize(C, D) decode_input(vec4(texture(C, D)))
|
||||
//vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords)
|
||||
//{ return decode_input(vec4(texture(tex, tex_coords))); }
|
||||
|
||||
//#define tex2D_linearize(C, D, E) decode_input(vec4(texture(C, D, E)))
|
||||
//vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords, const int texel_off)
|
||||
//{ return decode_input(vec4(texture(tex, tex_coords, texel_off))); }
|
||||
|
||||
// end gamma-management.h //
|
||||
#include "includes.h"
|
||||
|
||||
#pragma stage vertex
|
||||
layout(location = 0) in vec4 Position;
|
||||
|
|
|
@ -0,0 +1,237 @@
|
|||
#version 450
|
||||
|
||||
layout(push_constant) uniform Push
|
||||
{
|
||||
vec4 SourceSize;
|
||||
vec4 OriginalSize;
|
||||
vec4 OutputSize;
|
||||
uint FrameCount;
|
||||
} registers;
|
||||
|
||||
#include "params.inc"
|
||||
|
||||
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
|
||||
|
||||
// crt-royale: A full-featured CRT shader, with cheese.
|
||||
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation; either version 2 of the License, or any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along with
|
||||
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
|
||||
// Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
|
||||
////////////////////////////////// INCLUDES //////////////////////////////////
|
||||
|
||||
#include "includes.h"
|
||||
|
||||
#pragma stage vertex
|
||||
layout(location = 0) in vec4 Position;
|
||||
layout(location = 1) in vec2 TexCoord;
|
||||
layout(location = 0) out vec2 tex_uv;
|
||||
layout(location = 1) out vec2 uv_step;
|
||||
layout(location = 2) out vec2 il_step_multiple;
|
||||
layout(location = 3) out float pixel_height_in_scanlines;
|
||||
|
||||
void main()
{
    // Vertex stage for the scanline-resampling pass: pass uv through,
    // detect interlacing, and precompute per-fragment constants.
    gl_Position = params.MVP * Position;
    tex_uv = TexCoord;

    // Detect interlacing: il_step_multiple indicates the step multiple
    // between lines: 1 for progressive sources, and 2 for interlaced.
    const vec2 video_size = registers.SourceSize.xy;
    const float y_step = is_interlaced(video_size.y) ? 2.0 : 1.0;
    il_step_multiple = vec2(1.0, y_step);
    // Get the uv tex coords step between one texel (x) and scanline (y):
    uv_step = il_step_multiple * registers.SourceSize.zw;

    // NOTE(review): the original also computed {sigma, shape}_range here
    // (from params.beam_{min,max}_{sigma,shape}) but never wrote them to
    // any output — and the fragment stage deliberately recomputes its own
    // so static versions can be constant-folded — so that dead per-vertex
    // work was removed.

    // We need the pixel height in scanlines for antialiased/integral
    // sampling (height in same-field scanlines, hence / il_step_multiple.y):
    pixel_height_in_scanlines = (video_size.y * registers.OutputSize.w) /
        il_step_multiple.y;
}
|
||||
|
||||
#pragma stage fragment
|
||||
layout(location = 0) in vec2 tex_uv;
|
||||
layout(location = 1) in vec2 uv_step;
|
||||
layout(location = 2) in vec2 il_step_multiple;
|
||||
layout(location = 3) in float pixel_height_in_scanlines;
|
||||
layout(location = 0) out vec4 FragColor;
|
||||
layout(set = 0, binding = 2) uniform sampler2D Source;
|
||||
|
||||
void main()
|
||||
{
|
||||
// This pass: Sample multiple (misconverged?) scanlines to the final
|
||||
// vertical resolution. Temporarily auto-dim the output to avoid clipping.
|
||||
|
||||
// Read some attributes into local variables:
|
||||
const vec2 texture_size = registers.SourceSize.xy;
|
||||
const vec2 texture_size_inv = registers.SourceSize.zw;
|
||||
const float frame_count = vec2(registers.FrameCount, registers.FrameCount).x;
|
||||
const float ph = pixel_height_in_scanlines;
|
||||
|
||||
// Get the uv coords of the previous scanline (in this field), and the
|
||||
// scanline's distance from this sample, in scanlines.
|
||||
float dist;
|
||||
const vec2 scanline_uv = get_last_scanline_uv(tex_uv, texture_size,
|
||||
texture_size_inv, il_step_multiple, frame_count, dist);
|
||||
// Consider 2, 3, 4, or 6 scanlines numbered 0-5: The previous and next
|
||||
// scanlines are numbered 2 and 3. Get scanline colors colors (ignore
|
||||
// horizontal sampling, since since registers.OutputSize.x = video_size.x).
|
||||
// NOTE: Anisotropic filtering creates interlacing artifacts, which is why
|
||||
// ORIG_LINEARIZED bobbed any interlaced input before this pass.
|
||||
const vec2 v_step = vec2(0.0, uv_step.y);
|
||||
const vec3 scanline2_color = tex2D_linearize(Source, scanline_uv).rgb;
|
||||
const vec3 scanline3_color =
|
||||
tex2D_linearize(Source, scanline_uv + v_step).rgb;
|
||||
vec3 scanline0_color, scanline1_color, scanline4_color, scanline5_color,
|
||||
scanline_outside_color;
|
||||
float dist_round;
|
||||
// Use scanlines 0, 1, 4, and 5 for a total of 6 scanlines:
|
||||
if(beam_num_scanlines > 5.5)
|
||||
{
|
||||
scanline1_color =
|
||||
tex2D_linearize(Source, scanline_uv - v_step).rgb;
|
||||
scanline4_color =
|
||||
tex2D_linearize(Source, scanline_uv + 2.0 * v_step).rgb;
|
||||
scanline0_color =
|
||||
tex2D_linearize(Source, scanline_uv - 2.0 * v_step).rgb;
|
||||
scanline5_color =
|
||||
tex2D_linearize(Source, scanline_uv + 3.0 * v_step).rgb;
|
||||
}
|
||||
// Use scanlines 1, 4, and either 0 or 5 for a total of 5 scanlines:
|
||||
else if(beam_num_scanlines > 4.5)
|
||||
{
|
||||
scanline1_color =
|
||||
tex2D_linearize(Source, scanline_uv - v_step).rgb;
|
||||
scanline4_color =
|
||||
tex2D_linearize(Source, scanline_uv + 2.0 * v_step).rgb;
|
||||
// dist is in [0, 1]
|
||||
dist_round = round(dist);
|
||||
const vec2 sample_0_or_5_uv_off =
|
||||
mix(-2.0 * v_step, 3.0 * v_step, dist_round);
|
||||
// Call this "scanline_outside_color" to cope with the conditional
|
||||
// scanline number:
|
||||
scanline_outside_color = tex2D_linearize(
|
||||
Source, scanline_uv + sample_0_or_5_uv_off).rgb;
|
||||
}
|
||||
// Use scanlines 1 and 4 for a total of 4 scanlines:
|
||||
else if(beam_num_scanlines > 3.5)
|
||||
{
|
||||
scanline1_color =
|
||||
tex2D_linearize(Source, scanline_uv - v_step).rgb;
|
||||
scanline4_color =
|
||||
tex2D_linearize(Source, scanline_uv + 2.0 * v_step).rgb;
|
||||
}
|
||||
// Use scanline 1 or 4 for a total of 3 scanlines:
|
||||
else if(beam_num_scanlines > 2.5)
|
||||
{
|
||||
// dist is in [0, 1]
|
||||
dist_round = round(dist);
|
||||
const vec2 sample_1or4_uv_off =
|
||||
mix(-v_step, 2.0 * v_step, dist_round);
|
||||
scanline_outside_color = tex2D_linearize(
|
||||
Source, scanline_uv + sample_1or4_uv_off).rgb;
|
||||
}
|
||||
|
||||
// Compute scanline contributions, accounting for vertical convergence.
|
||||
// Vertical convergence offsets are in units of current-field scanlines.
|
||||
// dist2 means "positive sample distance from scanline 2, in scanlines:"
|
||||
vec3 dist2 = vec3(dist);
|
||||
if(beam_misconvergence)
|
||||
{
|
||||
const vec3 convergence_offsets_vert_rgb =
|
||||
get_convergence_offsets_y_vector();
|
||||
dist2 = vec3(dist) - convergence_offsets_vert_rgb;
|
||||
}
|
||||
// Calculate {sigma, shape}_range outside of scanline_contrib so it's only
|
||||
// done once per pixel (not 6 times) with runtime params. Don't reuse the
|
||||
// vertex shader calculations, so static versions can be constant-folded.
|
||||
const float sigma_range = max(params.beam_max_sigma, params.beam_min_sigma) -
|
||||
params.beam_min_sigma;
|
||||
const float shape_range = max(params.beam_max_shape, params.beam_min_shape) -
|
||||
params.beam_min_shape;
|
||||
// Calculate and sum final scanline contributions, starting with lines 2/3.
|
||||
// There is no normalization step, because we're not interpolating a
|
||||
// continuous signal. Instead, each scanline is an additive light source.
|
||||
const vec3 scanline2_contrib = scanline_contrib(dist2,
|
||||
scanline2_color, ph, sigma_range, shape_range);
|
||||
const vec3 scanline3_contrib = scanline_contrib(abs(vec3(1.0) - dist2),
|
||||
scanline3_color, ph, sigma_range, shape_range);
|
||||
vec3 scanline_intensity = scanline2_contrib + scanline3_contrib;
|
||||
|
||||
if(beam_num_scanlines > 5.5)
|
||||
{
|
||||
vec3 scanline0_contrib =
|
||||
scanline_contrib(dist2 + vec3(2.0), scanline0_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 scanline1_contrib =
|
||||
scanline_contrib(dist2 + vec3(1.0), scanline1_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 scanline4_contrib =
|
||||
scanline_contrib(abs(vec3(2.0) - dist2), scanline4_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 scanline5_contrib =
|
||||
scanline_contrib(abs(vec3(3.0) - dist2), scanline5_color,
|
||||
ph, sigma_range, shape_range);
|
||||
scanline_intensity += scanline0_contrib + scanline1_contrib +
|
||||
scanline4_contrib + scanline5_contrib;
|
||||
}
|
||||
else if(beam_num_scanlines > 4.5)
|
||||
{
|
||||
vec3 scanline1_contrib =
|
||||
scanline_contrib(dist2 + vec3(1.0), scanline1_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 scanline4_contrib =
|
||||
scanline_contrib(abs(vec3(2.0) - dist2), scanline4_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 dist0or5 = mix(
|
||||
dist2 + vec3(2.0), vec3(3.0) - dist2, dist_round);
|
||||
vec3 scanline0or5_contrib = scanline_contrib(
|
||||
dist0or5, scanline_outside_color, ph, sigma_range, shape_range);
|
||||
scanline_intensity += scanline1_contrib + scanline4_contrib +
|
||||
scanline0or5_contrib;
|
||||
}
|
||||
else if(beam_num_scanlines > 3.5)
|
||||
{
|
||||
vec3 scanline1_contrib =
|
||||
scanline_contrib(dist2 + vec3(1.0), scanline1_color,
|
||||
ph, sigma_range, shape_range);
|
||||
vec3 scanline4_contrib =
|
||||
scanline_contrib(abs(vec3(2.0) - dist2), scanline4_color,
|
||||
ph, sigma_range, shape_range);
|
||||
scanline_intensity += scanline1_contrib + scanline4_contrib;
|
||||
}
|
||||
else if(beam_num_scanlines > 2.5)
|
||||
{
|
||||
vec3 dist1or4 = mix(
|
||||
dist2 + vec3(1.0), vec3(2.0) - dist2, dist_round);
|
||||
vec3 scanline1or4_contrib = scanline_contrib(
|
||||
dist1or4, scanline_outside_color, ph, sigma_range, shape_range);
|
||||
scanline_intensity += scanline1or4_contrib;
|
||||
}
|
||||
|
||||
// Auto-dim the image to avoid clipping, encode if necessary, and output.
|
||||
// My original idea was to compute a minimal auto-dim factor and put it in
|
||||
// the alpha channel, but it wasn't working, at least not reliably. This
|
||||
// is faster anyway, levels_autodim_temp = 0.5 isn't causing banding.
|
||||
FragColor = vec4(encode_output(vec4(scanline_intensity * levels_autodim_temp, 1.0)));
|
||||
}
|
547
crt/shaders/crt-royale/src/gamma-management-old.h
Normal file
547
crt/shaders/crt-royale/src/gamma-management-old.h
Normal file
|
@ -0,0 +1,547 @@
|
|||
#ifndef GAMMA_MANAGEMENT_H
|
||||
#define GAMMA_MANAGEMENT_H
|
||||
|
||||
///////////////////////////////// MIT LICENSE ////////////////////////////////
|
||||
|
||||
// Copyright (C) 2014 TroggleMonkey
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
|
||||
///////////////////////////////// DESCRIPTION ////////////////////////////////
|
||||
|
||||
// This file provides gamma-aware tex*D*() and encode_output() functions.
|
||||
// Requires: Before #include-ing this file, the including file must #define
|
||||
// the following macros when applicable and follow their rules:
|
||||
// 1.) #define FIRST_PASS if this is the first pass.
|
||||
// 2.) #define LAST_PASS if this is the last pass.
|
||||
// 3.) If sRGB is available, set srgb_framebufferN = "true" for
|
||||
// every pass except the last in your .cgp preset.
|
||||
// 4.) If sRGB isn't available but you want gamma-correctness with
|
||||
// no banding, #define GAMMA_ENCODE_EVERY_FBO each pass.
|
||||
// 5.) #define SIMULATE_CRT_ON_LCD if desired (precedence over 5-7)
|
||||
// 6.) #define SIMULATE_GBA_ON_LCD if desired (precedence over 6-7)
|
||||
// 7.) #define SIMULATE_LCD_ON_CRT if desired (precedence over 7)
|
||||
// 8.) #define SIMULATE_GBA_ON_CRT if desired (precedence over -)
|
||||
// If an option in [5, 8] is #defined in the first or last pass, it
|
||||
// should be #defined for both. It shouldn't make a difference
|
||||
// whether it's #defined for intermediate passes or not.
|
||||
// Optional: The including file (or an earlier included file) may optionally
|
||||
// #define a number of macros indicating it will override certain
|
||||
// static constants with either static or uniform constants. The
// macros and associated constants are as follows:
|
||||
// 1.) OVERRIDE_STANDARD_GAMMA: The user must first define:
|
||||
// static const float ntsc_gamma
|
||||
// static const float pal_gamma
|
||||
// static const float crt_reference_gamma_high
|
||||
// static const float crt_reference_gamma_low
|
||||
// static const float lcd_reference_gamma
|
||||
// static const float crt_office_gamma
|
||||
// static const float lcd_office_gamma
|
||||
// 2.) OVERRIDE_DEVICE_GAMMA: The user must first define:
|
||||
// static const float crt_gamma
|
||||
// static const float gba_gamma
|
||||
// static const float lcd_gamma
|
||||
// 3.) OVERRIDE_FINAL_GAMMA: The user must first define:
|
||||
// static const float input_gamma
|
||||
// static const float intermediate_gamma
|
||||
// static const float output_gamma
|
||||
// (intermediate_gamma is for GAMMA_ENCODE_EVERY_FBO.)
|
||||
// 4.) OVERRIDE_ALPHA_ASSUMPTIONS: The user must first define:
|
||||
// static const bool assume_opaque_alpha
|
||||
// The gamma constant overrides must be used in every pass or none,
|
||||
// and OVERRIDE_FINAL_GAMMA bypasses all of the SIMULATE* macros.
|
||||
// OVERRIDE_ALPHA_ASSUMPTIONS may be set on a per-pass basis.
|
||||
// Usage: After setting macros appropriately, ignore gamma correction and
|
||||
// replace all tex*D*() calls with equivalent gamma-aware
|
||||
// tex*D*_linearize calls, except:
|
||||
// 1.) When you read an LUT, use regular tex*D or a gamma-specified
|
||||
// function, depending on its gamma encoding:
|
||||
// tex*D*_linearize_gamma (takes a runtime gamma parameter)
|
||||
// 2.) If you must read pass0's original input in a later pass, use
|
||||
// tex2D_linearize_ntsc_gamma. If you want to read pass0's
|
||||
// input with gamma-corrected bilinear filtering, consider
|
||||
// creating a first linearizing pass and reading from the input
|
||||
// of pass1 later.
|
||||
// Then, return encode_output(color) from every fragment shader.
|
||||
// Finally, use the global gamma_aware_bilinear boolean if you want
|
||||
// to statically branch based on whether bilinear filtering is
|
||||
// gamma-correct or not (e.g. for placing Gaussian blur samples).
|
||||
//
|
||||
// Detailed Policy:
|
||||
// tex*D*_linearize() functions enforce a consistent gamma-management policy
|
||||
// based on the FIRST_PASS and GAMMA_ENCODE_EVERY_FBO settings. They assume
|
||||
// their input texture has the same encoding characteristics as the input for
|
||||
// the current pass (which doesn't apply to the exceptions listed above).
|
||||
// Similarly, encode_output() enforces a policy based on the LAST_PASS and
|
||||
// GAMMA_ENCODE_EVERY_FBO settings. Together, they result in one of the
|
||||
// following two pipelines.
|
||||
// Typical pipeline with intermediate sRGB framebuffers:
|
||||
// linear_color = pow(pass0_encoded_color, input_gamma);
|
||||
// intermediate_output = linear_color; // Automatic sRGB encoding
|
||||
// linear_color = intermediate_output; // Automatic sRGB decoding
|
||||
// final_output = pow(intermediate_output, 1.0/output_gamma);
|
||||
// Typical pipeline without intermediate sRGB framebuffers:
|
||||
// linear_color = pow(pass0_encoded_color, input_gamma);
|
||||
// intermediate_output = pow(linear_color, 1.0/intermediate_gamma);
|
||||
// linear_color = pow(intermediate_output, intermediate_gamma);
|
||||
// final_output = pow(intermediate_output, 1.0/output_gamma);
|
||||
// Using GAMMA_ENCODE_EVERY_FBO is much slower, but it's provided as a way to
|
||||
// easily get gamma-correctness without banding on devices where sRGB isn't
|
||||
// supported.
|
||||
//
|
||||
// Use This Header to Maximize Code Reuse:
|
||||
// The purpose of this header is to provide a consistent interface for texture
|
||||
// reads and output gamma-encoding that localizes and abstracts away all the
|
||||
// annoying details. This greatly reduces the amount of code in each shader
|
||||
// pass that depends on the pass number in the .cgp preset or whether sRGB
|
||||
// FBO's are being used: You can trivially change the gamma behavior of your
|
||||
// whole pass by commenting or uncommenting 1-3 #defines. To reuse the same
|
||||
// code in your first, Nth, and last passes, you can even put it all in another
|
||||
// header file and #include it from skeleton .cg files that #define the
|
||||
// appropriate pass-specific settings.
|
||||
//
|
||||
// Rationale for Using Three Macros:
|
||||
// This file uses GAMMA_ENCODE_EVERY_FBO instead of an opposite macro like
|
||||
// SRGB_PIPELINE to ensure sRGB is assumed by default, which hopefully imposes
|
||||
// a lower maintenance burden on each pass. At first glance it seems we could
|
||||
// accomplish everything with two macros: GAMMA_CORRECT_IN / GAMMA_CORRECT_OUT.
|
||||
// This works for simple use cases where input_gamma == output_gamma, but it
|
||||
// breaks down for more complex scenarios like CRT simulation, where the pass
|
||||
// number determines the gamma encoding of the input and output.
|
||||
|
||||
|
||||
/////////////////////////////// BASE CONSTANTS ///////////////////////////////
|
||||
|
||||
// Set standard gamma constants, but allow users to override them:
|
||||
//  Standard encoding/decoding gamma constants.  Each may be overridden by
//  #defining OVERRIDE_STANDARD_GAMMA and declaring the same names first.
#ifndef OVERRIDE_STANDARD_GAMMA
    // Standard encoding gammas:
    const float ntsc_gamma = 2.2;   // Best to use NTSC for PAL too?
    const float pal_gamma = 2.8;    // Never actually 2.8 in practice
    // Typical device decoding gammas (only use for emulating devices):
    // CRT/LCD reference gammas are higher than NTSC and Rec.709 video standard
    // gammas: The standards purposely undercorrected for an analog CRT's
    // assumed 2.5 reference display gamma to maintain contrast in assumed
    // [dark] viewing conditions: http://www.poynton.com/PDFs/GammaFAQ.pdf
    // These unstated assumptions about display gamma and perceptual rendering
    // intent caused a lot of confusion, and more modern CRT's seemed to target
    // NTSC 2.2 gamma with circuitry.  LCD displays seem to have followed suit
    // (they struggle near black with 2.5 gamma anyway), especially PC/laptop
    // displays designed to view sRGB in bright environments.  (Standards are
    // also in flux again with BT.1886, but it's underspecified for displays.)
    const float crt_reference_gamma_high = 2.5; // In (2.35, 2.55)
    const float crt_reference_gamma_low = 2.35; // In (2.35, 2.55)
    const float lcd_reference_gamma = 2.5;      // To match CRT
    const float crt_office_gamma = 2.2;         // Circuitry-adjusted for NTSC
    const float lcd_office_gamma = 2.2;         // Approximates sRGB
#endif // OVERRIDE_STANDARD_GAMMA

// Assuming alpha == 1.0 might make it easier for users to avoid some bugs,
// but only if they're aware of it.  When true, encode/decode helpers below
// force output alpha to 1.0 instead of passing the sampled alpha through.
#ifndef OVERRIDE_ALPHA_ASSUMPTIONS
    const bool assume_opaque_alpha = false;
#endif
|
||||
|
||||
|
||||
/////////////////////// DERIVED CONSTANTS AS FUNCTIONS ///////////////////////
|
||||
|
||||
// gamma-management.h should be compatible with overriding gamma values with
|
||||
// runtime user parameters, but we can only define other global constants in
|
||||
// terms of static constants, not uniform user parameters. To get around this
|
||||
// limitation, we need to define derived constants using functions.
|
||||
|
||||
// Set device gamma constants, but allow users to override them:
|
||||
//  Device decoding gammas as functions (not constants) so they can be backed
//  by runtime uniforms when OVERRIDE_DEVICE_GAMMA is #defined.
#ifdef OVERRIDE_DEVICE_GAMMA
    // The user promises to globally define the appropriate constants:
    float get_crt_gamma() { return crt_gamma; }
    float get_gba_gamma() { return gba_gamma; }
    float get_lcd_gamma() { return lcd_gamma; }
#else
    float get_crt_gamma() { return crt_reference_gamma_high; }
    float get_gba_gamma() { return 3.5; } // Game Boy Advance; in (3.0, 4.0)
    float get_lcd_gamma() { return lcd_office_gamma; }
#endif // OVERRIDE_DEVICE_GAMMA
|
||||
|
||||
// Set decoding/encoding gammas for the first/last passes, but allow overrides:
|
||||
//  Whole-pipeline decoding/encoding gammas.  OVERRIDE_FINAL_GAMMA bypasses
//  the SIMULATE_* device-simulation macros entirely; otherwise the first
//  matching SIMULATE_* macro picks the input/output device gamma pair.
#ifdef OVERRIDE_FINAL_GAMMA
    // The user promises to globally define the appropriate constants:
    float get_intermediate_gamma() { return intermediate_gamma; }
    float get_input_gamma() { return input_gamma; }
    float get_output_gamma() { return output_gamma; }
#else
    // If we gamma-correct every pass, always use ntsc_gamma between passes to
    // ensure middle passes don't need to care if anything is being simulated:
    float get_intermediate_gamma() { return ntsc_gamma; }
    #ifdef SIMULATE_CRT_ON_LCD
        float get_input_gamma() { return get_crt_gamma(); }
        float get_output_gamma() { return get_lcd_gamma(); }
    #else
    #ifdef SIMULATE_GBA_ON_LCD
        float get_input_gamma() { return get_gba_gamma(); }
        float get_output_gamma() { return get_lcd_gamma(); }
    #else
    #ifdef SIMULATE_LCD_ON_CRT
        float get_input_gamma() { return get_lcd_gamma(); }
        float get_output_gamma() { return get_crt_gamma(); }
    #else
    #ifdef SIMULATE_GBA_ON_CRT
        float get_input_gamma() { return get_gba_gamma(); }
        float get_output_gamma() { return get_crt_gamma(); }
    #else // Don't simulate anything:
        float get_input_gamma() { return ntsc_gamma; }
        float get_output_gamma() { return ntsc_gamma; }
    #endif // SIMULATE_GBA_ON_CRT
    #endif // SIMULATE_LCD_ON_CRT
    #endif // SIMULATE_GBA_ON_LCD
    #endif // SIMULATE_CRT_ON_LCD
#endif // OVERRIDE_FINAL_GAMMA
|
||||
|
||||
// Set decoding/encoding gammas for the current pass. Use static constants for
|
||||
// linearize_input and gamma_encode_output, because they aren't derived, and
|
||||
// they let the compiler do dead-code elimination.
|
||||
//  Per-pass decode/encode policy.  With sRGB FBO's (the default), only the
//  first pass linearizes and only the last pass gamma-encodes; with
//  GAMMA_ENCODE_EVERY_FBO, every pass does both, using intermediate_gamma
//  between passes.  linearize_input/gamma_encode_output are static consts so
//  the compiler can dead-code-eliminate the unused branch in decode_input()
//  and encode_output().
#ifndef GAMMA_ENCODE_EVERY_FBO
    #ifdef FIRST_PASS
        const bool linearize_input = true;
        float get_pass_input_gamma() { return get_input_gamma(); }
    #else
        const bool linearize_input = false;
        float get_pass_input_gamma() { return 1.0; }
    #endif
    #ifdef LAST_PASS
        const bool gamma_encode_output = true;
        float get_pass_output_gamma() { return get_output_gamma(); }
    #else
        const bool gamma_encode_output = false;
        float get_pass_output_gamma() { return 1.0; }
    #endif
#else
    const bool linearize_input = true;
    const bool gamma_encode_output = true;
    #ifdef FIRST_PASS
        float get_pass_input_gamma() { return get_input_gamma(); }
    #else
        float get_pass_input_gamma() { return get_intermediate_gamma(); }
    #endif
    #ifdef LAST_PASS
        float get_pass_output_gamma() { return get_output_gamma(); }
    #else
        float get_pass_output_gamma() { return get_intermediate_gamma(); }
    #endif
#endif

// Users might want to know if bilinear filtering will be gamma-correct:
// (it is only when this pass's input is already linear, i.e. not decoded here)
const bool gamma_aware_bilinear = !linearize_input;
|
||||
|
||||
|
||||
////////////////////// COLOR ENCODING/DECODING FUNCTIONS /////////////////////
|
||||
|
||||
//  Apply this pass's output gamma encoding, if any.  RGB is raised to
//  1.0/get_pass_output_gamma(); alpha is never gamma-encoded, and it is
//  forced to 1.0 when assume_opaque_alpha is set.  When the pass writes a
//  linear (e.g. sRGB) framebuffer, the color passes through untouched.
vec4 encode_output(const vec4 color)
{
    if(gamma_encode_output)
    {
        const vec3 encoded_rgb =
            pow(color.rgb, vec3(1.0/get_pass_output_gamma()));
        const float encoded_alpha = assume_opaque_alpha ? 1.0 : color.a;
        return vec4(encoded_rgb, encoded_alpha);
    }
    else
    {
        return color;
    }
}
|
||||
|
||||
//  Linearize this pass's input encoding, if any.  RGB is raised to
//  get_pass_input_gamma(); alpha is left alone, or forced to 1.0 when
//  assume_opaque_alpha is set.  Passes whose input is already linear
//  (linearize_input == false) get the sample back unchanged.
vec4 decode_input(const vec4 color)
{
    if(!linearize_input)
    {
        return color;
    }
    const vec3 linear_rgb = pow(color.rgb, vec3(get_pass_input_gamma()));
    const float alpha_out = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(linear_rgb, alpha_out);
}
|
||||
|
||||
//  Decode a color with an explicit per-channel gamma (e.g. for LUT reads),
//  independent of the current pass's input encoding.  Alpha passes through,
//  or is forced to 1.0 when assume_opaque_alpha is set.
vec4 decode_gamma_input(const vec4 color, const vec3 gamma)
{
    const vec3 decoded_rgb = pow(color.rgb, gamma);
    const float decoded_alpha = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(decoded_rgb, decoded_alpha);
}
|
||||
|
||||
|
||||
/////////////////////////// TEXTURE LOOKUP WRAPPERS //////////////////////////
|
||||
|
||||
// "SMART" LINEARIZING TEXTURE LOOKUP FUNCTIONS:
|
||||
// Provide a wide array of linearizing texture lookup wrapper functions. The
|
||||
// Cg shader spec Retroarch uses only allows for 2D textures, but 1D and 3D
|
||||
// lookups are provided for completeness in case that changes someday. Nobody
|
||||
// is likely to use the *fetch and *proj functions, but they're included just
|
||||
// in case. The only tex*D texture sampling functions omitted are:
|
||||
// - tex*Dcmpbias
|
||||
// - tex*Dcmplod
|
||||
// - tex*DARRAY*
|
||||
// - tex*DMS*
|
||||
// - Variants returning integers
|
||||
// Standard line length restrictions are ignored below for vertical brevity.
|
||||
|
||||
/*
|
||||
// tex1D:
|
||||
vec4 tex1D_linearize(const sampler1D texture, const float tex_coords)
|
||||
{ return decode_input(tex1D(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const vec2 tex_coords)
|
||||
{ return decode_input(tex1D(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const float tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1D(texture, tex_coords, texel_off)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const vec2 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1D(texture, tex_coords, texel_off)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const float tex_coords, const float dx, const float dy)
|
||||
{ return decode_input(tex1D(texture, tex_coords, dx, dy)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const vec2 tex_coords, const float dx, const float dy)
|
||||
{ return decode_input(tex1D(texture, tex_coords, dx, dy)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const float tex_coords, const float dx, const float dy, const int texel_off)
|
||||
{ return decode_input(tex1D(texture, tex_coords, dx, dy, texel_off)); }
|
||||
|
||||
vec4 tex1D_linearize(const sampler1D texture, const vec2 tex_coords, const float dx, const float dy, const int texel_off)
|
||||
{ return decode_input(tex1D(texture, tex_coords, dx, dy, texel_off)); }
|
||||
|
||||
// tex1Dbias:
|
||||
vec4 tex1Dbias_linearize(const sampler1D texture, const vec4 tex_coords)
|
||||
{ return decode_input(tex1Dbias(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1Dbias_linearize(const sampler1D texture, const vec4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1Dbias(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex1Dfetch:
|
||||
vec4 tex1Dfetch_linearize(const sampler1D texture, const int4 tex_coords)
|
||||
{ return decode_input(tex1Dfetch(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1Dfetch_linearize(const sampler1D texture, const int4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1Dfetch(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex1Dlod:
|
||||
vec4 tex1Dlod_linearize(const sampler1D texture, const vec4 tex_coords)
|
||||
{ return decode_input(tex1Dlod(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1Dlod_linearize(const sampler1D texture, const vec4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1Dlod(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex1Dproj:
|
||||
vec4 tex1Dproj_linearize(const sampler1D texture, const vec2 tex_coords)
|
||||
{ return decode_input(tex1Dproj(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1Dproj_linearize(const sampler1D texture, const vec3 tex_coords)
|
||||
{ return decode_input(tex1Dproj(texture, tex_coords)); }
|
||||
|
||||
vec4 tex1Dproj_linearize(const sampler1D texture, const vec2 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1Dproj(texture, tex_coords, texel_off)); }
|
||||
|
||||
vec4 tex1Dproj_linearize(const sampler1D texture, const vec3 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex1Dproj(texture, tex_coords, texel_off)); }
|
||||
*/
|
||||
|
||||
// tex2D:
|
||||
//  "Smart" linearizing lookups: sample and decode according to this pass's
//  input encoding (see decode_input).  Overloads mirror Cg's tex2D variants.
vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords)
{ return decode_input(vec4(texture(tex, tex_coords))); }

vec4 tex2D_linearize(const sampler2D tex, const vec3 tex_coords)
{ return decode_input(vec4(texture(tex, tex_coords))); }

// NOTE(review): texel_off is forwarded as texture()'s third (bias) parameter,
// relying on implicit int->float conversion — confirm this matches the intent
// of Cg's texel-offset overloads.
vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords, const int texel_off)
{ return decode_input(vec4(texture(tex, tex_coords, texel_off))); }

vec4 tex2D_linearize(const sampler2D tex, const vec3 tex_coords, const int texel_off)
{ return decode_input(vec4(texture(tex, tex_coords, texel_off))); }

// NOTE(review): dx/dy gradient overloads call texture(), which takes at most a
// float bias after the coords — presumably these should use textureGrad();
// confirm against the GLSL port's compatibility macros.
vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords, const vec2 dx, const vec2 dy)
{ return decode_input(vec4(texture(tex, tex_coords, dx, dy))); }

vec4 tex2D_linearize(const sampler2D tex, const vec3 tex_coords, const vec2 dx, const vec2 dy)
{ return decode_input(vec4(texture(tex, tex_coords, dx, dy))); }

vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords, const vec2 dx, const vec2 dy, const int texel_off)
{ return decode_input(vec4(texture(tex, tex_coords, dx, dy, texel_off))); }

vec4 tex2D_linearize(const sampler2D tex, const vec3 tex_coords, const vec2 dx, const vec2 dy, const int texel_off)
{ return decode_input(vec4(texture(tex, tex_coords, dx, dy, texel_off))); }
|
||||
|
||||
// tex2Dbias:
|
||||
// tex2Dbias: biased-LOD sample, decoded per this pass's input encoding.
// NOTE(review): tex2Dbias/tex2Dproj and texture2Dfetch/texture2Dlod below are
// Cg-era names, not core GLSL — presumably supplied by compatibility macros
// elsewhere in the port; confirm they resolve before enabling these paths.
vec4 tex2Dbias_linearize(const sampler2D tex, const vec4 tex_coords)
{ return decode_input(vec4(tex2Dbias(tex, tex_coords))); }

vec4 tex2Dbias_linearize(const sampler2D tex, const vec4 tex_coords, const int texel_off)
{ return decode_input(vec4(tex2Dbias(tex, tex_coords, texel_off))); }

// tex2Dfetch: integer-texel fetch, decoded per this pass's input encoding.
vec4 tex2Dfetch_linearize(const sampler2D tex, const ivec4 tex_coords)
{ return decode_input(vec4(texture2Dfetch(tex, tex_coords))); }

vec4 tex2Dfetch_linearize(const sampler2D tex, const ivec4 tex_coords, const int texel_off)
{ return decode_input(vec4(texture2Dfetch(tex, tex_coords, texel_off))); }

// tex2Dlod: explicit-LOD sample, decoded per this pass's input encoding.
vec4 tex2Dlod_linearize(const sampler2D tex, const vec4 tex_coords)
{ return decode_input(vec4(texture2Dlod(tex, tex_coords))); }

vec4 tex2Dlod_linearize(const sampler2D tex, const vec4 tex_coords, const int texel_off)
{ return decode_input(vec4(texture2Dlod(tex, tex_coords, texel_off))); }

// tex2Dproj: projective sample, decoded per this pass's input encoding.
vec4 tex2Dproj_linearize(const sampler2D tex, const vec3 tex_coords)
{ return decode_input(vec4(tex2Dproj(tex, tex_coords))); }

vec4 tex2Dproj_linearize(const sampler2D tex, const vec4 tex_coords)
{ return decode_input(vec4(tex2Dproj(tex, tex_coords))); }

vec4 tex2Dproj_linearize(const sampler2D tex, const vec3 tex_coords, const int texel_off)
{ return decode_input(vec4(tex2Dproj(tex, tex_coords, texel_off))); }

vec4 tex2Dproj_linearize(const sampler2D tex, const vec4 tex_coords, const int texel_off)
{ return decode_input(vec4(tex2Dproj(tex, tex_coords, texel_off))); }
|
||||
|
||||
/*
|
||||
// tex3D:
|
||||
vec4 tex3D_linearize(const sampler3D texture, const vec3 tex_coords)
|
||||
{ return decode_input(tex3D(texture, tex_coords)); }
|
||||
|
||||
vec4 tex3D_linearize(const sampler3D texture, const vec3 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex3D(texture, tex_coords, texel_off)); }
|
||||
|
||||
vec4 tex3D_linearize(const sampler3D texture, const vec3 tex_coords, const vec3 dx, const vec3 dy)
|
||||
{ return decode_input(tex3D(texture, tex_coords, dx, dy)); }
|
||||
|
||||
vec4 tex3D_linearize(const sampler3D texture, const vec3 tex_coords, const vec3 dx, const vec3 dy, const int texel_off)
|
||||
{ return decode_input(tex3D(texture, tex_coords, dx, dy, texel_off)); }
|
||||
|
||||
// tex3Dbias:
|
||||
vec4 tex3Dbias_linearize(const sampler3D texture, const vec4 tex_coords)
|
||||
{ return decode_input(tex3Dbias(texture, tex_coords)); }
|
||||
|
||||
vec4 tex3Dbias_linearize(const sampler3D texture, const vec4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex3Dbias(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex3Dfetch:
|
||||
vec4 tex3Dfetch_linearize(const sampler3D texture, const int4 tex_coords)
|
||||
{ return decode_input(tex3Dfetch(texture, tex_coords)); }
|
||||
|
||||
vec4 tex3Dfetch_linearize(const sampler3D texture, const int4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex3Dfetch(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex3Dlod:
|
||||
vec4 tex3Dlod_linearize(const sampler3D texture, const vec4 tex_coords)
|
||||
{ return decode_input(tex3Dlod(texture, tex_coords)); }
|
||||
|
||||
vec4 tex3Dlod_linearize(const sampler3D texture, const vec4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex3Dlod(texture, tex_coords, texel_off)); }
|
||||
|
||||
// tex3Dproj:
|
||||
vec4 tex3Dproj_linearize(const sampler3D texture, const vec4 tex_coords)
|
||||
{ return decode_input(tex3Dproj(texture, tex_coords)); }
|
||||
|
||||
vec4 tex3Dproj_linearize(const sampler3D texture, const vec4 tex_coords, const int texel_off)
|
||||
{ return decode_input(tex3Dproj(texture, tex_coords, texel_off)); }
|
||||
*/
|
||||
|
||||
|
||||
// NONSTANDARD "SMART" LINEARIZING TEXTURE LOOKUP FUNCTIONS:
|
||||
// This narrow selection of nonstandard tex2D* functions can be useful:
|
||||
|
||||
// tex2Dlod0: Automatically fill in the tex2D LOD parameter for mip level 0.
|
||||
// tex2Dlod0: Automatically fill in the tex2D LOD parameter for mip level 0.
// (Samples mip 0 explicitly, then decodes per this pass's input encoding.)
// NOTE(review): texture2Dlod is a Cg/legacy name — presumably a compatibility
// macro for textureLod; confirm, and note the parameter named "texture"
// shadows the GLSL built-in function of the same name.
vec4 tex2Dlod0_linearize(const sampler2D texture, const vec2 tex_coords)
{ return decode_input(vec4(texture2Dlod(texture, vec4(tex_coords, 0.0, 0.0)))); }

vec4 tex2Dlod0_linearize(const sampler2D texture, const vec2 tex_coords, const int texel_off)
{ return decode_input(vec4(texture2Dlod(texture, vec4(tex_coords, 0.0, 0.0), texel_off))); }
|
||||
|
||||
|
||||
// MANUALLY LINEARIZING TEXTURE LOOKUP FUNCTIONS:
|
||||
// Provide a narrower selection of tex2D* wrapper functions that decode an
|
||||
// input sample with a specified gamma value. These are useful for reading
|
||||
// LUT's and for reading the input of pass0 in a later pass.
|
||||
|
||||
// tex2D:
|
||||
//  Sample and decode with an explicitly specified per-channel gamma — for
//  reading LUT's and for re-reading pass0's input in a later pass.
//  Fix: gamma was previously parenthesized inside the vec4() constructor,
//  i.e. decode_gamma_input(vec4(texel, vec3(gamma))), which overfills the
//  vec4 constructor (4 + 3 components) and drops decode_gamma_input's
//  required second argument.  Pass gamma as the second argument instead.
vec4 tex2D_linearize_gamma(const sampler2D tex, const vec2 tex_coords, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec3 tex_coords, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec2 tex_coords, const int texel_off, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, texel_off)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec3 tex_coords, const int texel_off, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, texel_off)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec2 tex_coords, const vec2 dx, const vec2 dy, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, dx, dy)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec3 tex_coords, const vec2 dx, const vec2 dy, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, dx, dy)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec2 tex_coords, const vec2 dx, const vec2 dy, const int texel_off, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, dx, dy, texel_off)), gamma); }

vec4 tex2D_linearize_gamma(const sampler2D tex, const vec3 tex_coords, const vec2 dx, const vec2 dy, const int texel_off, const vec3 gamma)
{ return decode_gamma_input(vec4(texture(tex, tex_coords, dx, dy, texel_off)), gamma); }
|
||||
|
||||
// tex2Dbias:
|
||||
vec4 tex2Dbias_linearize_gamma(const sampler2D tex, const vec4 tex_coords, const vec3 gamma)
|
||||
{ return decode_gamma_input(vec4(tex2Dbias(tex, tex_coords), vec3(gamma))); }
|
||||
|
||||
vec4 tex2Dbias_linearize_gamma(const sampler2D tex, const vec4 tex_coords, const int texel_off, const vec3 gamma)
|
||||
{ return decode_gamma_input(vec4(tex2Dbias(tex, tex_coords, texel_off), vec3(gamma))); }
|
||||
|
||||
// tex2Dfetch:
|
||||
vec4 tex2Dfetch_linearize_gamma(const sampler2D tex, const int4 tex_coords, const vec3 gamma)
|
||||
{ return decode_gamma_input(vec4(tex2Dfetch(tex, tex_coords), vec3(gamma))); }
|
||||
|
||||
vec4 tex2Dfetch_linearize_gamma(const sampler2D tex, const int4 tex_coords, const int texel_off, const vec3 gamma)
|
||||
{ return decode_gamma_input(vec4(tex2Dfetch(tex, tex_coords, texel_off), vec3(gamma))); }
|
||||
|
||||
// tex2Dlod: explicit-LOD sampling with explicit-gamma linearization.
// Fix: vec3(gamma) was wrongly placed inside the vec4(...) constructor
// (too many components); it must be decode_gamma_input()'s second argument.
vec4 tex2Dlod_linearize_gamma(const sampler2D tex, const vec4 tex_coords, const vec3 gamma)
{   return decode_gamma_input(vec4(tex2Dlod(tex, tex_coords)), vec3(gamma));  }

vec4 tex2Dlod_linearize_gamma(const sampler2D tex, const vec4 tex_coords, const int texel_off, const vec3 gamma)
{   return decode_gamma_input(vec4(tex2Dlod(tex, tex_coords, texel_off)), vec3(gamma));  }
|
||||
|
||||
|
||||
#endif // GAMMA_MANAGEMENT_H
|
||||
|
160
crt/shaders/crt-royale/src/gamma-management.h
Normal file
160
crt/shaders/crt-royale/src/gamma-management.h
Normal file
|
@ -0,0 +1,160 @@
|
|||
/////////////////////////////// BASE CONSTANTS ///////////////////////////////

// Standard encoding gammas; users may predefine their own before including
// this header:
#ifndef OVERRIDE_STANDARD_GAMMA
    const float ntsc_gamma = 2.2;   // Best to use NTSC for PAL too?
    const float pal_gamma = 2.8;    // Never actually 2.8 in practice
    // Typical device DECODING gammas (only use for emulating devices):
    // CRT/LCD reference gammas exceed the NTSC/Rec.709 ENCODING gammas: the
    // standards deliberately undercorrected against an assumed 2.5 analog CRT
    // display gamma to preserve contrast in assumed [dark] viewing
    // conditions: http://www.poynton.com/PDFs/GammaFAQ.pdf
    // Those unstated assumptions about display gamma and perceptual rendering
    // intent caused plenty of confusion; more modern CRT's seemed to target
    // NTSC 2.2 gamma with circuitry, and LCD's followed suit (they struggle
    // near black with 2.5 gamma anyway), especially PC/laptop displays meant
    // for sRGB in bright environments. (Standards are in flux again with
    // BT.1886, but it's underspecified for displays.)
    const float crt_reference_gamma_high = 2.5;     // In (2.35, 2.55)
    const float crt_reference_gamma_low = 2.35;     // In (2.35, 2.55)
    const float lcd_reference_gamma = 2.5;          // To match CRT
    const float crt_office_gamma = 2.2;             // Circuitry-adjusted for NTSC
    const float lcd_office_gamma = 2.2;             // Approximates sRGB
#endif  // OVERRIDE_STANDARD_GAMMA

// Assuming alpha == 1.0 might make it easier for users to avoid some bugs,
// but only if they're aware of it.
#ifndef OVERRIDE_ALPHA_ASSUMPTIONS
    const bool assume_opaque_alpha = false;
#endif
|
||||
|
||||
|
||||
/////////////////////// DERIVED CONSTANTS AS FUNCTIONS ///////////////////////

// gamma-management.h should stay compatible with overriding gamma values via
// runtime user parameters, but global constants can only be defined in terms
// of other static constants -- never uniforms. Derived "constants" are
// therefore exposed as zero-argument functions instead.

// Device gamma constants, overridable by the user:
#ifdef OVERRIDE_DEVICE_GAMMA
    // The user promises to globally define the appropriate constants:
    float get_crt_gamma()   {   return crt_gamma;   }
    float get_gba_gamma()   {   return gba_gamma;   }
    float get_lcd_gamma()   {   return lcd_gamma;   }
#else
    float get_crt_gamma()   {   return crt_reference_gamma_high;   }
    float get_gba_gamma()   {   return 3.5;   }  // Game Boy Advance; in (3.0, 4.0)
    float get_lcd_gamma()   {   return lcd_office_gamma;   }
#endif  // OVERRIDE_DEVICE_GAMMA
|
||||
|
||||
// Decoding/encoding gammas for the first/last passes, overridable by the
// user. The simulation #defines below are checked in priority order; the
// first one found wins (same precedence as the original nested #ifdefs).
#ifdef OVERRIDE_FINAL_GAMMA
    // The user promises to globally define the appropriate constants:
    float get_intermediate_gamma()  {   return intermediate_gamma;   }
    float get_input_gamma()         {   return input_gamma;   }
    float get_output_gamma()        {   return output_gamma;   }
#else
    // If we gamma-correct every pass, always use ntsc_gamma between passes to
    // ensure middle passes don't need to care if anything is being simulated:
    float get_intermediate_gamma()  {   return ntsc_gamma;   }
    #ifdef SIMULATE_CRT_ON_LCD
        float get_input_gamma()     {   return get_crt_gamma();   }
        float get_output_gamma()    {   return get_lcd_gamma();   }
    #elif defined(SIMULATE_GBA_ON_LCD)
        float get_input_gamma()     {   return get_gba_gamma();   }
        float get_output_gamma()    {   return get_lcd_gamma();   }
    #elif defined(SIMULATE_LCD_ON_CRT)
        float get_input_gamma()     {   return get_lcd_gamma();   }
        float get_output_gamma()    {   return get_crt_gamma();   }
    #elif defined(SIMULATE_GBA_ON_CRT)
        float get_input_gamma()     {   return get_gba_gamma();   }
        float get_output_gamma()    {   return get_crt_gamma();   }
    #else   // Don't simulate anything:
        float get_input_gamma()     {   return ntsc_gamma;   }
        float get_output_gamma()    {   return ntsc_gamma;   }
    #endif  // SIMULATE_*
#endif  // OVERRIDE_FINAL_GAMMA
|
||||
|
||||
// Per-pass gamma policy. Two modes:
//   a.) Default: keep intermediate FBO's linear -- only the FIRST_PASS
//       decodes and only the LAST_PASS re-encodes.
//   b.) GAMMA_ENCODE_EVERY_FBO: every pass decodes its input and re-encodes
//       its output with get_intermediate_gamma() (for low-precision FBO's).
#ifndef GAMMA_ENCODE_EVERY_FBO
    #ifdef FIRST_PASS
        const bool linearize_input = true;
        float get_pass_input_gamma()    {   return get_input_gamma();   }
    #else
        const bool linearize_input = false;
        float get_pass_input_gamma()    {   return 1.0;   }
    #endif
    #ifdef LAST_PASS
        const bool gamma_encode_output = true;
        float get_pass_output_gamma()   {   return get_output_gamma();   }
    #else
        const bool gamma_encode_output = false;
        float get_pass_output_gamma()   {   return 1.0;   }
    #endif
#else
    const bool linearize_input = true;
    const bool gamma_encode_output = true;
    #ifdef FIRST_PASS
        float get_pass_input_gamma()    {   return get_input_gamma();   }
    #else
        float get_pass_input_gamma()    {   return get_intermediate_gamma();   }
    #endif
    #ifdef LAST_PASS
        float get_pass_output_gamma()   {   return get_output_gamma();   }
    #else
        float get_pass_output_gamma()   {   return get_intermediate_gamma();   }
    #endif
#endif
|
||||
|
||||
vec4 decode_input(const vec4 color)
{
    // Linearize the RGB channels on passes that read gamma-encoded input
    // (see linearize_input above); otherwise pass the color through.
    // Alpha is forced to 1.0 when assume_opaque_alpha is set.
    if(!linearize_input)
    {
        return color;
    }
    const vec3 linear_rgb = pow(color.rgb, vec3(get_pass_input_gamma()));
    const float alpha = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(linear_rgb, alpha);
}
|
||||
|
||||
vec4 encode_output(const vec4 color)
{
    // Re-encode linear RGB with the pass's output gamma on passes that write
    // gamma-encoded output (see gamma_encode_output above); otherwise pass
    // the color through. Alpha is forced to 1.0 when assume_opaque_alpha is
    // set.
    if(!gamma_encode_output)
    {
        return color;
    }
    const vec3 encoded_rgb = pow(color.rgb, vec3(1.0/get_pass_output_gamma()));
    const float alpha = assume_opaque_alpha ? 1.0 : color.a;
    return vec4(encoded_rgb, alpha);
}
|
||||
|
||||
// Convenience wrapper: sample a texture and linearize it with the current
// pass's input gamma (see decode_input()). Kept as a macro; the function
// forms below are retained, commented out, for reference.
#define tex2D_linearize(C, D) decode_input(vec4(texture(C, D)))
//vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords)
//{ return decode_input(vec4(texture(tex, tex_coords))); }

//#define tex2D_linearize(C, D, E) decode_input(vec4(texture(C, D, E)))
//vec4 tex2D_linearize(const sampler2D tex, const vec2 tex_coords, const int texel_off)
//{ return decode_input(vec4(texture(tex, tex_coords, texel_off))); }
|
10
crt/shaders/crt-royale/src/includes.h
Normal file
10
crt/shaders/crt-royale/src/includes.h
Normal file
|
@ -0,0 +1,10 @@
|
|||
#define INCLUDES

// NOTE: special-functions.h, gamma-management.h, and blur-functions.h are
// normally shared from ../../../../include/; local copies live in
// crt-royale's src directory until the port is actually working.
#include "../user-settings.h"
#include "derived-settings-and-constants.h"
#include "special-functions.h"
#include "bind-shader-params.h"
#include "gamma-management.h"
#include "blur-functions.h"
#include "scanline-functions.h"
#include "bloom-functions.h"
|
243
crt/shaders/crt-royale/src/quad-pixel-communication.h
Normal file
243
crt/shaders/crt-royale/src/quad-pixel-communication.h
Normal file
|
@ -0,0 +1,243 @@
|
|||
#ifndef QUAD_PIXEL_COMMUNICATION_H
|
||||
#define QUAD_PIXEL_COMMUNICATION_H
|
||||
|
||||
///////////////////////////////// MIT LICENSE ////////////////////////////////
|
||||
|
||||
// Copyright (C) 2014 TroggleMonkey*
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
|
||||
///////////////////////////////// DISCLAIMER /////////////////////////////////
|
||||
|
||||
// *This code was inspired by "Shader Amortization using Pixel Quad Message
|
||||
// Passing" by Eric Penner, published in GPU Pro 2, Chapter VI.2. My intent
|
||||
// is not to plagiarize his fundamentally similar code and assert my own
|
||||
// copyright, but the algorithmic helper functions require so little code that
|
||||
// implementations can't vary by much except bugfixes and conventions. I just
|
||||
// wanted to license my own particular code here to avoid ambiguity and make it
|
||||
// clear that as far as I'm concerned, people can do as they please with it.
|
||||
|
||||
///////////////////////////////// DESCRIPTION ////////////////////////////////
|
||||
|
||||
// Given screen pixel numbers, derive a "quad vector" describing a fragment's
|
||||
// position in its 2x2 pixel quad. Given that vector, obtain the values of any
|
||||
// variable at neighboring fragments.
|
||||
// Requires: Using this file in general requires:
|
||||
// 1.) ddx() and ddy() are present in the current Cg profile.
|
||||
// 2.) The GPU driver is using fine/high-quality derivatives.
|
||||
// Functions will give incorrect results if this is not true,
|
||||
// so a test function is included.
|
||||
|
||||
|
||||
///////////////////// QUAD-PIXEL COMMUNICATION PRIMITIVES ////////////////////
|
||||
|
||||
vec4 get_quad_vector_naive(const vec4 output_pixel_num_wrt_uvxy)
{
    // Requires: Two measures of the current fragment's output pixel number
    //           in the range ([0, IN.output_size.x), [0, IN.output_size.y)):
    //           1.) output_pixel_num_wrt_uvxy.xy increase with uv coords.
    //           2.) output_pixel_num_wrt_uvxy.zw increase with screen xy.
    // Returns:  Two measures of the fragment's position in its 2x2 quad:
    //           1.) .xy: placement with respect to uv direction (origin at
    //               top-left):
    //                   top-left    = (-1.0, -1.0) top-right    = ( 1.0, -1.0)
    //                   bottom-left = (-1.0,  1.0) bottom-right = ( 1.0,  1.0)
    //               Needed to arrange/weight shared texture samples.
    //           2.) .zw: placement with respect to screen xy direction
    //               (IN.position); the origin varies. quad_gather needs this
    //               measure to work correctly.
    //           Note: quad_vector.zw = quad_vector.xy * vec2(
    //                   ddx(output_pixel_num_wrt_uvxy.x),
    //                   ddy(output_pixel_num_wrt_uvxy.y));
    // Caveats:  Assumes the driver always starts 2x2 pixel quads at even
    //           pixel numbers, which can be [nondeterministically] wrong at
    //           odd output resolutions.
    // NOTE(review): frac() is Cg/HLSL; GLSL spells it fract() -- presumably
    // mapped by a compatibility macro elsewhere; verify.
    const vec4 parity = frac(output_pixel_num_wrt_uvxy * 0.5) * 2.0;
    return parity * 2.0 - vec4(1.0);
}
|
||||
|
||||
vec4 get_quad_vector(const vec4 output_pixel_num_wrt_uvxy)
{
    // Requires: Same as get_quad_vector_naive() (see that first).
    // Returns:  Same as get_quad_vector_naive() (see that first), but the
    //           result stays correct even when the 2x2 pixel quad starts at
    //           an odd pixel (possible at odd resolutions).
    const vec4 guess = get_quad_vector_naive(output_pixel_num_wrt_uvxy);
    // If guess.zw doesn't increase with screen xy, the quad started on an
    // odd pixel; derive a +/-1 correction factor from the derivatives:
    const vec2 odd_start_mirror = 0.5 * vec2(ddx(guess.z), ddy(guess.w));
    return guess * odd_start_mirror.xyxy;
}
|
||||
|
||||
vec4 get_quad_vector(const vec2 output_pixel_num_wrt_uv)
{
    // Requires: 1.) ddx() and ddy() are available.
    //           2.) output_pixel_num_wrt_uv must increase with uv coords and
    //               measure the current fragment's output pixel number in:
    //                   ([0, IN.output_size.x), [0, IN.output_size.y))
    // Returns:  Same as get_quad_vector_naive() (see that first), but the
    //           result stays correct even when the 2x2 pixel quad starts at
    //           an odd pixel (possible at odd resolutions).
    // Caveats:  Needs less information than the vec4 version, but it's
    //           potentially slower.
    // Do screen coords increase with or against uv? Get the direction with
    // respect to (uv.x, uv.y) for (screen.x, screen.y) in {-1, 1}:
    const vec2 screen_uv_mirror = vec2(ddx(output_pixel_num_wrt_uv.x),
                                       ddy(output_pixel_num_wrt_uv.y));
    const vec2 parity_wrt_uv = frac(output_pixel_num_wrt_uv * 0.5) * 2.0;
    const vec2 uv_guess = (parity_wrt_uv - vec2(0.5)) * 2.0;
    const vec2 screen_guess = uv_guess * screen_uv_mirror;
    // If screen_guess doesn't increase with screen xy, the quad started on
    // an odd pixel; derive a +/-1 correction factor from the derivatives:
    const vec2 odd_start_mirror = 0.5 * vec2(ddx(screen_guess.x),
                                             ddy(screen_guess.y));
    const vec4 guess = vec4(uv_guess, screen_guess);
    return guess * odd_start_mirror.xyxy;
}
|
||||
|
||||
void quad_gather(const vec4 quad_vector, const vec4 curr,
    out vec4 adjx, out vec4 adjy, out vec4 diag)
{
    // Requires: 1.) ddx()/ddy() with fine/high-quality derivatives.
    //           2.) quad_vector describes this fragment's location in its
    //               2x2 pixel quad per get_quad_vector()'s conventions.
    //           3.) curr is any vector to read from neighboring fragments.
    // Returns (out params): curr's value at the horizontally adjacent,
    //           vertically adjacent, and diagonal fragments of the quad.
    adjx = curr - ddx(curr) * quad_vector.z;
    adjy = curr - ddy(curr) * quad_vector.w;
    diag = adjx - ddy(adjx) * quad_vector.w;
}

void quad_gather(const vec4 quad_vector, const vec3 curr,
    out vec3 adjx, out vec3 adjy, out vec3 diag)
{
    // vec3 overload; see the vec4 version for the contract.
    adjx = curr - ddx(curr) * quad_vector.z;
    adjy = curr - ddy(curr) * quad_vector.w;
    diag = adjx - ddy(adjx) * quad_vector.w;
}

void quad_gather(const vec4 quad_vector, const vec2 curr,
    out vec2 adjx, out vec2 adjy, out vec2 diag)
{
    // vec2 overload; see the vec4 version for the contract.
    adjx = curr - ddx(curr) * quad_vector.z;
    adjy = curr - ddy(curr) * quad_vector.w;
    diag = adjx - ddy(adjx) * quad_vector.w;
}

vec4 quad_gather(const vec4 quad_vector, const float curr)
{
    // Float overload. Packs the quad's values into one vec4:
    //   .x = current, .y = adjacent x, .z = adjacent y, .w = diagonal
    vec4 quad_vals = vec4(curr);
    quad_vals.y = quad_vals.x - ddx(quad_vals.x) * quad_vector.z;
    quad_vals.zw = quad_vals.xy - ddy(quad_vals.xy) * quad_vector.w;
    return quad_vals;
}
|
||||
|
||||
vec4 quad_gather_sum(const vec4 quad_vector, const vec4 curr)
{
    // Requires: Same as quad_gather().
    // Returns:  The sum of curr over all four fragments in the quad.
    vec4 adjx, adjy, diag;
    quad_gather(quad_vector, curr, adjx, adjy, diag);
    return curr + adjx + adjy + diag;
}

vec3 quad_gather_sum(const vec4 quad_vector, const vec3 curr)
{
    // vec3 overload; see the vec4 version for the contract.
    vec3 adjx, adjy, diag;
    quad_gather(quad_vector, curr, adjx, adjy, diag);
    return curr + adjx + adjy + diag;
}

vec2 quad_gather_sum(const vec4 quad_vector, const vec2 curr)
{
    // vec2 overload; see the vec4 version for the contract.
    vec2 adjx, adjy, diag;
    quad_gather(quad_vector, curr, adjx, adjy, diag);
    return curr + adjx + adjy + diag;
}

float quad_gather_sum(const vec4 quad_vector, const float curr)
{
    // Float overload; sums the packed vec4 from the float quad_gather().
    const vec4 quad_vals = quad_gather(quad_vector, curr);
    return quad_vals.x + quad_vals.y + quad_vals.z + quad_vals.w;
}
|
||||
|
||||
bool fine_derivatives_working(const vec4 quad_vector, vec4 curr)
{
    // Requires: 1.) ddx() and ddy() are available.
    //           2.) quad_vector describes this fragment's location in its
    //               2x2 pixel quad per get_quad_vector()'s conventions.
    //           3.) curr must be a test vector with non-constant derivatives
    //               (its value should change nonlinearly across fragments).
    // Returns:  true if fine/hybrid/high-quality derivatives are in use, or
    //           false if coarse derivatives are used or inconclusive.
    // Usage:    Test whether quad-pixel communication is working!
    // Method:   Fine derivatives are confirmed if the following holds (ever,
    //           for any value at any fragment):
    //               (ddy(curr) != ddy(adjx)) or (ddx(curr) != ddx(adjy))
    //           The more values tested (e.g. a vec4 two ways), the easier it
    //           is to demonstrate fine derivatives are working.
    // Fix: in GLSL, != on vectors yields a single scalar bool, so
    // any(vecA != vecB) is ill-formed, and bool2 is Cg/HLSL; use the
    // componentwise notEqual() and a plain logical-or instead.
    // TODO: Check for floating point exact comparison issues!
    const vec4 ddx_curr = ddx(curr);
    const vec4 ddy_curr = ddy(curr);
    const vec4 adjx = curr - ddx_curr * quad_vector.z;
    const vec4 adjy = curr - ddy_curr * quad_vector.w;
    const bool ddy_different = any(notEqual(ddy_curr, ddy(adjx)));
    const bool ddx_different = any(notEqual(ddx_curr, ddx(adjy)));
    return ddy_different || ddx_different;
}
|
||||
|
||||
bool fine_derivatives_working_fast(const vec4 quad_vector, float curr)
{
    // Requires: Same as fine_derivatives_working().
    // Returns:  Same as fine_derivatives_working().
    // Usage:    Faster than fine_derivatives_working() but more prone to
    //           false negatives, so it's less useful for offline testing and
    //           debugging. It's also useless as the basis for dynamic
    //           runtime branching as of May 2014: derivatives (and
    //           quad-pixel communication) are disallowed in branches.
    //           Future GPU's may permit them in dynamic branches if the
    //           branch condition is promised (or enforced) to evaluate the
    //           same across the quad; this version could then become the
    //           more economical choice.
    const float curr_ddx = ddx(curr);
    const float curr_ddy = ddy(curr);
    const float neighbor_x = curr - curr_ddx * quad_vector.z;
    return (curr_ddy != ddy(neighbor_x));
}
|
||||
|
||||
#endif // QUAD_PIXEL_COMMUNICATION_H
|
||||
|
572
crt/shaders/crt-royale/src/scanline-functions-old.h
Normal file
572
crt/shaders/crt-royale/src/scanline-functions-old.h
Normal file
|
@ -0,0 +1,572 @@
|
|||
#ifndef SCANLINE_FUNCTIONS_H
|
||||
#define SCANLINE_FUNCTIONS_H
|
||||
|
||||
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
|
||||
|
||||
// crt-royale: A full-featured CRT shader, with cheese.
|
||||
// Copyright (C) 2014 TroggleMonkey <trogglemonkey@gmx.com>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation; either version 2 of the License, or any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along with
|
||||
// this program; if not, write to the Free Software Foundation, Inc., 59 Temple
|
||||
// Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
|
||||
////////////////////////////////// INCLUDES //////////////////////////////////
|
||||
|
||||
#include "../user-settings.h"
|
||||
#include "derived-settings-and-constants.h"
|
||||
#include "../../../../include/special-functions.h"
|
||||
#include "../../../../include/gamma-management.h"
|
||||
|
||||
|
||||
///////////////////////////// SCANLINE FUNCTIONS /////////////////////////////
|
||||
/*
|
||||
inline float3 get_gaussian_sigma(const float3 color, const float sigma_range)
|
||||
{
|
||||
// Requires: Globals:
|
||||
// 1.) beam_min_sigma and beam_max_sigma are global floats
|
||||
// containing the desired minimum and maximum beam standard
|
||||
// deviations, for dim and bright colors respectively.
|
||||
// 2.) beam_max_sigma must be > 0.0
|
||||
// 3.) beam_min_sigma must be in (0.0, beam_max_sigma]
|
||||
// 4.) beam_spot_power must be defined as a global float.
|
||||
// Parameters:
|
||||
// 1.) color is the underlying source color along a scanline
|
||||
// 2.) sigma_range = beam_max_sigma - beam_min_sigma; we take
|
||||
// sigma_range as a parameter to avoid repeated computation
|
||||
// when beam_{min, max}_sigma are runtime shader parameters
|
||||
// Optional: Users may set beam_spot_shape_function to 1 to define the
|
||||
// inner f(color) subfunction (see below) as:
|
||||
// f(color) = sqrt(1.0 - (color - 1.0)*(color - 1.0))
|
||||
// Otherwise (technically, if beam_spot_shape_function < 0.5):
|
||||
// f(color) = pow(color, beam_spot_power)
|
||||
// Returns: The standard deviation of the Gaussian beam for "color:"
|
||||
// sigma = beam_min_sigma + sigma_range * f(color)
|
||||
// Details/Discussion:
|
||||
// The beam's spot shape vaguely resembles an aspect-corrected f() in the
|
||||
// range [0, 1] (not quite, but it's related). f(color) = color makes
|
||||
// spots look like diamonds, and a spherical function or cube balances
|
||||
// between variable width and a soft/realistic shape. A beam_spot_power
|
||||
// > 1.0 can produce an ugly spot shape and more initial clipping, but the
|
||||
// final shape also differs based on the horizontal resampling filter and
|
||||
// the phosphor bloom. For instance, resampling horizontally in nonlinear
|
||||
// light and/or with a sharp (e.g. Lanczos) filter will sharpen the spot
|
||||
// shape, but a sixth root is still quite soft. A power function (default
|
||||
// 1.0/3.0 beam_spot_power) is most flexible, but a fixed spherical curve
|
||||
// has the highest variability without an awful spot shape.
|
||||
//
|
||||
// beam_min_sigma affects scanline sharpness/aliasing in dim areas, and its
|
||||
// difference from beam_max_sigma affects beam width variability. It only
|
||||
// affects clipping [for pure Gaussians] if beam_spot_power > 1.0 (which is
|
||||
// a conservative estimate for a more complex constraint).
|
||||
//
|
||||
// beam_max_sigma affects clipping and increasing scanline width/softness
|
||||
// as color increases. The wider this is, the more scanlines need to be
|
||||
// evaluated to avoid distortion. For a pure Gaussian, the max_beam_sigma
|
||||
// at which the first unused scanline always has a weight < 1.0/255.0 is:
|
||||
// num scanlines = 2, max_beam_sigma = 0.2089; distortions begin ~0.34
|
||||
// num scanlines = 3, max_beam_sigma = 0.3879; distortions begin ~0.52
|
||||
// num scanlines = 4, max_beam_sigma = 0.5723; distortions begin ~0.70
|
||||
// num scanlines = 5, max_beam_sigma = 0.7591; distortions begin ~0.89
|
||||
// num scanlines = 6, max_beam_sigma = 0.9483; distortions begin ~1.08
|
||||
// Generalized Gaussians permit more leeway here as steepness increases.
|
||||
if(beam_spot_shape_function < 0.5)
|
||||
{
|
||||
// Use a power function:
|
||||
return float3(beam_min_sigma) + sigma_range *
|
||||
pow(color, beam_spot_power);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Use a spherical function:
|
||||
const float3 color_minus_1 = color - float3(1.0);
|
||||
return float3(beam_min_sigma) + sigma_range *
|
||||
sqrt(float3(1.0) - color_minus_1*color_minus_1);
|
||||
}
|
||||
}
|
||||
|
||||
inline float3 get_generalized_gaussian_beta(const float3 color,
|
||||
const float shape_range)
|
||||
{
|
||||
// Requires: Globals:
|
||||
// 1.) beam_min_shape and beam_max_shape are global floats
|
||||
// containing the desired min/max generalized Gaussian
|
||||
// beta parameters, for dim and bright colors respectively.
|
||||
// 2.) beam_max_shape must be >= 2.0
|
||||
// 3.) beam_min_shape must be in [2.0, beam_max_shape]
|
||||
// 4.) beam_shape_power must be defined as a global float.
|
||||
// Parameters:
|
||||
// 1.) color is the underlying source color along a scanline
|
||||
// 2.) shape_range = beam_max_shape - beam_min_shape; we take
|
||||
// shape_range as a parameter to avoid repeated computation
|
||||
// when beam_{min, max}_shape are runtime shader parameters
|
||||
// Returns: The type-I generalized Gaussian "shape" parameter beta for
|
||||
// the given color.
|
||||
// Details/Discussion:
|
||||
// Beta affects the scanline distribution as follows:
|
||||
// a.) beta < 2.0 narrows the peak to a spike with a discontinuous slope
|
||||
// b.) beta == 2.0 just degenerates to a Gaussian
|
||||
// c.) beta > 2.0 flattens and widens the peak, then drops off more steeply
|
||||
// than a Gaussian. Whereas high sigmas widen and soften peaks, high
|
||||
// beta widen and sharpen peaks at the risk of aliasing.
|
||||
// Unlike high beam_spot_powers, high beam_shape_powers actually soften shape
|
||||
// transitions, whereas lower ones sharpen them (at the risk of aliasing).
|
||||
return beam_min_shape + shape_range * pow(color, beam_shape_power);
|
||||
}
|
||||
|
||||
float3 scanline_gaussian_integral_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range)
|
||||
{
|
||||
// Requires: 1.) dist is the distance of the [potentially separate R/G/B]
|
||||
// point(s) from a scanline in units of scanlines, where
|
||||
// 1.0 means the sample point straddles the next scanline.
|
||||
// 2.) color is the underlying source color along a scanline.
|
||||
// 3.) pixel_height is the output pixel height in scanlines.
|
||||
// 4.) Requirements of get_gaussian_sigma() must be met.
|
||||
// Returns: Return a scanline's light output over a given pixel.
|
||||
// Details:
|
||||
// The CRT beam profile follows a roughly Gaussian distribution which is
|
||||
// wider for bright colors than dark ones. The integral over the full
|
||||
// range of a Gaussian function is always 1.0, so we can vary the beam
|
||||
// with a standard deviation without affecting brightness. 'x' = distance:
|
||||
// gaussian sample = 1/(sigma*sqrt(2*pi)) * e**(-(x**2)/(2*sigma**2))
|
||||
// gaussian integral = 0.5 (1.0 + erf(x/(sigma * sqrt(2))))
|
||||
// Use a numerical approximation of the "error function" (the Gaussian
|
||||
// indefinite integral) to find the definite integral of the scanline's
|
||||
// average brightness over a given pixel area. Even if curved coords were
|
||||
// used in this pass, a flat scalar pixel height works almost as well as a
|
||||
// pixel height computed from a full pixel-space to scanline-space matrix.
|
||||
const float3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
const float3 ph_offset = float3(pixel_height * 0.5);
|
||||
const float3 denom_inv = 1.0/(sigma*sqrt(2.0));
|
||||
const float3 integral_high = erf((dist + ph_offset)*denom_inv);
|
||||
const float3 integral_low = erf((dist - ph_offset)*denom_inv);
|
||||
return color * 0.5*(integral_high - integral_low)/pixel_height;
|
||||
}
|
||||
|
||||
float3 scanline_generalized_gaussian_integral_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range,
|
||||
const float shape_range)
|
||||
{
|
||||
// Requires: 1.) Requirements of scanline_gaussian_integral_contrib()
|
||||
// must be met.
|
||||
// 2.) Requirements of get_gaussian_sigma() must be met.
|
||||
// 3.) Requirements of get_generalized_gaussian_beta() must be
|
||||
// met.
|
||||
// Returns: Return a scanline's light output over a given pixel.
|
||||
// A generalized Gaussian distribution allows the shape (beta) to vary
|
||||
// as well as the width (alpha). "gamma" refers to the gamma function:
|
||||
// generalized sample =
|
||||
// beta/(2*alpha*gamma(1/beta)) * e**(-(|x|/alpha)**beta)
|
||||
// ligamma(s, z) is the lower incomplete gamma function, for which we only
|
||||
// implement two of four branches (because we keep 1/beta <= 0.5):
|
||||
// generalized integral = 0.5 + 0.5* sign(x) *
|
||||
// ligamma(1/beta, (|x|/alpha)**beta)/gamma(1/beta)
|
||||
// See get_generalized_gaussian_beta() for a discussion of beta.
|
||||
// We base alpha on the intended Gaussian sigma, but it only strictly
|
||||
// models models standard deviation at beta == 2, because the standard
|
||||
// deviation depends on both alpha and beta (keeping alpha independent is
|
||||
// faster and preserves intuitive behavior and a full spectrum of results).
|
||||
const float3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const float3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
const float3 alpha_inv = float3(1.0)/alpha;
|
||||
const float3 s = float3(1.0)/beta;
|
||||
const float3 ph_offset = float3(pixel_height * 0.5);
|
||||
// Pass beta to gamma_impl to avoid repeated divides. Similarly pass
|
||||
// beta (i.e. 1/s) and 1/gamma(s) to normalized_ligamma_impl.
|
||||
const float3 gamma_s_inv = float3(1.0)/gamma_impl(s, beta);
|
||||
const float3 dist1 = dist + ph_offset;
|
||||
const float3 dist0 = dist - ph_offset;
|
||||
const float3 integral_high = sign(dist1) * normalized_ligamma_impl(
|
||||
s, pow(abs(dist1)*alpha_inv, beta), beta, gamma_s_inv);
|
||||
const float3 integral_low = sign(dist0) * normalized_ligamma_impl(
|
||||
s, pow(abs(dist0)*alpha_inv, beta), beta, gamma_s_inv);
|
||||
return color * 0.5*(integral_high - integral_low)/pixel_height;
|
||||
}
|
||||
|
||||
float3 scanline_gaussian_sampled_contrib(const float3 dist, const float3 color,
|
||||
const float pixel_height, const float sigma_range)
|
||||
{
|
||||
// See scanline_gaussian integral_contrib() for detailed comments!
|
||||
// gaussian sample = 1/(sigma*sqrt(2*pi)) * e**(-(x**2)/(2*sigma**2))
|
||||
const float3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
// Avoid repeated divides:
|
||||
const float3 sigma_inv = float3(1.0)/sigma;
|
||||
const float3 inner_denom_inv = 0.5 * sigma_inv * sigma_inv;
|
||||
const float3 outer_denom_inv = sigma_inv/sqrt(2.0*pi);
|
||||
if(beam_antialias_level > 0.5)
|
||||
{
|
||||
// Sample 1/3 pixel away in each direction as well:
|
||||
const float3 sample_offset = float3(pixel_height/3.0);
|
||||
const float3 dist2 = dist + sample_offset;
|
||||
const float3 dist3 = abs(dist - sample_offset);
|
||||
// Average three pure Gaussian samples:
|
||||
const float3 scale = color/3.0 * outer_denom_inv;
|
||||
const float3 weight1 = exp(-(dist*dist)*inner_denom_inv);
|
||||
const float3 weight2 = exp(-(dist2*dist2)*inner_denom_inv);
|
||||
const float3 weight3 = exp(-(dist3*dist3)*inner_denom_inv);
|
||||
return scale * (weight1 + weight2 + weight3);
|
||||
}
|
||||
else
|
||||
{
|
||||
return color*exp(-(dist*dist)*inner_denom_inv)*outer_denom_inv;
|
||||
}
|
||||
}
|
||||
|
||||
float3 scanline_generalized_gaussian_sampled_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range,
|
||||
const float shape_range)
|
||||
{
|
||||
// See scanline_generalized_gaussian_integral_contrib() for details!
|
||||
// generalized sample =
|
||||
// beta/(2*alpha*gamma(1/beta)) * e**(-(|x|/alpha)**beta)
|
||||
const float3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const float3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
// Avoid repeated divides:
|
||||
const float3 alpha_inv = float3(1.0)/alpha;
|
||||
const float3 beta_inv = float3(1.0)/beta;
|
||||
const float3 scale = color * beta * 0.5 * alpha_inv /
|
||||
gamma_impl(beta_inv, beta);
|
||||
if(beam_antialias_level > 0.5)
|
||||
{
|
||||
// Sample 1/3 pixel closer to and farther from the scanline too.
|
||||
const float3 sample_offset = float3(pixel_height/3.0);
|
||||
const float3 dist2 = dist + sample_offset;
|
||||
const float3 dist3 = abs(dist - sample_offset);
|
||||
// Average three generalized Gaussian samples:
|
||||
const float3 weight1 = exp(-pow(abs(dist*alpha_inv), beta));
|
||||
const float3 weight2 = exp(-pow(abs(dist2*alpha_inv), beta));
|
||||
const float3 weight3 = exp(-pow(abs(dist3*alpha_inv), beta));
|
||||
return scale/3.0 * (weight1 + weight2 + weight3);
|
||||
}
|
||||
else
|
||||
{
|
||||
return scale * exp(-pow(abs(dist*alpha_inv), beta));
|
||||
}
|
||||
}
|
||||
|
||||
inline float3 scanline_contrib(float3 dist, float3 color,
|
||||
float pixel_height, const float sigma_range, const float shape_range)
|
||||
{
|
||||
// Requires: 1.) Requirements of scanline_gaussian_integral_contrib()
|
||||
// must be met.
|
||||
// 2.) Requirements of get_gaussian_sigma() must be met.
|
||||
// 3.) Requirements of get_generalized_gaussian_beta() must be
|
||||
// met.
|
||||
// Returns: Return a scanline's light output over a given pixel, using
|
||||
// a generalized or pure Gaussian distribution and sampling or
|
||||
// integrals as desired by user codepath choices.
|
||||
if(beam_generalized_gaussian)
|
||||
{
|
||||
if(beam_antialias_level > 1.5)
|
||||
{
|
||||
return scanline_generalized_gaussian_integral_contrib(
|
||||
dist, color, pixel_height, sigma_range, shape_range);
|
||||
}
|
||||
else
|
||||
{
|
||||
return scanline_generalized_gaussian_sampled_contrib(
|
||||
dist, color, pixel_height, sigma_range, shape_range);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if(beam_antialias_level > 1.5)
|
||||
{
|
||||
return scanline_gaussian_integral_contrib(
|
||||
dist, color, pixel_height, sigma_range);
|
||||
}
|
||||
else
|
||||
{
|
||||
return scanline_gaussian_sampled_contrib(
|
||||
dist, color, pixel_height, sigma_range);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline float3 get_raw_interpolated_color(const float3 color0,
|
||||
const float3 color1, const float3 color2, const float3 color3,
|
||||
const float4 weights)
|
||||
{
|
||||
// Use max to avoid bizarre artifacts from negative colors:
|
||||
return max(mul(weights, float4x3(color0, color1, color2, color3)), 0.0);
|
||||
}
|
||||
|
||||
float3 get_interpolated_linear_color(const float3 color0, const float3 color1,
|
||||
const float3 color2, const float3 color3, const float4 weights)
|
||||
{
|
||||
// Requires: 1.) Requirements of include/gamma-management.h must be met:
|
||||
// intermediate_gamma must be globally defined, and input
|
||||
// colors are interpreted as linear RGB unless you #define
|
||||
// GAMMA_ENCODE_EVERY_FBO (in which case they are
|
||||
// interpreted as gamma-encoded with intermediate_gamma).
|
||||
// 2.) color0-3 are colors sampled from a texture with tex2D().
|
||||
// They are interpreted as defined in requirement 1.
|
||||
// 3.) weights contains weights for each color, summing to 1.0.
|
||||
// 4.) beam_horiz_linear_rgb_weight must be defined as a global
|
||||
// float in [0.0, 1.0] describing how much blending should
|
||||
// be done in linear RGB (rest is gamma-corrected RGB).
|
||||
// 5.) RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE must be #defined
|
||||
// if beam_horiz_linear_rgb_weight is anything other than a
|
||||
// static constant, or we may try branching at runtime
|
||||
// without dynamic branches allowed (slow).
|
||||
// Returns: Return an interpolated color lookup between the four input
|
||||
// colors based on the weights in weights. The final color will
|
||||
// be a linear RGB value, but the blending will be done as
|
||||
// indicated above.
|
||||
const float intermediate_gamma = get_intermediate_gamma();
|
||||
// Branch if beam_horiz_linear_rgb_weight is static (for free) or if the
|
||||
// profile allows dynamic branches (faster than computing extra pows):
|
||||
#ifndef RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE
|
||||
#define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
|
||||
#else
|
||||
#ifdef DRIVERS_ALLOW_DYNAMIC_BRANCHES
|
||||
#define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
|
||||
#endif
|
||||
#endif
|
||||
#ifdef SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
|
||||
// beam_horiz_linear_rgb_weight is static, so we can branch:
|
||||
#ifdef GAMMA_ENCODE_EVERY_FBO
|
||||
const float3 gamma_mixed_color = pow(get_raw_interpolated_color(
|
||||
color0, color1, color2, color3, weights), intermediate_gamma);
|
||||
if(beam_horiz_linear_rgb_weight > 0.0)
|
||||
{
|
||||
const float3 linear_mixed_color = get_raw_interpolated_color(
|
||||
pow(color0, intermediate_gamma),
|
||||
pow(color1, intermediate_gamma),
|
||||
pow(color2, intermediate_gamma),
|
||||
pow(color3, intermediate_gamma),
|
||||
weights);
|
||||
return lerp(gamma_mixed_color, linear_mixed_color,
|
||||
beam_horiz_linear_rgb_weight);
|
||||
}
|
||||
else
|
||||
{
|
||||
return gamma_mixed_color;
|
||||
}
|
||||
#else
|
||||
const float3 linear_mixed_color = get_raw_interpolated_color(
|
||||
color0, color1, color2, color3, weights);
|
||||
if(beam_horiz_linear_rgb_weight < 1.0)
|
||||
{
|
||||
const float3 gamma_mixed_color = get_raw_interpolated_color(
|
||||
pow(color0, 1.0/intermediate_gamma),
|
||||
pow(color1, 1.0/intermediate_gamma),
|
||||
pow(color2, 1.0/intermediate_gamma),
|
||||
pow(color3, 1.0/intermediate_gamma),
|
||||
weights);
|
||||
return lerp(gamma_mixed_color, linear_mixed_color,
|
||||
beam_horiz_linear_rgb_weight);
|
||||
}
|
||||
else
|
||||
{
|
||||
return linear_mixed_color;
|
||||
}
|
||||
#endif // GAMMA_ENCODE_EVERY_FBO
|
||||
#else
|
||||
#ifdef GAMMA_ENCODE_EVERY_FBO
|
||||
// Inputs: color0-3 are colors in gamma-encoded RGB.
|
||||
const float3 gamma_mixed_color = pow(get_raw_interpolated_color(
|
||||
color0, color1, color2, color3, weights), intermediate_gamma);
|
||||
const float3 linear_mixed_color = get_raw_interpolated_color(
|
||||
pow(color0, intermediate_gamma),
|
||||
pow(color1, intermediate_gamma),
|
||||
pow(color2, intermediate_gamma),
|
||||
pow(color3, intermediate_gamma),
|
||||
weights);
|
||||
return lerp(gamma_mixed_color, linear_mixed_color,
|
||||
beam_horiz_linear_rgb_weight);
|
||||
#else
|
||||
// Inputs: color0-3 are colors in linear RGB.
|
||||
const float3 linear_mixed_color = get_raw_interpolated_color(
|
||||
color0, color1, color2, color3, weights);
|
||||
const float3 gamma_mixed_color = get_raw_interpolated_color(
|
||||
pow(color0, 1.0/intermediate_gamma),
|
||||
pow(color1, 1.0/intermediate_gamma),
|
||||
pow(color2, 1.0/intermediate_gamma),
|
||||
pow(color3, 1.0/intermediate_gamma),
|
||||
weights);
|
||||
return lerp(gamma_mixed_color, linear_mixed_color,
|
||||
beam_horiz_linear_rgb_weight);
|
||||
#endif // GAMMA_ENCODE_EVERY_FBO
|
||||
#endif // SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
|
||||
}
|
||||
|
||||
float3 get_scanline_color(const sampler2D texture, const float2 scanline_uv,
|
||||
const float2 uv_step_x, const float4 weights)
|
||||
{
|
||||
// Requires: 1.) scanline_uv must be vertically snapped to the caller's
|
||||
// desired line or scanline and horizontally snapped to the
|
||||
// texel just left of the output pixel (color1)
|
||||
// 2.) uv_step_x must contain the horizontal uv distance
|
||||
// between texels.
|
||||
// 3.) weights must contain interpolation filter weights for
|
||||
// color0, color1, color2, and color3, where color1 is just
|
||||
// left of the output pixel.
|
||||
// Returns: Return a horizontally interpolated texture lookup using 2-4
|
||||
// nearby texels, according to weights and the conventions of
|
||||
// get_interpolated_linear_color().
|
||||
// We can ignore the outside texture lookups for Quilez resampling.
|
||||
const float3 color1 = tex2D(texture, scanline_uv).rgb;
|
||||
const float3 color2 = tex2D(texture, scanline_uv + uv_step_x).rgb;
|
||||
float3 color0 = float3(0.0);
|
||||
float3 color3 = float3(0.0);
|
||||
if(beam_horiz_filter > 0.5)
|
||||
{
|
||||
color0 = tex2D(texture, scanline_uv - uv_step_x).rgb;
|
||||
color3 = tex2D(texture, scanline_uv + 2.0 * uv_step_x).rgb;
|
||||
}
|
||||
// Sample the texture as-is, whether it's linear or gamma-encoded:
|
||||
// get_interpolated_linear_color() will handle the difference.
|
||||
return get_interpolated_linear_color(color0, color1, color2, color3, weights);
|
||||
}
|
||||
|
||||
float3 sample_single_scanline_horizontal(const sampler2D texture,
|
||||
const float2 tex_uv, const float2 texture_size,
|
||||
const float2 texture_size_inv)
|
||||
{
|
||||
// TODO: Add function requirements.
|
||||
// Snap to the previous texel and get sample dists from 2/4 nearby texels:
|
||||
const float2 curr_texel = tex_uv * texture_size;
|
||||
// Use under_half to fix a rounding bug right around exact texel locations.
|
||||
const float2 prev_texel =
|
||||
floor(curr_texel - float2(under_half)) + float2(0.5);
|
||||
const float2 prev_texel_hor = float2(prev_texel.x, curr_texel.y);
|
||||
const float2 prev_texel_hor_uv = prev_texel_hor * texture_size_inv;
|
||||
const float prev_dist = curr_texel.x - prev_texel_hor.x;
|
||||
const float4 sample_dists = float4(1.0 + prev_dist, prev_dist,
|
||||
1.0 - prev_dist, 2.0 - prev_dist);
|
||||
// Get Quilez, Lanczos2, or Gaussian resize weights for 2/4 nearby texels:
|
||||
float4 weights;
|
||||
if(beam_horiz_filter < 0.5)
|
||||
{
|
||||
// Quilez:
|
||||
const float x = sample_dists.y;
|
||||
const float w2 = x*x*x*(x*(x*6.0 - 15.0) + 10.0);
|
||||
weights = float4(0.0, 1.0 - w2, w2, 0.0);
|
||||
}
|
||||
else if(beam_horiz_filter < 1.5)
|
||||
{
|
||||
// Gaussian:
|
||||
float inner_denom_inv = 1.0/(2.0*beam_horiz_sigma*beam_horiz_sigma);
|
||||
weights = exp(-(sample_dists*sample_dists)*inner_denom_inv);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Lanczos2:
|
||||
const float4 pi_dists = FIX_ZERO(sample_dists * pi);
|
||||
weights = 2.0 * sin(pi_dists) * sin(pi_dists * 0.5) /
|
||||
(pi_dists * pi_dists);
|
||||
}
|
||||
// Ensure the weight sum == 1.0:
|
||||
const float4 final_weights = weights/dot(weights, float4(1.0));
|
||||
// Get the interpolated horizontal scanline color:
|
||||
const float2 uv_step_x = float2(texture_size_inv.x, 0.0);
|
||||
return get_scanline_color(
|
||||
texture, prev_texel_hor_uv, uv_step_x, final_weights);
|
||||
}
|
||||
|
||||
float3 sample_rgb_scanline_horizontal(const sampler2D texture,
|
||||
const float2 tex_uv, const float2 texture_size,
|
||||
const float2 texture_size_inv)
|
||||
{
|
||||
// TODO: Add function requirements.
|
||||
// Rely on a helper to make convergence easier.
|
||||
if(beam_misconvergence)
|
||||
{
|
||||
const float3 convergence_offsets_rgb =
|
||||
get_convergence_offsets_x_vector();
|
||||
const float3 offset_u_rgb =
|
||||
convergence_offsets_rgb * texture_size_inv.xxx;
|
||||
const float2 scanline_uv_r = tex_uv - float2(offset_u_rgb.r, 0.0);
|
||||
const float2 scanline_uv_g = tex_uv - float2(offset_u_rgb.g, 0.0);
|
||||
const float2 scanline_uv_b = tex_uv - float2(offset_u_rgb.b, 0.0);
|
||||
const float3 sample_r = sample_single_scanline_horizontal(
|
||||
texture, scanline_uv_r, texture_size, texture_size_inv);
|
||||
const float3 sample_g = sample_single_scanline_horizontal(
|
||||
texture, scanline_uv_g, texture_size, texture_size_inv);
|
||||
const float3 sample_b = sample_single_scanline_horizontal(
|
||||
texture, scanline_uv_b, texture_size, texture_size_inv);
|
||||
return float3(sample_r.r, sample_g.g, sample_b.b);
|
||||
}
|
||||
else
|
||||
{
|
||||
return sample_single_scanline_horizontal(texture, tex_uv, texture_size,
|
||||
texture_size_inv);
|
||||
}
|
||||
}
|
||||
|
||||
float2 get_last_scanline_uv(const float2 tex_uv, const float2 texture_size,
|
||||
const float2 texture_size_inv, const float2 il_step_multiple,
|
||||
const float frame_count, out float dist)
|
||||
{
|
||||
// Compute texture coords for the last/upper scanline, accounting for
|
||||
// interlacing: With interlacing, only consider even/odd scanlines every
|
||||
// other frame. Top-field first (TFF) order puts even scanlines on even
|
||||
// frames, and BFF order puts them on odd frames. Texels are centered at:
|
||||
// frac(tex_uv * texture_size) == x.5
|
||||
// Caution: If these coordinates ever seem incorrect, first make sure it's
|
||||
// not because anisotropic filtering is blurring across field boundaries.
|
||||
// Note: TFF/BFF won't matter for sources that double-weave or similar.
|
||||
const float field_offset = floor(il_step_multiple.y * 0.75) *
|
||||
fmod(frame_count + float(interlace_bff), 2.0);
|
||||
const float2 curr_texel = tex_uv * texture_size;
|
||||
// Use under_half to fix a rounding bug right around exact texel locations.
|
||||
const float2 prev_texel_num = floor(curr_texel - float2(under_half));
|
||||
const float wrong_field = fmod(
|
||||
prev_texel_num.y + field_offset, il_step_multiple.y);
|
||||
const float2 scanline_texel_num = prev_texel_num - float2(0.0, wrong_field);
|
||||
// Snap to the center of the previous scanline in the current field:
|
||||
const float2 scanline_texel = scanline_texel_num + float2(0.5);
|
||||
const float2 scanline_uv = scanline_texel * texture_size_inv;
|
||||
// Save the sample's distance from the scanline, in units of scanlines:
|
||||
dist = (curr_texel.y - scanline_texel.y)/il_step_multiple.y;
|
||||
return scanline_uv;
|
||||
}
|
||||
*/
|
||||
bool is_interlaced(float num_lines)
|
||||
{
|
||||
// Detect interlacing based on the number of lines in the source.
|
||||
if(interlace_detect)
|
||||
{
|
||||
// NTSC: 525 lines, 262.5/field; 486 active (2 half-lines), 243/field
|
||||
// NTSC Emulators: Typically 224 or 240 lines
|
||||
// PAL: 625 lines, 312.5/field; 576 active (typical), 288/field
|
||||
// PAL Emulators: ?
|
||||
// ATSC: 720p, 1080i, 1080p
|
||||
// Where do we place our cutoffs? Assumptions:
|
||||
// 1.) We only need to care about active lines.
|
||||
// 2.) Anything > 288 and <= 576 lines is probably interlaced.
|
||||
// 3.) Anything > 576 lines is probably not interlaced...
|
||||
// 4.) ...except 1080 lines, which is a crapshoot (user decision).
|
||||
// 5.) Just in case the main program uses calculated video sizes,
|
||||
// we should nudge the float thresholds a bit.
|
||||
bool sd_interlace;
|
||||
if (num_lines > 288.5 && num_lines < 576.5)
|
||||
{sd_interlace = true;}
|
||||
else
|
||||
{sd_interlace = false;}
|
||||
bool hd_interlace;
|
||||
if (num_lines > 1079.5 && num_lines < 1080.5)
|
||||
{hd_interlace = false;}
|
||||
else
|
||||
{hd_interlace = sd_interlace || hd_interlace;}
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif // SCANLINE_FUNCTIONS_H
|
||||
|
|
@ -1,6 +1,3 @@
|
|||
#ifndef SCANLINE_FUNCTIONS_H
|
||||
#define SCANLINE_FUNCTIONS_H
|
||||
|
||||
///////////////////////////// GPL LICENSE NOTICE /////////////////////////////
|
||||
|
||||
// crt-royale: A full-featured CRT shader, with cheese.
|
||||
|
@ -22,15 +19,77 @@
|
|||
|
||||
////////////////////////////////// INCLUDES //////////////////////////////////
|
||||
|
||||
#include "../user-settings.h"
|
||||
#include "derived-settings-and-constants.h"
|
||||
#include "../../../../include/special-functions.h"
|
||||
#include "../../../../include/gamma-management.h"
|
||||
|
||||
//#include "../user-settings.h"
|
||||
//#include "derived-settings-and-constants.h"
|
||||
//#include "../../../../include/special-functions.h"
|
||||
//#include "../../../../include/gamma-management.h"
|
||||
|
||||
///////////////////////////// SCANLINE FUNCTIONS /////////////////////////////
|
||||
/*
|
||||
inline float3 get_gaussian_sigma(const float3 color, const float sigma_range)
|
||||
|
||||
bool is_interlaced(float num_lines)
|
||||
{
|
||||
// Detect interlacing based on the number of lines in the source.
|
||||
if(interlace_detect)
|
||||
{
|
||||
// NTSC: 525 lines, 262.5/field; 486 active (2 half-lines), 243/field
|
||||
// NTSC Emulators: Typically 224 or 240 lines
|
||||
// PAL: 625 lines, 312.5/field; 576 active (typical), 288/field
|
||||
// PAL Emulators: ?
|
||||
// ATSC: 720p, 1080i, 1080p
|
||||
// Where do we place our cutoffs? Assumptions:
|
||||
// 1.) We only need to care about active lines.
|
||||
// 2.) Anything > 288 and <= 576 lines is probably interlaced.
|
||||
// 3.) Anything > 576 lines is probably not interlaced...
|
||||
// 4.) ...except 1080 lines, which is a crapshoot (user decision).
|
||||
// 5.) Just in case the main program uses calculated video sizes,
|
||||
// we should nudge the float thresholds a bit.
|
||||
bool sd_interlace;
|
||||
if (num_lines > 288.5 && num_lines < 576.5)
|
||||
{sd_interlace = true;}
|
||||
else
|
||||
{sd_interlace = false;}
|
||||
bool hd_interlace;
|
||||
if (num_lines > 1079.5 && num_lines < 1080.5)
|
||||
{hd_interlace = true;}
|
||||
else
|
||||
{hd_interlace = false;}
|
||||
return (sd_interlace || hd_interlace);
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
vec2 get_last_scanline_uv(const vec2 tex_uv, const vec2 texture_size,
|
||||
const vec2 texture_size_inv, const vec2 il_step_multiple,
|
||||
const float frame_count, out float dist)
|
||||
{
|
||||
// Compute texture coords for the last/upper scanline, accounting for
|
||||
// interlacing: With interlacing, only consider even/odd scanlines every
|
||||
// other frame. Top-field first (TFF) order puts even scanlines on even
|
||||
// frames, and BFF order puts them on odd frames. Texels are centered at:
|
||||
// frac(tex_uv * texture_size) == x.5
|
||||
// Caution: If these coordinates ever seem incorrect, first make sure it's
|
||||
// not because anisotropic filtering is blurring across field boundaries.
|
||||
// Note: TFF/BFF won't matter for sources that double-weave or similar.
|
||||
const float field_offset = floor(il_step_multiple.y * 0.75) *
|
||||
mod(frame_count + float(interlace_bff), 2.0);
|
||||
const vec2 curr_texel = tex_uv * texture_size;
|
||||
// Use under_half to fix a rounding bug right around exact texel locations.
|
||||
const vec2 prev_texel_num = floor(curr_texel - vec2(under_half));
|
||||
const float wrong_field = mod(
|
||||
prev_texel_num.y + field_offset, il_step_multiple.y);
|
||||
const vec2 scanline_texel_num = prev_texel_num - vec2(0.0, wrong_field);
|
||||
// Snap to the center of the previous scanline in the current field:
|
||||
const vec2 scanline_texel = scanline_texel_num + vec2(0.5);
|
||||
const vec2 scanline_uv = scanline_texel * texture_size_inv;
|
||||
// Save the sample's distance from the scanline, in units of scanlines:
|
||||
dist = (curr_texel.y - scanline_texel.y)/il_step_multiple.y;
|
||||
return scanline_uv;
|
||||
}
|
||||
|
||||
vec3 get_gaussian_sigma(const vec3 color, const float sigma_range)
|
||||
{
|
||||
// Requires: Globals:
|
||||
// 1.) beam_min_sigma and beam_max_sigma are global floats
|
||||
|
@ -82,19 +141,19 @@ inline float3 get_gaussian_sigma(const float3 color, const float sigma_range)
|
|||
if(beam_spot_shape_function < 0.5)
|
||||
{
|
||||
// Use a power function:
|
||||
return float3(beam_min_sigma) + sigma_range *
|
||||
pow(color, beam_spot_power);
|
||||
return vec3(beam_min_sigma) + sigma_range *
|
||||
pow(color, vec3(beam_spot_power));
|
||||
}
|
||||
else
|
||||
{
|
||||
// Use a spherical function:
|
||||
const float3 color_minus_1 = color - float3(1.0);
|
||||
return float3(beam_min_sigma) + sigma_range *
|
||||
sqrt(float3(1.0) - color_minus_1*color_minus_1);
|
||||
const vec3 color_minus_1 = color - vec3(1.0);
|
||||
return vec3(beam_min_sigma) + sigma_range *
|
||||
sqrt(vec3(1.0) - color_minus_1*color_minus_1);
|
||||
}
|
||||
}
|
||||
|
||||
inline float3 get_generalized_gaussian_beta(const float3 color,
|
||||
vec3 get_generalized_gaussian_beta(const vec3 color,
|
||||
const float shape_range)
|
||||
{
|
||||
// Requires: Globals:
|
||||
|
@ -120,11 +179,11 @@ inline float3 get_generalized_gaussian_beta(const float3 color,
|
|||
// beta widen and sharpen peaks at the risk of aliasing.
|
||||
// Unlike high beam_spot_powers, high beam_shape_powers actually soften shape
|
||||
// transitions, whereas lower ones sharpen them (at the risk of aliasing).
|
||||
return beam_min_shape + shape_range * pow(color, beam_shape_power);
|
||||
return beam_min_shape + shape_range * pow(color, vec3(beam_shape_power));
|
||||
}
|
||||
|
||||
float3 scanline_gaussian_integral_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range)
|
||||
vec3 scanline_gaussian_integral_contrib(const vec3 dist,
|
||||
const vec3 color, const float pixel_height, const float sigma_range)
|
||||
{
|
||||
// Requires: 1.) dist is the distance of the [potentially separate R/G/B]
|
||||
// point(s) from a scanline in units of scanlines, where
|
||||
|
@ -145,16 +204,16 @@ float3 scanline_gaussian_integral_contrib(const float3 dist,
|
|||
// average brightness over a given pixel area. Even if curved coords were
|
||||
// used in this pass, a flat scalar pixel height works almost as well as a
|
||||
// pixel height computed from a full pixel-space to scanline-space matrix.
|
||||
const float3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
const float3 ph_offset = float3(pixel_height * 0.5);
|
||||
const float3 denom_inv = 1.0/(sigma*sqrt(2.0));
|
||||
const float3 integral_high = erf((dist + ph_offset)*denom_inv);
|
||||
const float3 integral_low = erf((dist - ph_offset)*denom_inv);
|
||||
const vec3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
const vec3 ph_offset = vec3(pixel_height * 0.5);
|
||||
const vec3 denom_inv = 1.0/(sigma*sqrt(2.0));
|
||||
const vec3 integral_high = erf((dist + ph_offset)*denom_inv);
|
||||
const vec3 integral_low = erf((dist - ph_offset)*denom_inv);
|
||||
return color * 0.5*(integral_high - integral_low)/pixel_height;
|
||||
}
|
||||
|
||||
float3 scanline_generalized_gaussian_integral_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range,
|
||||
vec3 scanline_generalized_gaussian_integral_contrib(const vec3 dist,
|
||||
const vec3 color, const float pixel_height, const float sigma_range,
|
||||
const float shape_range)
|
||||
{
|
||||
// Requires: 1.) Requirements of scanline_gaussian_integral_contrib()
|
||||
|
@ -176,44 +235,44 @@ float3 scanline_generalized_gaussian_integral_contrib(const float3 dist,
|
|||
// models models standard deviation at beta == 2, because the standard
|
||||
// deviation depends on both alpha and beta (keeping alpha independent is
|
||||
// faster and preserves intuitive behavior and a full spectrum of results).
|
||||
const float3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const float3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
const float3 alpha_inv = float3(1.0)/alpha;
|
||||
const float3 s = float3(1.0)/beta;
|
||||
const float3 ph_offset = float3(pixel_height * 0.5);
|
||||
const vec3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const vec3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
const vec3 alpha_inv = vec3(1.0)/alpha;
|
||||
const vec3 s = vec3(1.0)/beta;
|
||||
const vec3 ph_offset = vec3(pixel_height * 0.5);
|
||||
// Pass beta to gamma_impl to avoid repeated divides. Similarly pass
|
||||
// beta (i.e. 1/s) and 1/gamma(s) to normalized_ligamma_impl.
|
||||
const float3 gamma_s_inv = float3(1.0)/gamma_impl(s, beta);
|
||||
const float3 dist1 = dist + ph_offset;
|
||||
const float3 dist0 = dist - ph_offset;
|
||||
const float3 integral_high = sign(dist1) * normalized_ligamma_impl(
|
||||
const vec3 gamma_s_inv = vec3(1.0)/gamma_impl(s, beta);
|
||||
const vec3 dist1 = dist + ph_offset;
|
||||
const vec3 dist0 = dist - ph_offset;
|
||||
const vec3 integral_high = sign(dist1) * normalized_ligamma_impl(
|
||||
s, pow(abs(dist1)*alpha_inv, beta), beta, gamma_s_inv);
|
||||
const float3 integral_low = sign(dist0) * normalized_ligamma_impl(
|
||||
const vec3 integral_low = sign(dist0) * normalized_ligamma_impl(
|
||||
s, pow(abs(dist0)*alpha_inv, beta), beta, gamma_s_inv);
|
||||
return color * 0.5*(integral_high - integral_low)/pixel_height;
|
||||
}
|
||||
|
||||
float3 scanline_gaussian_sampled_contrib(const float3 dist, const float3 color,
|
||||
vec3 scanline_gaussian_sampled_contrib(const vec3 dist, const vec3 color,
|
||||
const float pixel_height, const float sigma_range)
|
||||
{
|
||||
// See scanline_gaussian integral_contrib() for detailed comments!
|
||||
// gaussian sample = 1/(sigma*sqrt(2*pi)) * e**(-(x**2)/(2*sigma**2))
|
||||
const float3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
const vec3 sigma = get_gaussian_sigma(color, sigma_range);
|
||||
// Avoid repeated divides:
|
||||
const float3 sigma_inv = float3(1.0)/sigma;
|
||||
const float3 inner_denom_inv = 0.5 * sigma_inv * sigma_inv;
|
||||
const float3 outer_denom_inv = sigma_inv/sqrt(2.0*pi);
|
||||
const vec3 sigma_inv = vec3(1.0)/sigma;
|
||||
const vec3 inner_denom_inv = 0.5 * sigma_inv * sigma_inv;
|
||||
const vec3 outer_denom_inv = sigma_inv/sqrt(2.0*pi);
|
||||
if(beam_antialias_level > 0.5)
|
||||
{
|
||||
// Sample 1/3 pixel away in each direction as well:
|
||||
const float3 sample_offset = float3(pixel_height/3.0);
|
||||
const float3 dist2 = dist + sample_offset;
|
||||
const float3 dist3 = abs(dist - sample_offset);
|
||||
const vec3 sample_offset = vec3(pixel_height/3.0);
|
||||
const vec3 dist2 = dist + sample_offset;
|
||||
const vec3 dist3 = abs(dist - sample_offset);
|
||||
// Average three pure Gaussian samples:
|
||||
const float3 scale = color/3.0 * outer_denom_inv;
|
||||
const float3 weight1 = exp(-(dist*dist)*inner_denom_inv);
|
||||
const float3 weight2 = exp(-(dist2*dist2)*inner_denom_inv);
|
||||
const float3 weight3 = exp(-(dist3*dist3)*inner_denom_inv);
|
||||
const vec3 scale = color/3.0 * outer_denom_inv;
|
||||
const vec3 weight1 = exp(-(dist*dist)*inner_denom_inv);
|
||||
const vec3 weight2 = exp(-(dist2*dist2)*inner_denom_inv);
|
||||
const vec3 weight3 = exp(-(dist3*dist3)*inner_denom_inv);
|
||||
return scale * (weight1 + weight2 + weight3);
|
||||
}
|
||||
else
|
||||
|
@ -222,30 +281,30 @@ float3 scanline_gaussian_sampled_contrib(const float3 dist, const float3 color,
|
|||
}
|
||||
}
|
||||
|
||||
float3 scanline_generalized_gaussian_sampled_contrib(const float3 dist,
|
||||
const float3 color, const float pixel_height, const float sigma_range,
|
||||
vec3 scanline_generalized_gaussian_sampled_contrib(const vec3 dist,
|
||||
const vec3 color, const float pixel_height, const float sigma_range,
|
||||
const float shape_range)
|
||||
{
|
||||
// See scanline_generalized_gaussian_integral_contrib() for details!
|
||||
// generalized sample =
|
||||
// beta/(2*alpha*gamma(1/beta)) * e**(-(|x|/alpha)**beta)
|
||||
const float3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const float3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
const vec3 alpha = sqrt(2.0) * get_gaussian_sigma(color, sigma_range);
|
||||
const vec3 beta = get_generalized_gaussian_beta(color, shape_range);
|
||||
// Avoid repeated divides:
|
||||
const float3 alpha_inv = float3(1.0)/alpha;
|
||||
const float3 beta_inv = float3(1.0)/beta;
|
||||
const float3 scale = color * beta * 0.5 * alpha_inv /
|
||||
const vec3 alpha_inv = vec3(1.0)/alpha;
|
||||
const vec3 beta_inv = vec3(1.0)/beta;
|
||||
const vec3 scale = color * beta * 0.5 * alpha_inv /
|
||||
gamma_impl(beta_inv, beta);
|
||||
if(beam_antialias_level > 0.5)
|
||||
{
|
||||
// Sample 1/3 pixel closer to and farther from the scanline too.
|
||||
const float3 sample_offset = float3(pixel_height/3.0);
|
||||
const float3 dist2 = dist + sample_offset;
|
||||
const float3 dist3 = abs(dist - sample_offset);
|
||||
const vec3 sample_offset = vec3(pixel_height/3.0);
|
||||
const vec3 dist2 = dist + sample_offset;
|
||||
const vec3 dist3 = abs(dist - sample_offset);
|
||||
// Average three generalized Gaussian samples:
|
||||
const float3 weight1 = exp(-pow(abs(dist*alpha_inv), beta));
|
||||
const float3 weight2 = exp(-pow(abs(dist2*alpha_inv), beta));
|
||||
const float3 weight3 = exp(-pow(abs(dist3*alpha_inv), beta));
|
||||
const vec3 weight1 = exp(-pow(abs(dist*alpha_inv), beta));
|
||||
const vec3 weight2 = exp(-pow(abs(dist2*alpha_inv), beta));
|
||||
const vec3 weight3 = exp(-pow(abs(dist3*alpha_inv), beta));
|
||||
return scale/3.0 * (weight1 + weight2 + weight3);
|
||||
}
|
||||
else
|
||||
|
@ -254,7 +313,7 @@ float3 scanline_generalized_gaussian_sampled_contrib(const float3 dist,
|
|||
}
|
||||
}
|
||||
|
||||
inline float3 scanline_contrib(float3 dist, float3 color,
|
||||
vec3 scanline_contrib(vec3 dist, vec3 color,
|
||||
float pixel_height, const float sigma_range, const float shape_range)
|
||||
{
|
||||
// Requires: 1.) Requirements of scanline_gaussian_integral_contrib()
|
||||
|
@ -291,282 +350,4 @@ inline float3 scanline_contrib(float3 dist, float3 color,
|
|||
dist, color, pixel_height, sigma_range);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline float3 get_raw_interpolated_color(const float3 color0,
|
||||
const float3 color1, const float3 color2, const float3 color3,
|
||||
const float4 weights)
|
||||
{
|
||||
// Use max to avoid bizarre artifacts from negative colors:
|
||||
return max(mul(weights, float4x3(color0, color1, color2, color3)), 0.0);
|
||||
}
|
||||
|
||||
float3 get_interpolated_linear_color(const float3 color0, const float3 color1,
    const float3 color2, const float3 color3, const float4 weights)
{
    //  Requires:   1.) Requirements of include/gamma-management.h must be met:
    //                  intermediate_gamma must be globally defined, and input
    //                  colors are interpreted as linear RGB unless you #define
    //                  GAMMA_ENCODE_EVERY_FBO (in which case they are
    //                  interpreted as gamma-encoded with intermediate_gamma).
    //              2.) color0-3 are colors sampled from a texture with tex2D().
    //                  They are interpreted as defined in requirement 1.
    //              3.) weights contains weights for each color, summing to 1.0.
    //              4.) beam_horiz_linear_rgb_weight must be defined as a global
    //                  float in [0.0, 1.0] describing how much blending should
    //                  be done in linear RGB (rest is gamma-corrected RGB).
    //              5.) RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE must be #defined
    //                  if beam_horiz_linear_rgb_weight is anything other than a
    //                  static constant, or we may try branching at runtime
    //                  without dynamic branches allowed (slow).
    //  Returns:    Return an interpolated color lookup between the four input
    //              colors based on the weights in weights.  The final color will
    //              be a linear RGB value, but the blending will be done as
    //              indicated above.
    const float intermediate_gamma = get_intermediate_gamma();
    //  Branch if beam_horiz_linear_rgb_weight is static (for free) or if the
    //  profile allows dynamic branches (faster than computing extra pows):
    //  The #define dance below collapses those two conditions into a single
    //  flag so the blend code only has to be written twice, not four times.
    #ifndef RUNTIME_SCANLINES_HORIZ_FILTER_COLORSPACE
        #define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
    #else
        #ifdef DRIVERS_ALLOW_DYNAMIC_BRANCHES
            #define SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
        #endif
    #endif
    #ifdef SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
        //  beam_horiz_linear_rgb_weight is static, so we can branch:
        #ifdef GAMMA_ENCODE_EVERY_FBO
            const float3 gamma_mixed_color = pow(get_raw_interpolated_color(
                color0, color1, color2, color3, weights), intermediate_gamma);
            if(beam_horiz_linear_rgb_weight > 0.0)
            {
                const float3 linear_mixed_color = get_raw_interpolated_color(
                    pow(color0, intermediate_gamma),
                    pow(color1, intermediate_gamma),
                    pow(color2, intermediate_gamma),
                    pow(color3, intermediate_gamma),
                    weights);
                return lerp(gamma_mixed_color, linear_mixed_color,
                    beam_horiz_linear_rgb_weight);
            }
            else
            {
                return gamma_mixed_color;
            }
        #else
            const float3 linear_mixed_color = get_raw_interpolated_color(
                color0, color1, color2, color3, weights);
            if(beam_horiz_linear_rgb_weight < 1.0)
            {
                const float3 gamma_mixed_color = get_raw_interpolated_color(
                    pow(color0, 1.0/intermediate_gamma),
                    pow(color1, 1.0/intermediate_gamma),
                    pow(color2, 1.0/intermediate_gamma),
                    pow(color3, 1.0/intermediate_gamma),
                    weights);
                return lerp(gamma_mixed_color, linear_mixed_color,
                    beam_horiz_linear_rgb_weight);
            }
            else
            {
                return linear_mixed_color;
            }
        #endif  //  GAMMA_ENCODE_EVERY_FBO
    #else
        //  Can't branch on beam_horiz_linear_rgb_weight: compute both blends
        //  unconditionally and lerp between them.
        #ifdef GAMMA_ENCODE_EVERY_FBO
            //  Inputs: color0-3 are colors in gamma-encoded RGB.
            const float3 gamma_mixed_color = pow(get_raw_interpolated_color(
                color0, color1, color2, color3, weights), intermediate_gamma);
            const float3 linear_mixed_color = get_raw_interpolated_color(
                pow(color0, intermediate_gamma),
                pow(color1, intermediate_gamma),
                pow(color2, intermediate_gamma),
                pow(color3, intermediate_gamma),
                weights);
            return lerp(gamma_mixed_color, linear_mixed_color,
                beam_horiz_linear_rgb_weight);
        #else
            //  Inputs: color0-3 are colors in linear RGB.
            const float3 linear_mixed_color = get_raw_interpolated_color(
                color0, color1, color2, color3, weights);
            const float3 gamma_mixed_color = get_raw_interpolated_color(
                pow(color0, 1.0/intermediate_gamma),
                pow(color1, 1.0/intermediate_gamma),
                pow(color2, 1.0/intermediate_gamma),
                pow(color3, 1.0/intermediate_gamma),
                weights);
            return lerp(gamma_mixed_color, linear_mixed_color,
                beam_horiz_linear_rgb_weight);
        #endif  //  GAMMA_ENCODE_EVERY_FBO
    #endif  //  SCANLINES_BRANCH_FOR_LINEAR_RGB_WEIGHT
}
|
||||
|
||||
float3 get_scanline_color(const sampler2D texture, const float2 scanline_uv,
    const float2 uv_step_x, const float4 weights)
{
    //  Requires:   1.) scanline_uv must be vertically snapped to the caller's
    //                  desired line or scanline and horizontally snapped to the
    //                  texel just left of the output pixel (the second tap).
    //              2.) uv_step_x must contain the horizontal uv distance
    //                  between texels.
    //              3.) weights holds filter weights for the four taps, ordered
    //                  left to right around the output pixel.
    //  Returns:    A horizontally interpolated texture lookup using 2-4 nearby
    //              texels, per weights and the conventions of
    //              get_interpolated_linear_color().
    //  Quilez resampling (beam_horiz_filter <= 0.5) only ever weights the two
    //  inner taps, so the outer taps default to black and are fetched lazily.
    const float3 inner_left = tex2D(texture, scanline_uv).rgb;
    const float3 inner_right = tex2D(texture, scanline_uv + uv_step_x).rgb;
    float3 outer_left = float3(0.0);
    float3 outer_right = float3(0.0);
    const bool need_outer_taps = beam_horiz_filter > 0.5;
    if(need_outer_taps)
    {
        outer_left = tex2D(texture, scanline_uv - uv_step_x).rgb;
        outer_right = tex2D(texture, scanline_uv + 2.0 * uv_step_x).rgb;
    }
    //  Sample the texture as-is, whether it's linear or gamma-encoded:
    //  get_interpolated_linear_color() handles the difference.
    return get_interpolated_linear_color(
        outer_left, inner_left, inner_right, outer_right, weights);
}
|
||||
|
||||
float3 sample_single_scanline_horizontal(const sampler2D texture,
    const float2 tex_uv, const float2 texture_size,
    const float2 texture_size_inv)
{
    //  Requires:   1.) tex_uv addresses the desired (already vertically
    //                  snapped) scanline in standard [0, 1] uv coords.
    //              2.) texture_size is the texture's size in texels, and
    //                  texture_size_inv is its reciprocal.
    //              3.) beam_horiz_filter and beam_horiz_sigma must be global
    //                  floats selecting/parameterizing the horizontal filter.
    //              4.) under_half, pi, and the FIX_ZERO() macro must be
    //                  defined at file scope (FIX_ZERO presumably nudges
    //                  zero-valued inputs to avoid 0/0 — TODO confirm).
    //  Returns:    The filtered color of the scanline at tex_uv, resampled
    //              horizontally with Quilez, Gaussian, or Lanczos2 weights.
    //  Snap to the previous texel and get sample dists from 2/4 nearby texels:
    const float2 curr_texel = tex_uv * texture_size;
    //  Use under_half to fix a rounding bug right around exact texel locations.
    const float2 prev_texel =
        floor(curr_texel - float2(under_half)) + float2(0.5);
    const float2 prev_texel_hor = float2(prev_texel.x, curr_texel.y);
    const float2 prev_texel_hor_uv = prev_texel_hor * texture_size_inv;
    const float prev_dist = curr_texel.x - prev_texel_hor.x;
    //  Distances of the four taps (far-left, left, right, far-right) from
    //  the output sample, in texels:
    const float4 sample_dists = float4(1.0 + prev_dist, prev_dist,
        1.0 - prev_dist, 2.0 - prev_dist);
    //  Get Quilez, Lanczos2, or Gaussian resize weights for 2/4 nearby texels:
    float4 weights;
    if(beam_horiz_filter < 0.5)
    {
        //  Quilez:  smootherstep blend between the two inner taps only.
        const float x = sample_dists.y;
        const float w2 = x*x*x*(x*(x*6.0 - 15.0) + 10.0);
        weights = float4(0.0, 1.0 - w2, w2, 0.0);
    }
    else if(beam_horiz_filter < 1.5)
    {
        //  Gaussian:
        float inner_denom_inv = 1.0/(2.0*beam_horiz_sigma*beam_horiz_sigma);
        weights = exp(-(sample_dists*sample_dists)*inner_denom_inv);
    }
    else
    {
        //  Lanczos2:  sinc(x) * sinc(x/2), expanded via 2*sin(pi*x)*
        //  sin(pi*x/2)/(pi*x)^2; FIX_ZERO guards the x == 0 singularity.
        const float4 pi_dists = FIX_ZERO(sample_dists * pi);
        weights = 2.0 * sin(pi_dists) * sin(pi_dists * 0.5) /
            (pi_dists * pi_dists);
    }
    //  Ensure the weight sum == 1.0:
    const float4 final_weights = weights/dot(weights, float4(1.0));
    //  Get the interpolated horizontal scanline color:
    const float2 uv_step_x = float2(texture_size_inv.x, 0.0);
    return get_scanline_color(
        texture, prev_texel_hor_uv, uv_step_x, final_weights);
}
|
||||
|
||||
float3 sample_rgb_scanline_horizontal(const sampler2D texture,
    const float2 tex_uv, const float2 texture_size,
    const float2 texture_size_inv)
{
    //  Requires:   1.) Requirements of sample_single_scanline_horizontal()
    //                  must be met for texture, tex_uv, texture_size, and
    //                  texture_size_inv.
    //              2.) beam_misconvergence must be a global flag, and
    //                  get_convergence_offsets_x_vector() must return
    //                  per-channel horizontal offsets (scaled here by
    //                  texture_size_inv, so presumably in texels —
    //                  TODO confirm).
    //  Returns:    The scanline color at tex_uv, optionally sampling each of
    //              R/G/B at its own horizontally shifted uv to simulate CRT
    //              beam misconvergence.
    //  Rely on a helper to make convergence easier.
    if(beam_misconvergence)
    {
        const float3 convergence_offsets_rgb =
            get_convergence_offsets_x_vector();
        const float3 offset_u_rgb =
            convergence_offsets_rgb * texture_size_inv.xxx;
        const float2 scanline_uv_r = tex_uv - float2(offset_u_rgb.r, 0.0);
        const float2 scanline_uv_g = tex_uv - float2(offset_u_rgb.g, 0.0);
        const float2 scanline_uv_b = tex_uv - float2(offset_u_rgb.b, 0.0);
        const float3 sample_r = sample_single_scanline_horizontal(
            texture, scanline_uv_r, texture_size, texture_size_inv);
        const float3 sample_g = sample_single_scanline_horizontal(
            texture, scanline_uv_g, texture_size, texture_size_inv);
        const float3 sample_b = sample_single_scanline_horizontal(
            texture, scanline_uv_b, texture_size, texture_size_inv);
        //  Keep only each sample's own channel:
        return float3(sample_r.r, sample_g.g, sample_b.b);
    }
    else
    {
        return sample_single_scanline_horizontal(texture, tex_uv, texture_size,
            texture_size_inv);
    }
}
|
||||
|
||||
float2 get_last_scanline_uv(const float2 tex_uv, const float2 texture_size,
    const float2 texture_size_inv, const float2 il_step_multiple,
    const float frame_count, out float dist)
{
    //  Requires:   1.) texture_size_inv == 1.0/texture_size.
    //              2.) il_step_multiple.y is the line stride between scanlines
    //                  of the same field (presumably 2.0 when interlaced, 1.0
    //                  otherwise — TODO confirm against callers).
    //              3.) interlace_bff and under_half must be global values.
    //  Returns:    uv coords of the previous scanline's center in the current
    //              field; dist receives the sample's distance from it, in
    //              units of scanlines.
    //  Compute texture coords for the last/upper scanline, accounting for
    //  interlacing: With interlacing, only consider even/odd scanlines every
    //  other frame.  Top-field first (TFF) order puts even scanlines on even
    //  frames, and BFF order puts them on odd frames.  Texels are centered at:
    //      frac(tex_uv * texture_size) == x.5
    //  Caution: If these coordinates ever seem incorrect, first make sure it's
    //  not because anisotropic filtering is blurring across field boundaries.
    //  Note: TFF/BFF won't matter for sources that double-weave or similar.
    //  field_offset is 0 when not interlaced (floor(0.75) == 0) and toggles
    //  with the frame parity (and BFF setting) when interlaced:
    const float field_offset = floor(il_step_multiple.y * 0.75) *
        fmod(frame_count + float(interlace_bff), 2.0);
    const float2 curr_texel = tex_uv * texture_size;
    //  Use under_half to fix a rounding bug right around exact texel locations.
    const float2 prev_texel_num = floor(curr_texel - float2(under_half));
    //  How many lines above us the current field's scanline lies:
    const float wrong_field = fmod(
        prev_texel_num.y + field_offset, il_step_multiple.y);
    const float2 scanline_texel_num = prev_texel_num - float2(0.0, wrong_field);
    //  Snap to the center of the previous scanline in the current field:
    const float2 scanline_texel = scanline_texel_num + float2(0.5);
    const float2 scanline_uv = scanline_texel * texture_size_inv;
    //  Save the sample's distance from the scanline, in units of scanlines:
    dist = (curr_texel.y - scanline_texel.y)/il_step_multiple.y;
    return scanline_uv;
}
|
||||
*/
|
||||
bool is_interlaced(float num_lines)
{
    //  Requires:   num_lines is the number of active lines in the source, and
    //              interlace_detect must be a global flag.
    //  Returns:    True if the source is likely interlaced based on its line
    //              count; always false when interlace_detect is disabled.
    //  Detect interlacing based on the number of lines in the source.
    if(interlace_detect)
    {
        //  NTSC: 525 lines, 262.5/field; 486 active (2 half-lines), 243/field
        //  NTSC Emulators: Typically 224 or 240 lines
        //  PAL: 625 lines, 312.5/field; 576 active (typical), 288/field
        //  PAL Emulators: ?
        //  ATSC: 720p, 1080i, 1080p
        //  Where do we place our cutoffs?  Assumptions:
        //  1.) We only need to care about active lines.
        //  2.) Anything > 288 and <= 576 lines is probably interlaced.
        //  3.) Anything > 576 lines is probably not interlaced...
        //  4.) ...except 1080 lines, which is a crapshoot (user decision).
        //  5.) Just in case the main program uses calculated video sizes,
        //      we should nudge the float thresholds a bit.
        //  BUGFIX: The original read hd_interlace before initializing it and
        //  fell off the end of this branch without returning a value (both
        //  undefined behavior).  Initialize everything and return explicitly.
        const bool sd_interlace = (num_lines > 288.5 && num_lines < 576.5);
        const bool in_1080_range = (num_lines > 1079.5 && num_lines < 1080.5);
        //  Per assumption 4, the 1080-line case should eventually be a user
        //  decision (e.g. an interlace_1080i option); treat it as
        //  non-interlaced for now — TODO: wire up a runtime setting.
        const bool hd_interlace = false;
        return in_1080_range ? hd_interlace : sd_interlace;
    }
    else
    {
        return false;
    }
}
|
||||
|
||||
|
||||
#endif // SCANLINE_FUNCTIONS_H
|
||||
|
||||
}
|
498
crt/shaders/crt-royale/src/special-functions-old.h
Normal file
498
crt/shaders/crt-royale/src/special-functions-old.h
Normal file
|
@ -0,0 +1,498 @@
|
|||
#ifndef SPECIAL_FUNCTIONS_H
|
||||
#define SPECIAL_FUNCTIONS_H
|
||||
|
||||
///////////////////////////////// MIT LICENSE ////////////////////////////////
|
||||
|
||||
// Copyright (C) 2014 TroggleMonkey
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
|
||||
|
||||
///////////////////////////////// DESCRIPTION ////////////////////////////////
|
||||
|
||||
// This file implements the following mathematical special functions:
|
||||
// 1.) erf() = 2/sqrt(pi) * indefinite_integral(e**(-x**2))
|
||||
// 2.) gamma(s), a real-numbered extension of the integer factorial function
|
||||
// It also implements normalized_ligamma(s, z), a normalized lower incomplete
|
||||
// gamma function for s < 0.5 only. Both gamma() and normalized_ligamma() can
|
||||
// be called with an _impl suffix to use an implementation version with a few
|
||||
// extra precomputed parameters (which may be useful for the caller to reuse).
|
||||
// See below for details.
|
||||
//
|
||||
// Design Rationale:
|
||||
// Pretty much every line of code in this file is duplicated four times for
|
||||
// different input types (vec4/vec3/vec2/float). This is unfortunate,
|
||||
// but Cg doesn't allow function templates. Macros would be far less verbose,
|
||||
// but they would make the code harder to document and read. I don't expect
|
||||
// these functions will require a whole lot of maintenance changes unless
|
||||
// someone ever has need for more robust incomplete gamma functions, so code
|
||||
// duplication seems to be the lesser evil in this case.
|
||||
|
||||
|
||||
/////////////////////////// GAUSSIAN ERROR FUNCTION //////////////////////////
|
||||
|
||||
vec4 erf6(const vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    Return an Abramowitz/Stegun approximation of erf(), where:
    //                  erf(x) = 2/sqrt(pi) * integral(e**(-x**2))
    //              This approximation has a max absolute error of 2.5*10**-5
    //              with solid numerical robustness and efficiency.  See:
    //                  https://en.wikipedia.org/wiki/Error_function#Approximation_with_elementary_functions
    //  Consistency fix: parameter is now const like every other overload.
    const vec4 one = vec4(1.0);
    const vec4 sign_x = sign(x);
    const vec4 t = one/(one + 0.47047*abs(x));
    const vec4 result = one - t*(0.3480242 + t*(-0.0958798 + t*0.7478556))*
        exp(-(x*x));
    //  The polynomial is fit for x >= 0; odd symmetry extends it to x < 0:
    return result * sign_x;
}

vec3 erf6(const vec3 x)
{
    //  vec3 version:
    const vec3 one = vec3(1.0);
    const vec3 sign_x = sign(x);
    const vec3 t = one/(one + 0.47047*abs(x));
    const vec3 result = one - t*(0.3480242 + t*(-0.0958798 + t*0.7478556))*
        exp(-(x*x));
    return result * sign_x;
}

vec2 erf6(const vec2 x)
{
    //  vec2 version:
    const vec2 one = vec2(1.0);
    const vec2 sign_x = sign(x);
    const vec2 t = one/(one + 0.47047*abs(x));
    const vec2 result = one - t*(0.3480242 + t*(-0.0958798 + t*0.7478556))*
        exp(-(x*x));
    return result * sign_x;
}

float erf6(const float x)
{
    //  Float version:
    const float sign_x = sign(x);
    const float t = 1.0/(1.0 + 0.47047*abs(x));
    const float result = 1.0 - t*(0.3480242 + t*(-0.0958798 + t*0.7478556))*
        exp(-(x*x));
    return result * sign_x;
}
|
||||
|
||||
vec4 erft(const vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    Approximate erf() with the hyperbolic tangent.  The error
    //              is visually noticeable, but it's blazing fast and
    //              perceptually close...at least on ATI hardware.  See:
    //                  http://www.maplesoft.com/applications/view.aspx?SID=5525&view=html
    //  Warning:    Only use this if your hardware drivers correctly implement
    //              tanh(): My nVidia 8800GTS returns garbage output.
    const float tanh_scale = 1.202760580;
    return tanh(tanh_scale * x);
}

vec3 erft(const vec3 x)
{
    //  vec3 version:
    const float tanh_scale = 1.202760580;
    return tanh(tanh_scale * x);
}

vec2 erft(const vec2 x)
{
    //  vec2 version:
    const float tanh_scale = 1.202760580;
    return tanh(tanh_scale * x);
}

float erft(const float x)
{
    //  Float version:
    const float tanh_scale = 1.202760580;
    return tanh(tanh_scale * x);
}
|
||||
|
||||
vec4 erf(const vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    Some approximation of erf(x), depending on user settings.
    //  Compile-time dispatch: erft() trades accuracy for speed; erf6() is the
    //  more accurate Abramowitz/Stegun fit.
    #ifdef ERF_FAST_APPROXIMATION
        return erft(x);
    #else
        return erf6(x);
    #endif
}

vec3 erf(const vec3 x)
{
    //  vec3 version:
    #ifdef ERF_FAST_APPROXIMATION
        return erft(x);
    #else
        return erf6(x);
    #endif
}

vec2 erf(const vec2 x)
{
    //  vec2 version:
    #ifdef ERF_FAST_APPROXIMATION
        return erft(x);
    #else
        return erf6(x);
    #endif
}

float erf(const float x)
{
    //  Float version:
    #ifdef ERF_FAST_APPROXIMATION
        return erft(x);
    #else
        return erf6(x);
    #endif
}
|
||||
|
||||
|
||||
/////////////////////////// COMPLETE GAMMA FUNCTION //////////////////////////
|
||||
|
||||
vec4 gamma_impl(const vec4 s, const vec4 s_inv)
{
    //  Requires:   1.) s is the standard parameter to the gamma function, and
    //                  it should lie in the [0, 36] range.
    //              2.) s_inv = 1.0/s.  This implementation function requires
    //                  the caller to precompute this value, giving users the
    //                  opportunity to reuse it.
    //  Returns:    Return approximate gamma function (real-numbered factorial)
    //              output using the Lanczos approximation with two coefficients
    //              calculated using Paul Godfrey's method here:
    //                  http://my.fit.edu/~gabdo/gamma.txt
    //              An optimal g value for s in [0, 36] is ~1.12906830989, with
    //              a maximum relative error of 0.000463 for 2**16 equally
    //              evals.  We could use three coeffs (0.0000346 error) without
    //              hurting latency, but this allows more parallelism with
    //              outside instructions.
    const vec4 g = vec4(1.12906830989);
    const vec4 c0 = vec4(0.8109119309638332633713423362694399653724431);
    const vec4 c1 = vec4(0.4808354605142681877121661197951496120000040);
    const vec4 e = vec4(2.71828182845904523536028747135266249775724709);
    const vec4 sph = s + vec4(0.5);
    const vec4 lanczos_sum = c0 + c1/(s + vec4(1.0));
    const vec4 base = (sph + g)/e;  //  or (s + g + vec4(0.5))/e
    //  gamma(s + 1) = base**sph * lanczos_sum; divide by s for gamma(s).
    //  This has less error for small s's than (s -= 1.0) at the beginning.
    return (pow(base, sph) * lanczos_sum) * s_inv;
}

vec3 gamma_impl(const vec3 s, const vec3 s_inv)
{
    //  vec3 version (see the vec4 overload for derivation notes):
    const vec3 g = vec3(1.12906830989);
    const vec3 c0 = vec3(0.8109119309638332633713423362694399653724431);
    const vec3 c1 = vec3(0.4808354605142681877121661197951496120000040);
    const vec3 e = vec3(2.71828182845904523536028747135266249775724709);
    const vec3 sph = s + vec3(0.5);
    const vec3 lanczos_sum = c0 + c1/(s + vec3(1.0));
    const vec3 base = (sph + g)/e;
    return (pow(base, sph) * lanczos_sum) * s_inv;
}

vec2 gamma_impl(const vec2 s, const vec2 s_inv)
{
    //  vec2 version:
    const vec2 g = vec2(1.12906830989);
    const vec2 c0 = vec2(0.8109119309638332633713423362694399653724431);
    const vec2 c1 = vec2(0.4808354605142681877121661197951496120000040);
    const vec2 e = vec2(2.71828182845904523536028747135266249775724709);
    const vec2 sph = s + vec2(0.5);
    const vec2 lanczos_sum = c0 + c1/(s + vec2(1.0));
    const vec2 base = (sph + g)/e;
    return (pow(base, sph) * lanczos_sum) * s_inv;
}

float gamma_impl(const float s, const float s_inv)
{
    //  Float version:
    const float g = 1.12906830989;
    const float c0 = 0.8109119309638332633713423362694399653724431;
    const float c1 = 0.4808354605142681877121661197951496120000040;
    const float e = 2.71828182845904523536028747135266249775724709;
    const float sph = s + 0.5;
    const float lanczos_sum = c0 + c1/(s + 1.0);
    const float base = (sph + g)/e;
    return (pow(base, sph) * lanczos_sum) * s_inv;
}
|
||||
|
||||
vec4 gamma(const vec4 s)
{
    //  Requires:   s is the standard parameter to the gamma function, and it
    //              should lie in the [0, 36] range.
    //  Returns:    Return approximate gamma function output with a maximum
    //              relative error of 0.000463.  See gamma_impl for details.
    //  Precompute the reciprocal the _impl version wants, so callers that
    //  don't need it can stay oblivious:
    const vec4 recip_s = vec4(1.0)/s;
    return gamma_impl(s, recip_s);
}

vec3 gamma(const vec3 s)
{
    //  vec3 version:
    const vec3 recip_s = vec3(1.0)/s;
    return gamma_impl(s, recip_s);
}

vec2 gamma(const vec2 s)
{
    //  vec2 version:
    const vec2 recip_s = vec2(1.0)/s;
    return gamma_impl(s, recip_s);
}

float gamma(const float s)
{
    //  Float version:
    const float recip_s = 1.0/s;
    return gamma_impl(s, recip_s);
}
|
||||
|
||||
|
||||
//////////////// INCOMPLETE GAMMA FUNCTIONS (RESTRICTED INPUT) ///////////////
|
||||
|
||||
// Lower incomplete gamma function for small s and z (implementation):
|
||||
vec4 ligamma_small_z_impl(const vec4 s, const vec4 z, const vec4 s_inv)
{
    //  Requires:   1.) s < ~0.5
    //              2.) z <= ~0.775075
    //              3.) s_inv = 1.0/s (precomputed for outside reuse)
    //  Returns:    A series representation for the lower incomplete gamma
    //              function for small s and small z (4 terms).
    //  The actual "rolled up" summation looks like:
    //      last_sign = 1.0; last_pow = 1.0; last_factorial = 1.0;
    //      sum = last_sign * last_pow / ((s + k) * last_factorial)
    //      for(int i = 0; i < 4; ++i)
    //      {
    //          last_sign *= -1.0; last_pow *= z; last_factorial *= i;
    //          sum += last_sign * last_pow / ((s + k) * last_factorial);
    //      }
    //  Unrolled, constant-unfolded and arranged for madds and parallelism:
    const vec4 scale = pow(z, s);
    vec4 sum = s_inv;  //  Summation iteration 0 result
    //  Summation iterations 1, 2, and 3:
    const vec4 z_sq = z*z;
    const vec4 denom1 = s + vec4(1.0);
    const vec4 denom2 = 2.0*s + vec4(4.0);
    const vec4 denom3 = 6.0*s + vec4(18.0);
    //vec4 denom4 = 24.0*s + vec4(96.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    //sum += z_sq * z_sq / denom4;
    //  Scale and return:
    return scale * sum;
}

vec3 ligamma_small_z_impl(const vec3 s, const vec3 z, const vec3 s_inv)
{
    //  vec3 version (see the vec4 overload for the series derivation):
    const vec3 scale = pow(z, s);
    vec3 sum = s_inv;
    const vec3 z_sq = z*z;
    const vec3 denom1 = s + vec3(1.0);
    const vec3 denom2 = 2.0*s + vec3(4.0);
    const vec3 denom3 = 6.0*s + vec3(18.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}

vec2 ligamma_small_z_impl(const vec2 s, const vec2 z, const vec2 s_inv)
{
    //  vec2 version:
    const vec2 scale = pow(z, s);
    vec2 sum = s_inv;
    const vec2 z_sq = z*z;
    const vec2 denom1 = s + vec2(1.0);
    const vec2 denom2 = 2.0*s + vec2(4.0);
    const vec2 denom3 = 6.0*s + vec2(18.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}

float ligamma_small_z_impl(const float s, const float z, const float s_inv)
{
    //  Float version:
    const float scale = pow(z, s);
    float sum = s_inv;
    const float z_sq = z*z;
    const float denom1 = s + 1.0;
    const float denom2 = 2.0*s + 4.0;
    const float denom3 = 6.0*s + 18.0;
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}
|
||||
|
||||
// Upper incomplete gamma function for small s and large z (implementation):
|
||||
vec4 uigamma_large_z_impl(const vec4 s, const vec4 z)
{
    //  Requires:   1.) s < ~0.5
    //              2.) z > ~0.775075
    //  Returns:    Gauss's continued fraction representation for the upper
    //              incomplete gamma function (4 terms).
    //  The "rolled up" continued fraction looks like this.  The denominator
    //  is truncated, and it's calculated "from the bottom up:"
    //      denom = vec4('inf');
    //      vec4 one = vec4(1.0);
    //      for(int i = 4; i > 0; --i)
    //      {
    //          denom = ((i * 2.0) - one) + z - s + (i * (s - i))/denom;
    //      }
    //  Unrolled and constant-unfolded for madds and parallelism:
    const vec4 numerator = pow(z, s) * exp(-z);
    vec4 denom = vec4(7.0) + z - s;
    denom = vec4(5.0) + z - s + (3.0*s - vec4(9.0))/denom;
    denom = vec4(3.0) + z - s + (2.0*s - vec4(4.0))/denom;
    denom = vec4(1.0) + z - s + (s - vec4(1.0))/denom;
    return numerator / denom;
}

vec3 uigamma_large_z_impl(const vec3 s, const vec3 z)
{
    //  vec3 version (see the vec4 overload for the fraction derivation):
    const vec3 numerator = pow(z, s) * exp(-z);
    vec3 denom = vec3(7.0) + z - s;
    denom = vec3(5.0) + z - s + (3.0*s - vec3(9.0))/denom;
    denom = vec3(3.0) + z - s + (2.0*s - vec3(4.0))/denom;
    denom = vec3(1.0) + z - s + (s - vec3(1.0))/denom;
    return numerator / denom;
}

vec2 uigamma_large_z_impl(const vec2 s, const vec2 z)
{
    //  vec2 version:
    const vec2 numerator = pow(z, s) * exp(-z);
    vec2 denom = vec2(7.0) + z - s;
    denom = vec2(5.0) + z - s + (3.0*s - vec2(9.0))/denom;
    denom = vec2(3.0) + z - s + (2.0*s - vec2(4.0))/denom;
    denom = vec2(1.0) + z - s + (s - vec2(1.0))/denom;
    return numerator / denom;
}

float uigamma_large_z_impl(const float s, const float z)
{
    //  Float version:
    const float numerator = pow(z, s) * exp(-z);
    float denom = 7.0 + z - s;
    denom = 5.0 + z - s + (3.0*s - 9.0)/denom;
    denom = 3.0 + z - s + (2.0*s - 4.0)/denom;
    denom = 1.0 + z - s + (s - 1.0)/denom;
    return numerator / denom;
}
|
||||
|
||||
// Normalized lower incomplete gamma function for small s (implementation):
|
||||
vec4 normalized_ligamma_impl(const vec4 s, const vec4 z,
    const vec4 s_inv, const vec4 gamma_s_inv)
{
    //  Requires:   1.) s < ~0.5
    //              2.) s_inv = 1/s (precomputed for outside reuse)
    //              3.) gamma_s_inv = 1/gamma(s) (precomputed for outside reuse)
    //  Returns:    Approximate the normalized lower incomplete gamma function
    //              for s < 0.5.  Since we only care about s < 0.5, we only need
    //              to evaluate two branches (not four) based on z.  Each branch
    //              uses four terms, with a max relative error of ~0.00182.  The
    //              branch threshold and specifics were adapted for fewer terms
    //              from Gil/Segura/Temme's paper here:
    //                  http://oai.cwi.nl/oai/asset/20433/20433B.pdf
    //  Evaluate both branches: Real branches test slower even when available.
    const vec4 thresh = vec4(0.775075);
    //  NOTE: bool4 and componentwise !/comparison are Cg-era constructs; a
    //  GLSL port needs bvec4 with greaterThan()/not() — TODO when converting.
    const bool4 z_is_large = z > thresh;
    const vec4 large_z = vec4(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const vec4 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    //  Combine the results from both branches (per-component select):
    return large_z * vec4(z_is_large) + small_z * vec4(!z_is_large);
}

vec3 normalized_ligamma_impl(const vec3 s, const vec3 z,
    const vec3 s_inv, const vec3 gamma_s_inv)
{
    //  vec3 version:
    const vec3 thresh = vec3(0.775075);
    const bool3 z_is_large = z > thresh;
    const vec3 large_z = vec3(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const vec3 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    return large_z * vec3(z_is_large) + small_z * vec3(!z_is_large);
}

vec2 normalized_ligamma_impl(const vec2 s, const vec2 z,
    const vec2 s_inv, const vec2 gamma_s_inv)
{
    //  vec2 version:
    const vec2 thresh = vec2(0.775075);
    const bool2 z_is_large = z > thresh;
    const vec2 large_z = vec2(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const vec2 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    return large_z * vec2(z_is_large) + small_z * vec2(!z_is_large);
}

float normalized_ligamma_impl(const float s, const float z,
    const float s_inv, const float gamma_s_inv)
{
    //  Float version:
    const float thresh = 0.775075;
    const bool z_is_large = z > thresh;
    const float large_z = 1.0 - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const float small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    return large_z * float(z_is_large) + small_z * float(!z_is_large);
}
|
||||
|
||||
// Normalized lower incomplete gamma function for small s:
|
||||
vec4 normalized_ligamma(const vec4 s, const vec4 z)
{
    //  Requires:   s < ~0.5
    //  Returns:    Approximate the normalized lower incomplete gamma function
    //              for s < 0.5.  See normalized_ligamma_impl() for details.
    //  Precompute the two reciprocals the _impl version expects, so callers
    //  who don't need them can stay oblivious:
    const vec4 recip_s = vec4(1.0)/s;
    const vec4 recip_gamma_s = vec4(1.0)/gamma_impl(s, recip_s);
    return normalized_ligamma_impl(s, z, recip_s, recip_gamma_s);
}

vec3 normalized_ligamma(const vec3 s, const vec3 z)
{
    //  vec3 version:
    const vec3 recip_s = vec3(1.0)/s;
    const vec3 recip_gamma_s = vec3(1.0)/gamma_impl(s, recip_s);
    return normalized_ligamma_impl(s, z, recip_s, recip_gamma_s);
}

vec2 normalized_ligamma(const vec2 s, const vec2 z)
{
    //  vec2 version:
    const vec2 recip_s = vec2(1.0)/s;
    const vec2 recip_gamma_s = vec2(1.0)/gamma_impl(s, recip_s);
    return normalized_ligamma_impl(s, z, recip_s, recip_gamma_s);
}

float normalized_ligamma(const float s, const float z)
{
    //  Float version:
    const float recip_s = 1.0/s;
    const float recip_gamma_s = 1.0/gamma_impl(s, recip_s);
    return normalized_ligamma_impl(s, z, recip_s, recip_gamma_s);
}
|
||||
|
||||
|
||||
#endif // SPECIAL_FUNCTIONS_H
|
||||
|
||||
|
492
crt/shaders/crt-royale/src/special-functions.h
Normal file
492
crt/shaders/crt-royale/src/special-functions.h
Normal file
|
@ -0,0 +1,492 @@
|
|||
///////////////////////////////// MIT LICENSE ////////////////////////////////
|
||||
|
||||
// Copyright (C) 2014 TroggleMonkey
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
|
||||
|
||||
///////////////////////////////// DESCRIPTION ////////////////////////////////
|
||||
|
||||
// This file implements the following mathematical special functions:
|
||||
// 1.) erf() = 2/sqrt(pi) * indefinite_integral(e**(-x**2))
|
||||
// 2.) gamma(s), a real-numbered extension of the integer factorial function
|
||||
// It also implements normalized_ligamma(s, z), a normalized lower incomplete
|
||||
// gamma function for s < 0.5 only. Both gamma() and normalized_ligamma() can
|
||||
// be called with an _impl suffix to use an implementation version with a few
|
||||
// extra precomputed parameters (which may be useful for the caller to reuse).
|
||||
// See below for details.
|
||||
//
|
||||
// Design Rationale:
|
||||
// Pretty much every line of code in this file is duplicated four times for
|
||||
// different input types (vec4/vec3/vec2/float). This is unfortunate,
|
||||
// but Cg doesn't allow function templates. Macros would be far less verbose,
|
||||
// but they would make the code harder to document and read. I don't expect
|
||||
// these functions will require a whole lot of maintenance changes unless
|
||||
// someone ever has need for more robust incomplete gamma functions, so code
|
||||
// duplication seems to be the lesser evil in this case.
|
||||
|
||||
|
||||
/////////////////////////// GAUSSIAN ERROR FUNCTION //////////////////////////
|
||||
|
||||
vec4 erf6(vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    An Abramowitz/Stegun rational approximation of erf(), where:
    //                  erf(x) = 2/sqrt(pi) * integral(e**(-x**2))
    //              Max absolute error ~2.5*10**-5 with solid numerical
    //              robustness and efficiency.  See:
    //                  https://en.wikipedia.org/wiki/Error_function#Approximation_with_elementary_functions
    const vec4 one = vec4(1.0);
    //  The approximation is valid for x >= 0, so evaluate on abs(x) and
    //  restore the sign at the end (erf is an odd function).
    const vec4 t = one/(one + 0.47047*abs(x));
    //  Horner-form cubic in t, damped by a Gaussian:
    const vec4 poly = t*(0.3480242 + t*(-0.0958798 + t*0.7478556));
    return sign(x) * (one - poly*exp(-(x*x)));
}
|
||||
|
||||
vec3 erf6(const vec3 x)
{
    //  vec3 version: Abramowitz/Stegun approximation; see the vec4 overload.
    const vec3 one = vec3(1.0);
    const vec3 t = one/(one + 0.47047*abs(x));
    const vec3 poly = t*(0.3480242 + t*(-0.0958798 + t*0.7478556));
    return sign(x) * (one - poly*exp(-(x*x)));
}
|
||||
|
||||
vec2 erf6(const vec2 x)
{
    //  vec2 version: Abramowitz/Stegun approximation; see the vec4 overload.
    const vec2 one = vec2(1.0);
    const vec2 t = one/(one + 0.47047*abs(x));
    const vec2 poly = t*(0.3480242 + t*(-0.0958798 + t*0.7478556));
    return sign(x) * (one - poly*exp(-(x*x)));
}
|
||||
|
||||
float erf6(const float x)
{
    //  Float version: Abramowitz/Stegun approximation; see the vec4 overload.
    const float t = 1.0/(1.0 + 0.47047*abs(x));
    const float poly = t*(0.3480242 + t*(-0.0958798 + t*0.7478556));
    return sign(x) * (1.0 - poly*exp(-(x*x)));
}
|
||||
|
||||
vec4 erft(const vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    Approximate erf() with the hyperbolic tangent.  The error is
    //              visually noticeable, but it's blazing fast and perceptually
    //              close...at least on ATI hardware.  See:
    //                  http://www.maplesoft.com/applications/view.aspx?SID=5525&view=html
    //  Warning:    Only use this if your hardware drivers correctly implement
    //              tanh(): My nVidia 8800GTS returns garbage output.
    //  NOTE(review): 1.202760580 is a fitted scale constant from the link
    //  above (tanh(k*x) matched to erf(x)); confirm against that derivation.
    return tanh(1.202760580 * x);
}
|
||||
|
||||
vec3 erft(const vec3 x)
{
    //  vec3 version: tanh-based fast erf() approximation; see the vec4
    //  overload for accuracy caveats and the hardware tanh() warning.
    return tanh(1.202760580 * x);
}
|
||||
|
||||
vec2 erft(const vec2 x)
{
    //  vec2 version: tanh-based fast erf() approximation; see the vec4
    //  overload for accuracy caveats and the hardware tanh() warning.
    return tanh(1.202760580 * x);
}
|
||||
|
||||
float erft(const float x)
{
    //  Float version: tanh-based fast erf() approximation; see the vec4
    //  overload for accuracy caveats and the hardware tanh() warning.
    return tanh(1.202760580 * x);
}
|
||||
|
||||
vec4 erf(const vec4 x)
{
    //  Requires:   x is the standard parameter to erf().
    //  Returns:    Some approximation of erf(x), depending on user settings:
    //              the accurate erf6() polynomial by default, or the faster
    //              tanh-based erft() when ERF_FAST_APPROXIMATION is defined.
#ifndef ERF_FAST_APPROXIMATION
    return erf6(x);
#else
    return erft(x);
#endif
}
|
||||
|
||||
vec3 erf(const vec3 x)
{
    //  vec3 version: dispatch to erf6() (accurate) or erft() (fast),
    //  depending on ERF_FAST_APPROXIMATION.
#ifndef ERF_FAST_APPROXIMATION
    return erf6(x);
#else
    return erft(x);
#endif
}
|
||||
|
||||
vec2 erf(const vec2 x)
{
    //  vec2 version: dispatch to erf6() (accurate) or erft() (fast),
    //  depending on ERF_FAST_APPROXIMATION.
#ifndef ERF_FAST_APPROXIMATION
    return erf6(x);
#else
    return erft(x);
#endif
}
|
||||
|
||||
float erf(const float x)
{
    //  Float version: dispatch to erf6() (accurate) or erft() (fast),
    //  depending on ERF_FAST_APPROXIMATION.
#ifndef ERF_FAST_APPROXIMATION
    return erf6(x);
#else
    return erft(x);
#endif
}
|
||||
|
||||
/////////////////////////// COMPLETE GAMMA FUNCTION //////////////////////////
|
||||
|
||||
vec4 gamma_impl(const vec4 s, const vec4 s_inv)
{
    //  Requires:   1.) s is the standard parameter to the gamma function, and
    //                  it should lie in the [0, 36] range.
    //              2.) s_inv = 1.0/s.  This implementation function requires
    //                  the caller to precompute this value, giving users the
    //                  opportunity to reuse it.
    //  Returns:    Return approximate gamma function (real-numbered factorial)
    //              output using the Lanczos approximation with two coefficients
    //              calculated using Paul Godfrey's method here:
    //                  http://my.fit.edu/~gabdo/gamma.txt
    //              An optimal g value for s in [0, 36] is ~1.12906830989, with
    //              a maximum relative error of 0.000463 for 2**16 equally
    //              evals.  We could use three coeffs (0.0000346 error) without
    //              hurting latency, but this allows more parallelism with
    //              outside instructions.
    const vec4 g = vec4(1.12906830989);
    const vec4 c0 = vec4(0.8109119309638332633713423362694399653724431);
    const vec4 c1 = vec4(0.4808354605142681877121661197951496120000040);
    //  e is Euler's number, the base of the Lanczos power term below:
    const vec4 e = vec4(2.71828182845904523536028747135266249775724709);
    const vec4 sph = s + vec4(0.5);
    //  Two-coefficient Lanczos partial-fraction series:
    const vec4 lanczos_sum = c0 + c1/(s + vec4(1.0));
    const vec4 base = (sph + g)/e;  // or (s + g + vec4(0.5))/e
    //  gamma(s + 1) = base**sph * lanczos_sum; divide by s for gamma(s).
    //  This has less error for small s's than (s -= 1.0) at the beginning.
    return (pow(base, sph) * lanczos_sum) * s_inv;
}
|
||||
|
||||
vec3 gamma_impl(const vec3 s, const vec3 s_inv)
{
    //  vec3 version: two-coefficient Lanczos approximation; see the vec4
    //  overload for parameter requirements, constants, and error bounds.
    const vec3 g = vec3(1.12906830989);
    const vec3 c0 = vec3(0.8109119309638332633713423362694399653724431);
    const vec3 c1 = vec3(0.4808354605142681877121661197951496120000040);
    const vec3 e = vec3(2.71828182845904523536028747135266249775724709);
    const vec3 sph = s + vec3(0.5);
    const vec3 lanczos_sum = c0 + c1/(s + vec3(1.0));
    const vec3 base = (sph + g)/e;
    //  gamma(s + 1) = base**sph * lanczos_sum; divide by s for gamma(s):
    return (pow(base, sph) * lanczos_sum) * s_inv;
}
|
||||
|
||||
vec2 gamma_impl(const vec2 s, const vec2 s_inv)
{
    //  vec2 version: two-coefficient Lanczos approximation; see the vec4
    //  overload for parameter requirements, constants, and error bounds.
    const vec2 g = vec2(1.12906830989);
    const vec2 c0 = vec2(0.8109119309638332633713423362694399653724431);
    const vec2 c1 = vec2(0.4808354605142681877121661197951496120000040);
    const vec2 e = vec2(2.71828182845904523536028747135266249775724709);
    const vec2 sph = s + vec2(0.5);
    const vec2 lanczos_sum = c0 + c1/(s + vec2(1.0));
    const vec2 base = (sph + g)/e;
    //  gamma(s + 1) = base**sph * lanczos_sum; divide by s for gamma(s):
    return (pow(base, sph) * lanczos_sum) * s_inv;
}
|
||||
|
||||
float gamma_impl(const float s, const float s_inv)
{
    //  Float version: two-coefficient Lanczos approximation; see the vec4
    //  overload for parameter requirements, constants, and error bounds.
    const float g = 1.12906830989;
    const float c0 = 0.8109119309638332633713423362694399653724431;
    const float c1 = 0.4808354605142681877121661197951496120000040;
    const float e = 2.71828182845904523536028747135266249775724709;
    const float sph = s + 0.5;
    const float lanczos_sum = c0 + c1/(s + 1.0);
    const float base = (sph + g)/e;
    //  gamma(s + 1) = base**sph * lanczos_sum; divide by s for gamma(s):
    return (pow(base, sph) * lanczos_sum) * s_inv;
}
|
||||
|
||||
vec4 gamma(const vec4 s)
{
    //  Requires:   s is the standard parameter to the gamma function, and it
    //              should lie in the [0, 36] range.
    //  Returns:    Approximate gamma function output with a maximum relative
    //              error of 0.000463.  See gamma_impl for details.
    const vec4 s_inv = vec4(1.0)/s;
    return gamma_impl(s, s_inv);
}
|
||||
|
||||
vec3 gamma(const vec3 s)
{
    //  vec3 version: convenience wrapper computing 1/s for gamma_impl.
    const vec3 s_inv = vec3(1.0)/s;
    return gamma_impl(s, s_inv);
}
|
||||
|
||||
vec2 gamma(const vec2 s)
{
    //  vec2 version: convenience wrapper computing 1/s for gamma_impl.
    const vec2 s_inv = vec2(1.0)/s;
    return gamma_impl(s, s_inv);
}
|
||||
|
||||
float gamma(const float s)
{
    //  Float version: convenience wrapper computing 1/s for gamma_impl.
    const float s_inv = 1.0/s;
    return gamma_impl(s, s_inv);
}
|
||||
|
||||
//////////////// INCOMPLETE GAMMA FUNCTIONS (RESTRICTED INPUT) ///////////////
|
||||
|
||||
// Lower incomplete gamma function for small s and z (implementation):
|
||||
vec4 ligamma_small_z_impl(const vec4 s, const vec4 z, const vec4 s_inv)
{
    //  Requires:   1.) s < ~0.5
    //              2.) z <= ~0.775075
    //              3.) s_inv = 1.0/s (precomputed for outside reuse)
    //  Returns:    A series representation for the lower incomplete gamma
    //              function for small s and small z (4 terms):
    //                  ligamma(s, z) ~= z**s * sum over k of
    //                      (-1)**k * z**k / (k! * (s + k)),  k = 0..3.
    //  The actual "rolled up" summation looks like (iteration i uses
    //  factorial i! and pole s + i; the original pseudocode used an
    //  undefined k and started the factorial loop at 0):
    //      last_sign = 1.0; last_pow = 1.0; last_factorial = 1.0;
    //      sum = last_sign * last_pow / (s * last_factorial)
    //      for(int i = 1; i <= 3; ++i)
    //      {
    //          last_sign *= -1.0; last_pow *= z; last_factorial *= i;
    //          sum += last_sign * last_pow / ((s + i) * last_factorial);
    //      }
    //  Unrolled, constant-unfolded and arranged for madds and parallelism:
    const vec4 scale = pow(z, s);
    vec4 sum = s_inv; // Summation iteration 0 result
    //  Summation iterations 1, 2, and 3.  Each denomk below is the unfolded
    //  constant k! * (s + k): 1*(s+1), 2*(s+2) = 2s+4, 6*(s+3) = 6s+18.
    const vec4 z_sq = z*z;
    const vec4 denom1 = s + vec4(1.0);
    const vec4 denom2 = 2.0*s + vec4(4.0);
    const vec4 denom3 = 6.0*s + vec4(18.0);
    //vec4 denom4 = 24.0*s + vec4(96.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    //sum += z_sq * z_sq / denom4;
    //  Scale and return:
    return scale * sum;
}
|
||||
|
||||
vec3 ligamma_small_z_impl(const vec3 s, const vec3 z, const vec3 s_inv)
{
    //  vec3 version: 4-term series for the lower incomplete gamma function;
    //  see the vec4 overload for parameter requirements and the derivation
    //  of the unfolded denominators (k! * (s + k)).
    const vec3 scale = pow(z, s);
    vec3 sum = s_inv;
    const vec3 z_sq = z*z;
    const vec3 denom1 = s + vec3(1.0);
    const vec3 denom2 = 2.0*s + vec3(4.0);
    const vec3 denom3 = 6.0*s + vec3(18.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}
|
||||
|
||||
vec2 ligamma_small_z_impl(const vec2 s, const vec2 z, const vec2 s_inv)
{
    //  vec2 version: 4-term series for the lower incomplete gamma function;
    //  see the vec4 overload for parameter requirements and the derivation
    //  of the unfolded denominators (k! * (s + k)).
    const vec2 scale = pow(z, s);
    vec2 sum = s_inv;
    const vec2 z_sq = z*z;
    const vec2 denom1 = s + vec2(1.0);
    const vec2 denom2 = 2.0*s + vec2(4.0);
    const vec2 denom3 = 6.0*s + vec2(18.0);
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}
|
||||
|
||||
float ligamma_small_z_impl(const float s, const float z, const float s_inv)
{
    //  Float version: 4-term series for the lower incomplete gamma function;
    //  see the vec4 overload for parameter requirements and the derivation
    //  of the unfolded denominators (k! * (s + k)).
    const float scale = pow(z, s);
    float sum = s_inv;
    const float z_sq = z*z;
    const float denom1 = s + 1.0;
    const float denom2 = 2.0*s + 4.0;
    const float denom3 = 6.0*s + 18.0;
    sum -= z/denom1;
    sum += z_sq/denom2;
    sum -= z * z_sq/denom3;
    return scale * sum;
}
|
||||
|
||||
// Upper incomplete gamma function for small s and large z (implementation):
|
||||
vec4 uigamma_large_z_impl(const vec4 s, const vec4 z)
{
    //  Requires:   1.) s < ~0.5
    //              2.) z > ~0.775075
    //  Returns:    Gauss's continued fraction representation for the upper
    //              incomplete gamma function (4 terms).
    //  The "rolled up" continued fraction looks like this.  The denominator
    //  is truncated, and it's calculated "from the bottom up:"
    //      denom = vec4('inf');
    //      vec4 one = vec4(1.0);
    //      for(int i = 4; i > 0; --i)
    //      {
    //          denom = ((i * 2.0) - one) + z - s + (i * (s - i))/denom;
    //      }
    //  Unrolled and constant-unfolded for madds and parallelism:
    const vec4 numerator = pow(z, s) * exp(-z);
    //  Bottom-up denominator for i = 4, 3, 2, 1; the i = 4 line drops its
    //  (s - i)/denom term because the initial denom is infinite:
    vec4 denom = vec4(7.0) + z - s;
    denom = vec4(5.0) + z - s + (3.0*s - vec4(9.0))/denom;
    denom = vec4(3.0) + z - s + (2.0*s - vec4(4.0))/denom;
    denom = vec4(1.0) + z - s + (s - vec4(1.0))/denom;
    return numerator / denom;
}
|
||||
|
||||
vec3 uigamma_large_z_impl(const vec3 s, const vec3 z)
{
    //  vec3 version: 4-term Gauss continued fraction for the upper incomplete
    //  gamma function; see the vec4 overload for the rolled-up form.
    const vec3 numerator = pow(z, s) * exp(-z);
    vec3 denom = vec3(7.0) + z - s;
    denom = vec3(5.0) + z - s + (3.0*s - vec3(9.0))/denom;
    denom = vec3(3.0) + z - s + (2.0*s - vec3(4.0))/denom;
    denom = vec3(1.0) + z - s + (s - vec3(1.0))/denom;
    return numerator / denom;
}
|
||||
|
||||
vec2 uigamma_large_z_impl(const vec2 s, const vec2 z)
{
    //  vec2 version: 4-term Gauss continued fraction for the upper incomplete
    //  gamma function; see the vec4 overload for the rolled-up form.
    const vec2 numerator = pow(z, s) * exp(-z);
    vec2 denom = vec2(7.0) + z - s;
    denom = vec2(5.0) + z - s + (3.0*s - vec2(9.0))/denom;
    denom = vec2(3.0) + z - s + (2.0*s - vec2(4.0))/denom;
    denom = vec2(1.0) + z - s + (s - vec2(1.0))/denom;
    return numerator / denom;
}
|
||||
|
||||
float uigamma_large_z_impl(const float s, const float z)
{
    //  Float version: 4-term Gauss continued fraction for the upper incomplete
    //  gamma function; see the vec4 overload for the rolled-up form.
    const float numerator = pow(z, s) * exp(-z);
    float denom = 7.0 + z - s;
    denom = 5.0 + z - s + (3.0*s - 9.0)/denom;
    denom = 3.0 + z - s + (2.0*s - 4.0)/denom;
    denom = 1.0 + z - s + (s - 1.0)/denom;
    return numerator / denom;
}
|
||||
|
||||
// Normalized lower incomplete gamma function for small s (implementation):
|
||||
vec4 normalized_ligamma_impl(const vec4 s, const vec4 z,
    const vec4 s_inv, const vec4 gamma_s_inv)
{
    //  Requires:   1.) s < ~0.5
    //              2.) s_inv = 1/s (precomputed for outside reuse)
    //              3.) gamma_s_inv = 1/gamma(s) (precomputed for outside reuse)
    //  Returns:    Approximate the normalized lower incomplete gamma function
    //              for s < 0.5.  Since we only care about s < 0.5, we only need
    //              to evaluate two branches (not four) based on z.  Each branch
    //              uses four terms, with a max relative error of ~0.00182.  The
    //              branch threshold and specifics were adapted for fewer terms
    //              from Gil/Segura/Temme's paper here:
    //                  http://oai.cwi.nl/oai/asset/20433/20433B.pdf
    //  Evaluate both branches: Real branches test slower even when available.
    const vec4 thresh = vec4(0.775075);
    const bvec4 z_is_large = greaterThan(z, thresh);
    //  Per-component 1.0/0.0 mask selecting the large-z branch:
    const vec4 z_size_check = vec4(z_is_large.x ? 1.0 : 0.0, z_is_large.y ? 1.0 : 0.0, z_is_large.z ? 1.0 : 0.0, z_is_large.w ? 1.0 : 0.0);
    //  Large z: complement of the normalized upper incomplete gamma function.
    const vec4 large_z = vec4(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    //  Small z: series representation of the lower incomplete gamma function.
    const vec4 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    //  Combine the results from both branches with COMPLEMENTARY masks.
    //  (Bug fix: the previous code multiplied both branches by the same mask,
    //  zeroing the small-z result and summing both where z is large.)
    return large_z * z_size_check + small_z * (vec4(1.0) - z_size_check);
}
|
||||
|
||||
vec3 normalized_ligamma_impl(const vec3 s, const vec3 z,
    const vec3 s_inv, const vec3 gamma_s_inv)
{
    //  vec3 version: see the vec4 overload for requirements and details.
    const vec3 thresh = vec3(0.775075);
    const bvec3 z_is_large = greaterThan(z, thresh);
    //  Per-component 1.0/0.0 mask selecting the large-z branch:
    const vec3 z_size_check = vec3(z_is_large.x ? 1.0 : 0.0, z_is_large.y ? 1.0 : 0.0, z_is_large.z ? 1.0 : 0.0);
    const vec3 large_z = vec3(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const vec3 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    //  Bug fix: blend the branches with complementary masks (the previous
    //  code multiplied both branches by the same mask).
    return large_z * z_size_check + small_z * (vec3(1.0) - z_size_check);
}
|
||||
|
||||
vec2 normalized_ligamma_impl(const vec2 s, const vec2 z,
    const vec2 s_inv, const vec2 gamma_s_inv)
{
    //  vec2 version: see the vec4 overload for requirements and details.
    const vec2 thresh = vec2(0.775075);
    const bvec2 z_is_large = greaterThan(z, thresh);
    //  Per-component 1.0/0.0 mask selecting the large-z branch:
    const vec2 z_size_check = vec2(z_is_large.x ? 1.0 : 0.0, z_is_large.y ? 1.0 : 0.0);
    const vec2 large_z = vec2(1.0) - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const vec2 small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    //  Bug fix: blend the branches with complementary masks (the previous
    //  code multiplied both branches by the same mask).
    return large_z * z_size_check + small_z * (vec2(1.0) - z_size_check);
}
|
||||
|
||||
float normalized_ligamma_impl(const float s, const float z,
    const float s_inv, const float gamma_s_inv)
{
    //  Float version: see the vec4 overload for requirements and details.
    const float thresh = 0.775075;
    //  1.0 if z takes the large-z branch, 0.0 otherwise:
    float z_size_check = 0.0;
    if (z > thresh) z_size_check = 1.0;
    const float large_z = 1.0 - uigamma_large_z_impl(s, z) * gamma_s_inv;
    const float small_z = ligamma_small_z_impl(s, z, s_inv) * gamma_s_inv;
    //  Bug fix: blend the branches with complementary weights (the previous
    //  code multiplied both branches by the same mask).
    return large_z * z_size_check + small_z * (1.0 - z_size_check);
}
|
||||
|
||||
// Normalized lower incomplete gamma function for small s:
|
||||
vec4 normalized_ligamma(const vec4 s, const vec4 z)
{
    //  Requires:   s < ~0.5
    //  Returns:    Approximate the normalized lower incomplete gamma function
    //              for s < 0.5.  See normalized_ligamma_impl() for details.
    //  Precompute the reusable reciprocals, then defer to the implementation:
    const vec4 recip_s = vec4(1.0)/s;
    return normalized_ligamma_impl(s, z, recip_s,
        vec4(1.0)/gamma_impl(s, recip_s));
}
|
||||
|
||||
vec3 normalized_ligamma(const vec3 s, const vec3 z)
{
    //  vec3 version: precompute the reusable reciprocals and defer to the
    //  implementation function.
    const vec3 recip_s = vec3(1.0)/s;
    return normalized_ligamma_impl(s, z, recip_s,
        vec3(1.0)/gamma_impl(s, recip_s));
}
|
||||
|
||||
vec2 normalized_ligamma(const vec2 s, const vec2 z)
{
    //  vec2 version: precompute the reusable reciprocals and defer to the
    //  implementation function.
    const vec2 recip_s = vec2(1.0)/s;
    return normalized_ligamma_impl(s, z, recip_s,
        vec2(1.0)/gamma_impl(s, recip_s));
}
|
||||
|
||||
float normalized_ligamma(const float s, const float z)
{
    //  Float version: precompute the reusable reciprocals and defer to the
    //  implementation function.
    const float recip_s = 1.0/s;
    return normalized_ligamma_impl(s, z, recip_s,
        1.0/gamma_impl(s, recip_s));
}
|
|
@ -12,6 +12,7 @@
|
|||
// this shader: One does a viewport-scale bloom, and the other skips it. The
|
||||
// latter benefits from a higher bloom_approx_scale_x, so save both separately:
|
||||
//  Fixed horizontal size (in pixels) of the bloom-approx pass:
const float bloom_approx_size_x = 320.0;
//  NOTE(review): this duplicates bloom_approx_size_x; the original author
//  wrote "dunno why this is necessary" - confirm whether the two constants
//  can be unified or genuinely serve different passes.
const float bloom_approx_scale_x = 320.0; //dunno why this is necessary
//  Higher-resolution size used by the fake-bloom variant (per the note above,
//  the pass that skips the viewport-scale bloom benefits from a larger size):
const float bloom_approx_size_x_for_fake = 400.0;
|
||||
// Copy the viewport-relative scales of the phosphor mask resize passes
|
||||
// (MASK_RESIZE and the pass immediately preceding it):
|
||||
|
|
Loading…
Reference in a new issue