Convert floating-point numbers to decimal digits in GLSL
Asked Answered
C

3

19

As others have discussed, GLSL lacks any kind of printf debugging.

But sometimes I really want to examine numeric values while debugging my shaders.

I've been trying to create a visual debugging tool. I found that it's possible to render an arbitrary series of digits fairly easily in a shader, if you work with a sampler2D in which the digits 0123456789 have been rendered in monospace. Basically, you just juggle your x coordinate.

Now, to use this to examine a floating-point number, I need an algorithm for converting a float to a sequence of decimal digits, such as you might find in any printf implementation.

Unfortunately, as far as I understand the topic, these algorithms seem to need to represent the floating-point number in a higher-precision format, and I don't see how this is going to be possible in GLSL where I seem to have only 32-bit floats available.

For this reason, I think this question is not a duplicate of any general "how does printf work" question, but rather specifically about how such algorithms can be made to work under the constraints of GLSL. I've seen this question and answer, but have no idea what's going on there.

The algorithms I've tried aren't very good.

My first try, marked Version A (commented out) seemed pretty bad: to take three random examples, RenderDecimal(1.0) rendered as 1.099999702, RenderDecimal(2.5) gave me 2.599999246 and RenderDecimal(2.6) came out as 2.699999280.

My second try, marked Version B, seemed slightly better: 1.0 and 2.6 both come out fine, but RenderDecimal(2.5) still mismatches an apparent rounding-up of the 5 with the fact that the residual is 0.099.... The result appears as 2.599000022.

My minimal/complete/verifiable example, below, starts with some shortish GLSL 1.20 code, and then I happen to have chosen Python 2.x for the rest, just to get the shaders compiled and the textures loaded and rendered. It requires the pygame, NumPy, PyOpenGL and PIL third-party packages. Note that the Python is really just boilerplate and could be trivially (though tediously) re-written in C or anything else. Only the GLSL code at the top is critical for this question, and for this reason I don't think the python or python 2.x tags would be helpful.

It requires the following image to be saved as digits.png:

0123456789

# Vertex shader (GLSL 1.20): passes the raw vertex XY through to the fragment
# shader as vFragCoordinate, so that (with the gluOrtho2D projection set up in
# World.__init__) fragments know their own position in window pixels.
vertexShaderSource = """\

varying vec2 vFragCoordinate;
void main(void)
{
    vFragCoordinate = gl_Vertex.xy;
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}

"""
# Fragment shader (GLSL 1.20): draws the digit texture as the background, then
# RenderDecimal() overlays the decimal expansion of a test value by sampling
# individual glyphs out of the '0123456789' strip.  "Version B" inside the
# string is the OP's second (still imperfect) digit-extraction attempt;
# "Version A" is the first attempt, left commented out for comparison.  The
# rounding artefacts described in the question originate in this string, so it
# is deliberately left untouched.
fragmentShaderSource = """\

varying vec2      vFragCoordinate;

uniform vec2      uTextureSize;
uniform sampler2D uTextureSlotNumber;

float OrderOfMagnitude( float x )
{
    return x == 0.0 ? 0.0 : floor( log( abs( x ) ) / log( 10.0 ) );
}
void RenderDecimal( float value )
{
    // Assume that the texture to which uTextureSlotNumber refers contains
    // a rendering of the digits '0123456789' packed together, such that
    const vec2 startOfDigitsInTexture = vec2( 0, 0 ); // the lower-left corner of the first digit starts here and
    const vec2 sizeOfDigit = vec2( 100, 125 ); // each digit spans this many pixels
    const float nSpaces = 10.0; // assume we have this many digits' worth of space to render in
    
    value = abs( value );
    vec2 pos = vFragCoordinate - startOfDigitsInTexture;
    float dpstart = max( 0.0, OrderOfMagnitude( value ) );
    float decimal_position = dpstart - floor( pos.x / sizeOfDigit.x );
    float remainder = mod( pos.x, sizeOfDigit.x );
    
    if( pos.x >= 0 && pos.x < sizeOfDigit.x * nSpaces && pos.y >= 0 && pos.y < sizeOfDigit.y  )
    {
        float digit_value;
        
        // Version B
        float dp, running_value = value;
        for( dp = dpstart; dp >= decimal_position; dp -= 1.0 )
        {
            float base = pow( 10.0, dp );
            digit_value = mod( floor( running_value / base ), 10.0 );
            running_value -= digit_value * base;
        }
        
        // Version A
        //digit_value = mod( floor( value * pow( 10.0, -decimal_position ) ), 10.0 );
        
        

        vec2 textureSourcePosition = vec2( startOfDigitsInTexture.x + remainder + digit_value * sizeOfDigit.x, startOfDigitsInTexture.y + pos.y );
        gl_FragColor = texture2D( uTextureSlotNumber, textureSourcePosition / uTextureSize );
    }
    
    // Render the decimal point
    if( ( decimal_position == -1.0 && remainder / sizeOfDigit.x < 0.1 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) ||
        ( decimal_position ==  0.0 && remainder / sizeOfDigit.x > 0.9 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) )
    {
        gl_FragColor = texture2D( uTextureSlotNumber, ( startOfDigitsInTexture + sizeOfDigit * vec2( 1.5, 0.5 ) ) / uTextureSize );
    }
}

void main(void)
{
    gl_FragColor = texture2D( uTextureSlotNumber, vFragCoordinate / uTextureSize );
    RenderDecimal( 2.5 ); // for current demonstration purposes, just a constant
}

"""

# Python (PyOpenGL) code to demonstrate the above
# (Note: the same OpenGL calls could be made from any language)

import os, sys, time

import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *

import pygame, pygame.locals # just for getting a canvas to draw on

try: from PIL import Image  # PIL.Image module for loading image from disk
except ImportError: import Image  # old PIL didn't package its submodules on the path

import numpy # for manipulating pixel values on the Python side

def CompileShader( type, source ):
    """Compile one GLSL shader of the given `type` (GL_VERTEX_SHADER or
    GL_FRAGMENT_SHADER) from the `source` string and return its GL id.

    Raises an Exception carrying the driver's info log if compilation fails.
    """
    shader = glCreateShader( type )
    glShaderSource( shader, source )
    glCompileShader( shader )
    result = glGetShaderiv( shader, GL_COMPILE_STATUS )
    if result != 1:
        log = glGetShaderInfoLog( shader )
        # PyOpenGL returns the info log as bytes under Python 3; decode so the
        # string concatenation below works on both Python 2 and Python 3.
        if isinstance( log, bytes ): log = log.decode( 'ascii', 'replace' )
        raise Exception( "Shader compilation failed:\n" + log )
    return shader

class World:
    """Owns the pygame OpenGL window and the compiled shader program.

    Sets up an orthographic projection so vertex coordinates are window
    pixels, enables alpha blending, and caches the uniform locations that
    Texture.Draw needs every frame.
    """

    def __init__( self, width, height ):

        self.window = pygame.display.set_mode( ( width, height ), pygame.OPENGL | pygame.DOUBLEBUF )

        # compile shaders
        vertexShader = CompileShader( GL_VERTEX_SHADER, vertexShaderSource )
        fragmentShader = CompileShader( GL_FRAGMENT_SHADER, fragmentShaderSource )
        # build shader program
        self.program = glCreateProgram()
        glAttachShader( self.program, vertexShader )
        glAttachShader( self.program, fragmentShader )
        glLinkProgram( self.program )
        # try to activate/enable shader program, handling errors wisely
        # (glUseProgram raises on a failed link, so print the link log first)
        try:
            glUseProgram( self.program )
        except OpenGL.error.GLError:
            print( glGetProgramInfoLog( self.program ) )
            raise

        # enable alpha blending
        glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE )
        glEnable( GL_DEPTH_TEST )
        glEnable( GL_BLEND )
        glBlendEquation( GL_FUNC_ADD )
        glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA )

        # set projection and background color; gluOrtho2D maps x in [0,width],
        # y in [0,height] to clip space, so quads are specified in pixels
        gluOrtho2D( 0, width, 0, height )
        glClearColor( 0.0, 0.0, 0.0, 1.0 )
        
        # cache uniform locations used every frame by Texture.Draw
        self.uTextureSlotNumber_addr = glGetUniformLocation( self.program, 'uTextureSlotNumber' )
        self.uTextureSize_addr = glGetUniformLocation( self.program, 'uTextureSize' )

    def RenderFrame( self, *textures ):
        # Clear, draw each texture's quad, then swap the double buffers.
        glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
        for t in textures: t.Draw( world=self )
        pygame.display.flip()
        
    def Close( self ):
        # Tear down the pygame display (and with it the GL context).
        pygame.display.quit()
        
    def Capture( self ):
        # Read back the framebuffer as a PIL RGB image, flipped vertically
        # because GL's origin is bottom-left while PIL's is top-left.
        w, h = self.window.get_size()
        rawRGB = glReadPixels( 0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE )
        return Image.frombuffer( 'RGB', ( w, h ), rawRGB, 'raw', 'RGB', 0, 1 ).transpose( Image.FLIP_TOP_BOTTOM )
    
class Texture:
    """Uploads an image into a GL texture unit and records a display list
    that draws it as a quad at its natural pixel size."""

    def __init__( self, source, slot=0, position=(0,0,0) ):
        
        # wrangle array: accept a PIL image or array and normalize to float RGBA
        source = numpy.array( source )
        if source.dtype.type not in [ numpy.float32, numpy.float64 ]: source = source.astype( float ) / 255.0
        while source.ndim < 3: source = numpy.expand_dims( source, -1 )
        if source.shape[ 2 ] == 1: source = source[ :, :, [ 0, 0, 0 ] ]    # LUMINANCE -> RGB
        if source.shape[ 2 ] == 2: source = source[ :, :, [ 0, 0, 0, 1 ] ] # LUMINANCE_ALPHA -> RGBA
        if source.shape[ 2 ] == 3: source = source[ :, :, [ 0, 1, 2, 2 ] ]; source[ :, :, 3 ] = 1.0  # RGB -> RGBA
        # now it can be transferred as GL_RGBA and GL_FLOAT
        
        # housekeeping
        self.textureSize = [ source.shape[ 1 ], source.shape[ 0 ] ]  # (width, height) in pixels
        self.textureSlotNumber = slot
        self.textureSlotCode = getattr( OpenGL.GL, 'GL_TEXTURE%d' % slot )
        self.listNumber = slot + 1  # NOTE(review): assumes list id slot+1 is unused (no glGenLists) - fine for this demo
        self.position = list( position )
        
        # transfer texture content (rows flipped so row 0 becomes the bottom)
        glActiveTexture( self.textureSlotCode )
        self.textureID = glGenTextures( 1 )
        glBindTexture( GL_TEXTURE_2D, self.textureID )
        glEnable( GL_TEXTURE_2D )
        glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA32F, self.textureSize[ 0 ], self.textureSize[ 1 ], 0, GL_RGBA, GL_FLOAT, source[ ::-1 ] )
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST )
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST )

        # define surface: a single quad of w x h pixels, compiled into a display list
        w, h = self.textureSize
        glNewList( self.listNumber, GL_COMPILE )
        glBegin( GL_QUADS )
        glColor3f( 1, 1, 1 )
        glNormal3f( 0, 0, 1 )
        glVertex3f( 0, h, 0 )
        glVertex3f( w, h, 0 )
        glVertex3f( w, 0, 0 )
        glVertex3f( 0, 0, 0 )
        glEnd()
        glEndList()
        
    def Draw( self, world ):
        # Point the shader at this texture's slot and size, then replay the quad.
        glPushMatrix()
        glTranslate( *self.position )
        glUniform1i( world.uTextureSlotNumber_addr, self.textureSlotNumber )
        glUniform2f( world.uTextureSize_addr, *self.textureSize )
        glCallList( self.listNumber )
        glPopMatrix()
        

# Demo driver: open the GL window, upload the digit-font texture, then run the
# render/event loop until the user quits ('q' or Esc); 's' saves a snapshot.
world = World( 1000, 800 )
digits = Texture( Image.open( 'digits.png' ) )

running = True
while running:
    world.RenderFrame( digits )
    for event in pygame.event.get():
        key_released = event.type == pygame.locals.KEYUP
        if event.type == pygame.locals.QUIT or ( key_released and event.key in ( ord( 'q' ), 27 ) ):
            running = False
        elif key_released and event.key == ord( 's' ):
            # Save a timestamped snapshot of the current framebuffer.
            world.Capture().save( time.strftime( 'snapshot-%Y%m%d-%H%M%S.png' ) )
world.Close()
Cunning answered 28/6, 2017 at 4:50 Comment(1)
This might interest you: my best attempt to print 32 bit floats with least rounding errors (integer math only)Waterford
W
23

+1 for an interesting problem. I was curious, so I tried to code this. I need the use of arrays, so I chose #version 420 core. My app renders a single quad covering the screen with coordinates <-1,+1>. I am using a whole-ASCII, 8x8-pixel, 32x8-character font texture I created some years ago:

font

The vertex is simple:

//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
layout(location=0) in vec4 vertex;
out vec2 pos;   // screen position <-1,+1>
void main()
    {
    pos=vertex.xy;      // pass XY through as screen position <-1,+1>
    gl_Position=vertex; // quad vertices are supplied directly in clip space
    }
//---------------------------------------------------------------------------

Fragment is a bit more complicated:

//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos;                    // screen position <-1,+1>
out vec4 gl_FragColor;          // fragment output color
uniform sampler2D txr_font;     // ASCII 32x8 characters font texture unit
uniform float fxs,fys;          // font/screen resolution ratio
//---------------------------------------------------------------------------
const int _txtsiz=32;           // text buffer size
int txt[_txtsiz],txtsiz;        // text buffer and its actual size
vec4 col;                       // color interface for txt_print()
//---------------------------------------------------------------------------
void txt_decimal(float x)       // print float x into txt as [sign][int digits]'.'[frac digits]
    {
    int i,j,c;          // i,j: bounds for digit reversal; c: swap temporary
    float y,a;
    const float base=10;
    // handle sign (always emits '+' or '-' so columns line up)
    if (x<0.0) { txt[txtsiz]='-'; txtsiz++; x=-x; }
     else      { txt[txtsiz]='+'; txtsiz++; }
    // divide to int(x).fract(y) parts of number
    y=x; x=floor(x); y-=x;
    // handle integer part
    i=txtsiz;                   // start of integer part
    for (;txtsiz<_txtsiz;)
        {
        a=x;
        x=floor(x/base);
        a-=base*x;              // a = least-significant remaining digit
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (x<=0.0) break;      // at least one digit is always emitted, so 0 prints as "0"
        }
    j=txtsiz-1;                 // end of integer part
    for (;i<j;i++,j--)      // reverse integer digits (they were produced least-significant first)
        {
        c=txt[i]; txt[i]=txt[j]; txt[j]=c;
        }
    // handle fractional part (most-significant first; stops when the residue hits 0)
    for (txt[txtsiz]='.',txtsiz++;txtsiz<_txtsiz;)
        {
        y*=base;
        a=floor(y);
        y-=a;
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (y<=0.0) break;      // NOTE: a float residue rarely reaches 0 exactly, so this can fill the buffer
        }
    txt[txtsiz]=0;  // string terminator
    }
//---------------------------------------------------------------------------
void txt_print(float x0,float y0)   // print txt at x0,y0 [chars]
    {
    int i;
    float x,y;
    // fragment position [chars] relative to x0,y0 (pos is NDC <-1,+1>; fxs,fys = font/screen ratio)
    x=0.5*(1.0+pos.x)/fxs; x-=x0;
    y=0.5*(1.0-pos.y)/fys; y-=y0;
    // inside bbox?
    if ((x<0.0)||(x>float(txtsiz))||(y<0.0)||(y>1.0)) return;
    // get font texture position for target ASCII
    i=int(x);               // char index in txt
    x-=float(i);            // x,y now hold the fractional position inside the glyph cell
    i=txt[i];               // ASCII code of the character to draw
    x+=float(int(i&31));    // atlas column = code mod 32
    y+=float(int(i>>5));    // atlas row    = code / 32
    x/=32.0; y/=8.0;    // offset in char texture
    col=texture2D(txr_font,vec2(x,y));
    }
//---------------------------------------------------------------------------
void main()
    {
    col=vec4(0.0,1.0,0.0,1.0);  // background color
    // GLSL has no string literals, so build "Float: " character by character
    txtsiz=0;
    txt[txtsiz]='F'; txtsiz++;
    txt[txtsiz]='l'; txtsiz++;
    txt[txtsiz]='o'; txtsiz++;
    txt[txtsiz]='a'; txtsiz++;
    txt[txtsiz]='t'; txtsiz++;
    txt[txtsiz]=':'; txtsiz++;
    txt[txtsiz]=' '; txtsiz++;
    txt_decimal(12.345);        // append the number's decimal expansion
    txt_print(1.0,1.0);         // overwrites col if this fragment lies inside the text
    gl_FragColor=col;
    }
//---------------------------------------------------------------------------

Here my CPU side uniforms:

    glUniform1i(glGetUniformLocation(prog_id,"txr_font"),0);
    glUniform1f(glGetUniformLocation(prog_id,"fxs"),(8.0)/float(xs));
    glUniform1f(glGetUniformLocation(prog_id,"fys"),(8.0)/float(ys));

where xs,ys is my screen resolution. Font is 8x8 in unit 0

Here output for the test fragment code:

screenshot

If your floating point accuracy is decreased due to HW implementation then you should consider printing in hex where no accuracy loss is present (using binary access). That could be converted to decadic base on integers later ...

see:

[Edit2] old style GLSL shaders

I tried to port to old style GLSL and suddenly it works (before it would not compile with arrays present but when I think of it I was trying char[] which was the real reason).

//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
varying vec2 pos;   // screen position <-1,+1>
void main()
    {
    pos=gl_Vertex.xy;       // object-space XY doubles as screen position <-1,+1>
    gl_Position=gl_Vertex;  // quad vertices are supplied directly in clip space
    }
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
varying vec2 pos;                   // screen position <-1,+1>
uniform sampler2D txr_font;     // ASCII 32x8 characters font texture unit
uniform float fxs,fys;          // font/screen resolution ratio
//---------------------------------------------------------------------------
const int _txtsiz=32;           // text buffer size
int txt[_txtsiz],txtsiz;        // text buffer and its actual size
vec4 col;                       // color interface for txt_print()
//---------------------------------------------------------------------------
void txt_decimal(float x)       // print float x into txt as [sign][int digits]'.'[frac digits]
    {
    int i,j,c;          // i,j: bounds for digit reversal; c: swap temporary
    float y,a;
    const float base=10.0;
    // handle sign (always emits '+' or '-' so columns line up)
    if (x<0.0) { txt[txtsiz]='-'; txtsiz++; x=-x; }
     else      { txt[txtsiz]='+'; txtsiz++; }
    // divide to int(x).fract(y) parts of number
    y=x; x=floor(x); y-=x;
    // handle integer part
    i=txtsiz;                   // start of integer part
    for (;txtsiz<_txtsiz;)
        {
        a=x;
        x=floor(x/base);
        a-=base*x;              // a = least-significant remaining digit
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (x<=0.0) break;      // at least one digit is always emitted, so 0 prints as "0"
        }
    j=txtsiz-1;                 // end of integer part
    for (;i<j;i++,j--)      // reverse integer digits (they were produced least-significant first)
        {
        c=txt[i]; txt[i]=txt[j]; txt[j]=c;
        }
    // handle fractional part (most-significant first; stops when the residue hits 0)
    for (txt[txtsiz]='.',txtsiz++;txtsiz<_txtsiz;)
        {
        y*=base;
        a=floor(y);
        y-=a;
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (y<=0.0) break;      // NOTE: a float residue rarely reaches 0 exactly, so this can fill the buffer
        }
    txt[txtsiz]=0;  // string terminator
    }
//---------------------------------------------------------------------------
void txt_print(float x0,float y0)   // print txt at x0,y0 [chars]
    {
    int i;
    float x,y;
    // fragment position [chars] relative to x0,y0 (pos is NDC <-1,+1>; fxs,fys = font/screen ratio)
    x=0.5*(1.0+pos.x)/fxs; x-=x0;
    y=0.5*(1.0-pos.y)/fys; y-=y0;
    // inside bbox?
    if ((x<0.0)||(x>float(txtsiz))||(y<0.0)||(y>1.0)) return;
    // get font texture position for target ASCII
    i=int(x);               // char index in txt
    x-=float(i);
    i=txt[i];
    x+=float(int(i-((i/32)*32)));   // atlas column = code mod 32, via integer division (no bitwise ops in old GLSL)
    y+=float(int(i/32));            // atlas row    = code / 32
    x/=32.0; y/=8.0;    // offset in char texture
    col=texture2D(txr_font,vec2(x,y));
    }
//---------------------------------------------------------------------------
void main()
    {
    col=vec4(0.0,1.0,0.0,1.0);  // background color
    // GLSL has no string literals, so build "Float: " character by character
    txtsiz=0;
    txt[txtsiz]='F'; txtsiz++;
    txt[txtsiz]='l'; txtsiz++;
    txt[txtsiz]='o'; txtsiz++;
    txt[txtsiz]='a'; txtsiz++;
    txt[txtsiz]='t'; txtsiz++;
    txt[txtsiz]=':'; txtsiz++;
    txt[txtsiz]=' '; txtsiz++;
    txt_decimal(12.345);        // append the number's decimal expansion
    txt_print(1.0,1.0);         // overwrites col if this fragment lies inside the text
    gl_FragColor=col;
    }
//---------------------------------------------------------------------------
Waterford answered 28/6, 2017 at 8:53 Comment(7)
This is impressive work! Thank you! For some reason, although one of my machines has GLSL >4.20 (4.30 in fact) its compiler wouldn't allow character literals like '-' (it says ERROR: 0:22: '' : illegal character (') (0x27)). So I had to replace these with corresponding integer literals throughout, but then I was able to get this working. Very cool! I should have mentioned that I really want to be able to support GLSL versions back as far as 1.20 but that shouldn't detract from your solution particularly as the core decimalization algorithm looks like it should be portable there too.Cunning
Also: I take your point about rendering in binary and would gladly do this instead, since it should be easy enough to automate screen capture followed by decoding of a sequence of on-off pixels. This might actually be a more useful general-purpose solution. But can I even do that in early GLSL versions? I know about FloatBitsToUint but that's only available in version 3.30+. Did you have other functions in mind when you talked about "binary access", and if so would they be available on legacy systems? This is a whole separate question I suppose...Cunning
@jaz I never tried that in GLSL but for float binary access in C++ I am using union { float f; DWORD u; } y; y.f=12.34; now on y.u is integer with binary access ... no pointers no casts needed. Also The only reason for core 420 was the use of arrays I do not know from which version they added them into GLSL but in that it works so I left it as is ... You can avoid use of arrays with set of variables instead but the code would look messy ...Waterford
Thanks for the tip re: union (edit: oh, wait, I see you're talking about C. GLSL doesn't have union, in fact...) I would have a couple of other uses for that trick, too, if it existed. The digits algorithm can be written for v.1.20 fairly nicely - see my answer below.Cunning
@jaz I already did and upvoted... also take a look at this Write your own implementation of math's floor function, C for example of the union ... before that I was using pointers but union I like moreWaterford
@Cunning see edit2 I ported to old GLSL code style ... and it works now even with arrays ... btw now when I got text output in shader it is possible to do even this Image to ASCII art conversion inside shader ...Waterford
Through half-closed Venetian blinds, passing headlights threw sprays of semicolons across the wall...Cunning
O
4

First of all I want to mention that the amazing solution of Spektre is almost perfect and is, moreover, a general solution for text output. I gave his answer an upvote. As an alternative, I present a minimally invasive solution, which improves the code of the question.

I do not want to conceal the fact that I have studied the solution of Spektre and integrated into my solution.

// Assume that the texture to which uTextureSlotNumber refers contains
// a rendering of the digits '0123456789' packed together, such that
const vec2 startOfDigitsInTexture = vec2( 0, 0 ); // the lower-left corner of the first digit starts here and
const vec2 sizeOfDigit = vec2( 100, 125 ); // each digit spans this many pixels
// BUGFIX(review): the two constants above had transposed values
// ( startOfDigitsInTexture was vec2(100,125), sizeOfDigit was vec2(0.1,0.2) ),
// contradicting both their own comments and the pixel-space arithmetic below
// ( mod(pos.x, sizeOfDigit.x) on pixel coordinates, division by uTextureSize ).
// Restored to the question's texture layout: digits start at the origin and
// each glyph is 100x125 pixels.
const float nSpaces = 10.0; // assume we have this many digits' worth of space to render in

// Draw one digit glyph if this fragment falls inside character cell strPos.
//   strPos - zero-based character position within the rendered string
//   digit  - value 0..9 selecting the glyph within the digit strip
//   pos    - fragment position in pixels, relative to startOfDigitsInTexture
void RenderDigit( int strPos, int digit, vec2 pos )
{
    float testStrPos = pos.x / sizeOfDigit.x;   // which character cell this fragment lies in
    if ( testStrPos >= float(strPos) && testStrPos < float(strPos+1) )
    {
        float start = sizeOfDigit.x * float(digit);   // left edge of the glyph within the strip
        vec2 textureSourcePosition = vec2( startOfDigitsInTexture.x + start + mod( pos.x, sizeOfDigit.x ), startOfDigitsInTexture.y + pos.y );
        gl_FragColor = texture2D( uTextureSlotNumber, textureSourcePosition / uTextureSize );
    }
}

The function ValueToDigits decomposes a floating point number and fills up an array with its digits. Each number in the array is in the range [0, 9].

const int MAX_DIGITS = 32;  // capacity of the digit buffer
int       digits[MAX_DIGITS];   // decimal digits, most significant first
int       noOfDigits = 0;       // number of valid entries in digits[]
int       posOfComma = 0;       // index of the first fractional digit

// Reverse digits[start..end] in place (the integral digits are generated
// least-significant first and must be flipped afterwards).
void Reverse( int start, int end )
{
    for ( ; start < end; ++ start, -- end )
    {
        int digit = digits[start];
        digits[start] = digits[end];
        digits[end] = digit;
    }
}

// Decompose abs(value) into decimal digits, appending to digits[] starting at
// the current noOfDigits. Afterwards posOfComma is the index just past the
// last integral digit, i.e. where the decimal point belongs.
void ValueToDigits( float value )
{
    const float base = 10.0;
    int start = noOfDigits;

    if ( value < 0.0 ) value = -value;  // sign is the caller's business
    float frac = value; value = floor(value); frac -= value;

    // integral digits, produced least-significant first
    for ( ; value > 0.0 && noOfDigits < MAX_DIGITS; ++ noOfDigits )
    {
        float newValue = floor( value / base );
        digits[noOfDigits] = int( value - base * newValue );
        value = newValue;
    }
    // BUGFIX(review): values < 1.0 produced no integral digit at all, so 0.5
    // rendered as ".5" and 0.0 rendered as nothing; emit an explicit leading 0.
    if ( noOfDigits == start && noOfDigits < MAX_DIGITS )
    {
        digits[noOfDigits] = 0; ++ noOfDigits;
    }
    Reverse( start, noOfDigits-1 );

    posOfComma = noOfDigits;

    // fractional digits, most-significant first; stops once the residue is exhausted
    for ( ; frac > 0.0 && noOfDigits < MAX_DIGITS; ++ noOfDigits )
    {
        frac *= base;
        float digit = floor( frac );
        frac -= digit;
        digits[noOfDigits] = int( digit );
    }
}

Call ValueToDigits in your original function and find the digit and texture coordinates for the current fragment.

// Render `value` in decimal by filling the global digit buffer once per
// fragment and painting whichever digit cell this fragment falls into.
void RenderDecimal( float value )
{
    // fill the array of digits with the floating point value
    ValueToDigits( value );

    // Render the digits
    // (the bare int literals 0 rely on GLSL's implicit int->float conversion, available since 1.20)
    vec2 pos = vFragCoordinate.xy - startOfDigitsInTexture;
    if( pos.x >= 0 && pos.x < sizeOfDigit.x * nSpaces && pos.y >= 0 && pos.y < sizeOfDigit.y  )
    {
        // render the digits
        for ( int strPos = 0; strPos < noOfDigits; ++ strPos )
            RenderDigit( strPos, digits[strPos], pos );
    }

    // Render the decimal point: left sliver of the cell after the last
    // integral digit, or right sliver of the cell just before it
    float testStrPos = pos.x / sizeOfDigit.x;
    float remainder = mod( pos.x, sizeOfDigit.x );
    if( ( testStrPos >= float(posOfComma) && testStrPos < float(posOfComma+1) && remainder / sizeOfDigit.x < 0.1 && abs( pos.y     ) / sizeOfDigit.y < 0.1 ) ||
        ( testStrPos >= float(posOfComma-1) && testStrPos < float(posOfComma) && remainder / sizeOfDigit.x > 0.9 && abs( pos.y     ) / sizeOfDigit.y < 0.1 ) )
    {
        gl_FragColor = texture2D( uTextureSlotNumber, ( startOfDigitsInTexture + sizeOfDigit * vec2( 1.5, 0.5 ) ) /     uTextureSize );
    }
}
Odelle answered 28/6, 2017 at 19:18 Comment(0)
C
1

Here's my updated fragment shader, which can be dropped into the listing in my original question. It implements the decimal-digit-finding algorithm Spektre proposed, in a way that is even compatible with the legacy GLSL 1.20 dialect I'm using. Without that constraint, Spektre's solution is, of course, much more elegant and powerful.

varying vec2      vFragCoordinate;

uniform vec2      uTextureSize;
uniform sampler2D uTextureSlotNumber;

// Return the digit of |x| at the given decimal position:
//   position  0 -> units, 1 -> tens, ... ; -1 -> tenths, -2 -> hundredths, ...
// Works entirely in single precision by repeated divide/multiply, so digits
// far from the decimal point inherit the usual float rounding error.
float Digit( float x, int position, float base )
{
    int i;
    float digit;

    if( position < 0 )
    {
        // fractional side: peel digits off the fraction, most significant first
        x = fract( x );
        for( i = -1; i >= position; i-- )
        {
            if( x <= 0.0 ) { digit = 0.0; break; }  // residue exhausted -> trailing zeros
            x *= base;
            digit = floor( x );
            x -= digit;
        }
    }
    else
    {
        // integer side: strip digits below `position` by repeated division
        x = floor( x );
        float prevx;
        for( i = 0; i <= position; i++ )
        {
            if( x <= 0.0 ) { digit = 0.0; break; }  // ran out of integer digits -> leading zeros
            prevx = x;
            x = floor( x / base );
            digit = prevx - base * x;   // digit = prevx mod base
        }
    }
    return digit;   // each branch's loop runs at least once, so digit is always written
}

float OrderOfMagnitude( float x )
{
    // floor(log10(|x|)), with 0 mapped to 0; GLSL 1.20 lacks log10, so use a log ratio.
    if( x == 0.0 ) return 0.0;
    return floor( log( abs( x ) ) / log( 10.0 ) );
}
// Render |value| in decimal: each fragment independently works out which
// digit cell it falls in and extracts just that digit with Digit().
void RenderDecimal( float value )
{
    // Assume that the texture to which uTextureSlotNumber refers contains
    // a rendering of the digits '0123456789' packed together, such that
    const vec2 startOfDigitsInTexture = vec2( 0, 0 ); // the lower-left corner of the first digit starts here and
    const vec2 sizeOfDigit = vec2( 100, 125 ); // each digit spans this many pixels
    const float nSpaces = 10.0; // assume we have this many digits' worth of space to render in

    value = abs( value );
    vec2 pos = vFragCoordinate - startOfDigitsInTexture;
    // dpstart = decimal position of the leftmost rendered digit (0 for values < 10)
    float dpstart = max( 0.0, OrderOfMagnitude( value ) );
    // decimal position of the digit this fragment's column should display
    int decimal_position = int( dpstart - floor( pos.x / sizeOfDigit.x ) );
    float remainder = mod( pos.x, sizeOfDigit.x );  // x offset in pixels within the current digit cell

    if( pos.x >= 0.0 && pos.x < sizeOfDigit.x * nSpaces && pos.y >= 0.0 && pos.y < sizeOfDigit.y  )
    {
        float digit_value = Digit( value, decimal_position, 10.0 );
        vec2 textureSourcePosition = vec2( startOfDigitsInTexture.x + remainder + digit_value * sizeOfDigit.x, startOfDigitsInTexture.y + pos.y );
        gl_FragColor = texture2D( uTextureSlotNumber, textureSourcePosition / uTextureSize );
    }

    // Render the decimal point: bottom-left sliver of the cell right after
    // the units digit, or bottom-right sliver of the units cell itself
    if( ( decimal_position == -1 && remainder / sizeOfDigit.x < 0.1 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) ||
        ( decimal_position ==  0 && remainder / sizeOfDigit.x > 0.9 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) )
    {
        gl_FragColor = texture2D( uTextureSlotNumber, ( startOfDigitsInTexture + sizeOfDigit * vec2( 1.5, 0.5 ) ) / uTextureSize );
    }
}

void main(void)
{
    // Background: show the raw digit texture, then let RenderDecimal overwrite
    // the fragments that fall inside the number's display area.
    gl_FragColor = texture2D( uTextureSlotNumber, vFragCoordinate / uTextureSize );
    RenderDecimal( 2.5 ); // for current demonstration purposes, just a constant
}
Cunning answered 29/6, 2017 at 5:20 Comment(0)

© 2022 - 2024 — McMap. All rights reserved.