GameMaker Packing coords at vertex position - negative values problem

vdweller

Member
OK bros this is driving me crazy. I am working on GUI widgets, and I'm currently trying to emulate a classic button entirely with shaders.

[Screenshot: upload_2019-12-20_19-0-2.png]

Looks pretty, huh? My goal is to minimize breaking the vertex batch. Since setting uniforms does indeed break it, I chose the following strategy:
  1. No custom vertex formats, for maximum speed. Just a draw_sprite_ext of a white pixel at position x/y, scaled by w/h, with data packed into the color blend (I'll explain).
  2. Pack the button's x/y position into the fractional part of the vertex position, then recover that information in the vertex shader.
  3. Pass additional info (dimensions, enabled/mouseover/down state) as color data.
GML-wise, this goes:
Code:
var xx=x+(x/10000);   // integer part = draw position, fraction = packed copy of x
var yy=y+(y/10000);
var data=(w<<13) | (h<<3) | (e<<2) | (m<<1) | d;   // pack size and state flags into one integer
draw_sprite_ext(sprite0,0,xx,yy,w,h,0,data,1);     // data is passed as the blend colour
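To make the packing concrete, here is roughly what those values come out to for a hypothetical 192x36 button at (64, 32) that is enabled, not moused over and not pressed (example numbers only, not taken from the screenshot):
Code:
// Hypothetical example: 192x36 button at (64, 32), e=1, m=0, d=0
var xx   = 64 + (64/10000);   // 64.0064 -> integer part is the draw position, fraction carries x
var yy   = 32 + (32/10000);   // 32.0032
var data = (192 << 13) | (36 << 3) | (1 << 2) | (0 << 1) | 0;   // 1573156
draw_sprite_ext(sprite0, 0, xx, yy, 192, 36, 0, data, 1);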
In the vertex shader:
Code:
precision highp float;

attribute vec3 in_Position;                  // (x,y,z)
attribute vec4 in_Colour;                    // (r,g,b,a)
attribute vec2 in_TextureCoord;              // (u,v)

varying vec2 v_vTexcoord;
varying vec4 v_vColour;
varying vec2 v_pos;

void main()
{
    float xx=in_Position.x, yy=in_Position.y;
    // Truncate toward zero: the integer part is the actual draw position
    float ix=(xx > 0.0) ? floor(xx) : -floor(-xx);
    float iy=(yy > 0.0) ? floor(yy) : -floor(-yy);
    vec4 object_space_pos = vec4( ix, iy, in_Position.z, 1.0);
    gl_Position = gm_Matrices[MATRIX_WORLD_VIEW_PROJECTION] * object_space_pos;
    v_vColour = in_Colour;
    v_vTexcoord = in_TextureCoord;
    // The fractional part, scaled back up, should be the packed button position
    v_pos=vec2((xx-ix)*10000.,(yy-iy)*10000.);
}
The last line supposedly "reclaims" the button's position data. Unfortunately, for negative coords, it doesn't work:
[Screenshot: upload_2019-12-20_19-6-54.png]
(button's y is <0 in this image).

I have confirmed that, by entering the exact button position coordinates in that last line, the shader works.
Example: if the button coords in the last image are (64, -10), entering
Code:
v_pos=vec2(64., -10.);
will make the button render properly.

This leads me to believe that there is an error in how I pass/retrieve this data, either in the GML code or in the vertex shader.

The shader itself, for completeness:
Vertex
Code:
precision highp float;

attribute vec3 in_Position;                  // (x,y,z)
attribute vec4 in_Colour;                    // (r,g,b,a)
attribute vec2 in_TextureCoord;              // (u,v)

varying vec2 v_vTexcoord;
varying vec4 v_vColour;
varying vec2 v_pos;

void main()
{
    float xx=in_Position.x, yy=in_Position.y;
    // Truncate toward zero: the integer part is the actual draw position
    float ix=(xx > 0.0) ? floor(xx) : -floor(-xx);
    float iy=(yy > 0.0) ? floor(yy) : -floor(-yy);
    vec4 object_space_pos = vec4( ix, iy, in_Position.z, 1.0);
    gl_Position = gm_Matrices[MATRIX_WORLD_VIEW_PROJECTION] * object_space_pos;
    v_vColour = in_Colour;
    v_vTexcoord = in_TextureCoord;
    // The fractional part, scaled back up, should be the packed button position
    v_pos=vec2((xx-ix)*10000.,(yy-iy)*10000.);
}

Fragment
Code:
precision highp float;
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
varying vec2 v_pos;

uniform vec3 col_back;

const float shift1=255.*65536.;
const float shift2=255.*256.;
const vec3 make24bit=vec3(shift1,shift2,255.);
const vec3 col_light=vec3(.25);
const vec3 col_shadow=vec3(.45);
 
vec3 overlay (vec3 lower, vec3 upper) {
    vec3 th=vec3(step(upper.r,0.5),step(upper.g,0.5),step(upper.b,0.5));
    return th*((1.0 - (1.0-lower) * (1.0-2.0*(upper-0.5)))) + (1.0-th)*(lower * (2.0*upper));
}

vec3 grayscale (vec3 inp) {
    return vec3(dot(inp,vec3(0.2125,0.7154,0.0721))); 
}

float circle (vec2 p,float r ) {
    return length(p)-r;
}

vec3 rectangle (vec2 point, vec2 size, float thick) {
    float ix=1./size.x;
    float iy=1./size.y;
    vec2 iz=vec2(thick*ix,thick*iy);
    vec2 bl = step(iz,point);
    float pct = bl.x * bl.y;
    vec2 tr = step(iz,1.0-point);
    pct *= tr.x * tr.y; 
    return 1.0-vec3(pct);
}

vec3 hrectangle (vec2 point, vec2 size, float thick, float dir) {
    float ix=1./size.x;
    float iy=1./size.y;
    vec2 iz=vec2(thick*ix,thick*iy);
    vec2 bl = step(abs(iz),abs(point));
    float pct = bl.x * bl.y;
    vec2 tr = step(iz,1.-point);
    pct *= tr.x * tr.y;     
    float d=2.*ix;
    float l=min(size.x,size.y)/max(size.x,size.y);
    float p1=point.x-d, p2=point.x+d;
    float diag=min(1.0,step(0.5,point.y)*smoothstep(p1,p2,point.y*l+(1.-l))+smoothstep(p1,p2,point.y*l));
    return rectangle(point,size,2.)*vec3((dir*diag + (1.-dir)*(1.-diag)));
}

vec3 multiply (vec3 inp, vec3 col) {
    return vec3(inp*col); 
}

vec3 screen (vec3 inp, vec3 col) {
    return 1.-(1.-inp)*(1.-col); 
}

void main() {
    // Rebuild the packed integer from the blend colour, then unpack size and state flags
    float data=dot(v_vColour.bgr,make24bit);
    float data_w=floor(data/8192.);                 // button width  (bits 13 and up)
    float data_h=floor((data-(data_w*8192.))/8.);   // button height (bits 3-12)
    float enabled=floor(mod(data/4.,2.));           // bit 2
    float mouseover=floor(mod(data/2.,2.));         // bit 1
    float down=mod(data,2.);                        // bit 0
    vec2 size=vec2(data_w,data_h);
    vec2 st = (gl_FragCoord.xy-v_pos.xy)/size.xy;   // position within the button, 0..1
 
    vec3 diag_bl=hrectangle(st,size,8.,1.);
    vec3 diag_tr=hrectangle(st,size,2.,0.);
    vec3 face=1.-rectangle(st,size,2.);
 
    vec3 col_normal= face*col_back + multiply(col_back,col_shadow)*(diag_tr*down + diag_bl*(1.-down)) + screen(col_back,col_light)*(diag_bl*down + diag_tr*(1.-down));
    vec3 col_disabled=grayscale(col_normal);
    vec3 col_mouseover=overlay(col_normal,vec3(0.7));
    vec3 final=enabled*( down*col_normal + (1.-mouseover)*col_normal + mouseover*(1.-down)*col_mouseover ) + (1.-enabled)*col_disabled;
    gl_FragColor = vec4(final,1.0);
}
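For reference, the start of main() simply inverts the GML bit packing. Decoding the hypothetical data value from the earlier example (1573156) with the same arithmetic, written in GML for readability, gives back the original fields:
Code:
// Assumed check in GML: decoding data = 1573156 the way the fragment shader does
var data      = 1573156;
var data_w    = data div 8192;                // floor(data/8192.)  -> 192
var data_h    = (data - data_w*8192) div 8;   // bits 3..12         -> 36
var enabled   = (data div 4) mod 2;           // bit 2              -> 1
var mouseover = (data div 2) mod 2;           // bit 1              -> 0
var down      = data mod 2;                   // bit 0              -> 0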

I'd greatly appreciate any help!
 

vdweller

Member
Bros I found the solution. My mistake was assuming that all vertex coordinates of the quad have the same sign. That is not true: for instance, in a 192x36 button at (-10,-10), the top-left vertex has negative coordinates but the bottom-right one has positive coordinates.
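
To illustrate with GML arithmetic (an assumed example, using the numbers above): the left edge of that quad still unpacks correctly, but the right edge does not, because its x coordinate has turned positive while the fraction was built from a negative x:
Code:
// 192x36 button at x = -10, packed as before
var xx_left  = -10 + (-10/10000);   // -10.001 -> trunc = -10, fraction*10000 = -10   (correct)
var xx_right = xx_left + 192;       // 181.999 -> trunc =  181, fraction*10000 = 9990 (wrong)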

A solution is to also pack the sign into the decimal part. For example, one can add 0.1 to the value 32.0008 to denote that the decimal part actually represents a negative value. The problem is that the more decimal digits the float has, the more precision error you get, and the button's beveled perimeter becomes a bit wobbly at higher coordinate values. I found that packing/unpacking the decimals by dividing/multiplying with a power of 2 (e.g. 8192 instead of 10000) helps mitigate the precision errors, but I suspect this might be GPU dependent. Boy, what a headache.
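
A minimal sketch of that idea in GML, assuming an 8192 scale and 0.5 as the sign flag (both illustrative choices, which require |x| < 4096 so the magnitude can't collide with the flag):
Code:
// Pack: magnitude in the low part of the fraction, sign as a 0.5 offset
var scl  = 8192;
var bx   = -10;                      // button x to pack
var neg  = (bx < 0);                 // 1 if negative, 0 otherwise
var frac = abs(bx)/scl + neg*0.5;    // 0.50122... (exact, since 8192 is a power of 2)

// Unpack (the vertex shader would do the same arithmetic on the fraction it recovers)
var s   = (frac >= 0.5);             // read the sign flag back
var val = (frac - s*0.5)*scl;        // magnitude -> 10
var bx2 = val*(1 - 2*s);             // reapply the sign -> -10
Dividing an integer by a power of two only changes the float's exponent, so the packed fraction itself stays exact, which is presumably why 8192 behaves better than 10000 here.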
 