raycasting – Pinch Gesture for Function Other than Zooming – Unity

I’m working on a game in which I want to pinch outwards on a character, which causes the character to split into 2 smaller copies of itself.

The method I’m thinking about using is to have the “pinch out” gesture destroy the game object and simultaneously create two instances of the smaller game object, and have them follow the fingers that pinch out. The action would also be reversible with the “pinch in” function.

My idea would be to do a raycast to detect the two-finger touch on the object (would I need a collider for that?), then use the beginning and ending touch points to determine if it is pinching out or in.

The problem is I am brand new to Unity and C# and have no idea how to write all of this. All of the tutorials for multi-touch gestures have to do with camera zoom, which is not what I am going for.

Can anyone tell me if I’m on the right track with my logic and provide some guidance on writing the code?
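
For reference, a minimal C# sketch of the approach described above: raycast from the midpoint of the two touches (the character does need a collider for Physics.Raycast to hit it), record the starting finger distance, and compare it against the current distance to decide between pinch-out and pinch-in. The SplitCharacter/MergeCharacter methods and the threshold factors below are placeholders, not a tested implementation.

using UnityEngine;

public class PinchSplit : MonoBehaviour
{
    float startPinchDistance;
    bool pinching;

    void Update()
    {
        if (Input.touchCount == 2)
        {
            Touch t0 = Input.GetTouch(0);
            Touch t1 = Input.GetTouch(1);

            if (!pinching)
            {
                // Only start a pinch if the gesture is on this character (requires a collider).
                Vector2 mid = (t0.position + t1.position) * 0.5f;
                Ray ray = Camera.main.ScreenPointToRay(mid);
                RaycastHit hit;
                if (Physics.Raycast(ray, out hit) && hit.transform == transform)
                {
                    pinching = true;
                    startPinchDistance = Vector2.Distance(t0.position, t1.position);
                }
            }
            else
            {
                float currentDistance = Vector2.Distance(t0.position, t1.position);
                if (currentDistance > startPinchDistance * 1.5f)          // placeholder threshold
                {
                    SplitCharacter();   // placeholder: destroy this object, spawn two smaller copies
                    pinching = false;
                }
                else if (currentDistance < startPinchDistance * 0.66f)    // placeholder threshold
                {
                    MergeCharacter();   // placeholder: the reverse "pinch in" action
                    pinching = false;
                }
            }
        }
        else
        {
            pinching = false;
        }
    }

    void SplitCharacter() { /* game-specific */ }
    void MergeCharacter() { /* game-specific */ }
}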

Voxel raycasting algorithm not working correctly

Here is a full C99/C11 implementation of a simple voxel renderer:

// SPDX-License-Identifier: CC0-1.0
//
// Compile using e.g.
//      gcc -Wall -Wextra -O2 this.c -lm -o this

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <errno.h>

/*
 *  float3 support
*/

typedef struct {
    float   x;
    float   y;
    float   z;
} float3;

static inline float3  Float3(const float x, const float y, const float z)
{
    const float3  result = { x, y, z };
    return result;
}

static inline float3  float3_sub3(const float3 a, const float3 b)
{
    const float3  result = { a.x - b.x, a.y - b.y, a.z - b.z };
    return result;
}

static inline float3  float3_add3(const float3 a, const float3 b)
{
    const float3  result = { a.x + b.x, a.y + b.y, a.z + b.z };
    return result;
}

static inline float  float3_length(const float3 a)
{
    return sqrtf(a.x*a.x + a.y*a.y + a.z*a.z);
}

static inline float  float3_dot(const float3 a, const float3 b)
{
    return a.x*b.x + a.y*b.y + a.z*b.z;
}

static inline float3  float3_cross(const float3 a, const float3 b)
{
    const float3  result = { a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x };
    return result;
}

static inline float3  float3_mul1(const float3 a, const float b)
{
    const float3  result = { a.x*b, a.y*b, a.z*b };
    return result;
}

static inline float3  float3_scale_to_length(const float3 a, const float b)
{
    const float n = float3_length(a);
    const float3  result = { a.x*b/n, a.y*b/n, a.z*b/n };
    return result;
}

/*
 *  float4 support
*/

typedef struct {
    float   x;
    float   y;
    float   z;
    float   w;
} float4;

static inline float4  Float4(const float x, const float y, const float z, const float w)
{
    const float4  result = { x, y, z, w };
    return result;
}

/* float4_length(a) == || a ||, Euclidean length of vector a */
static inline float  float4_length(const float4 a)
{
    return sqrtf(a.x*a.x + a.y*a.y + a.z*a.z + a.w*a.w);
}

/* float4_normalize(a) == a / || a || */
static inline float4  float4_normalize(const float4 a)
{
    const float   n = float4_length(a);
    const float4  result = { a.x/n, a.y/n, a.z/n, a.w/n };
    return result;
}

/* float4_sign(a): Component-wise sign: -1.0, 0.0, or +1.0 */
static inline float4  float4_sign(const float4 a)
{
    const float4  result = { (a.x < 0.0f) ? -1.0f : (a.x > 0.0f) ? +1.0f : 0.0f,
                             (a.y < 0.0f) ? -1.0f : (a.y > 0.0f) ? +1.0f : 0.0f,
                             (a.z < 0.0f) ? -1.0f : (a.z > 0.0f) ? +1.0f : 0.0f,
                             (a.w < 0.0f) ? -1.0f : (a.w > 0.0f) ? +1.0f : 0.0f };
    return result;
}

/* float4_max4(a, b): Component-wise maximum */
static inline float4  float4_max4(const float4 a, const float4 b)
{
    const float4  result = { (a.x >= b.x) ? a.x : b.x,
                             (a.y >= b.y) ? a.y : b.y,
                             (a.z >= b.z) ? a.z : b.z,
                             (a.w >= b.w) ? a.w : b.w };
    return result;
}

/* float4_min4(a, b): Component-wise minimum */
static inline float4  float4_min4(const float4 a, const float4 b)
{
    const float4  result = { (a.x <= b.x) ? a.x : b.x,
                             (a.y <= b.y) ? a.y : b.y,
                             (a.z <= b.z) ? a.z : b.z,
                             (a.w <= b.w) ? a.w : b.w };
    return result;
}

/* float4_add4(a, b) == a + b */
static inline float4  float4_add4(const float4 a, const float4 b)
{
    const float4  result = { a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w };
    return result;
}

/* float4_sub4(a, b) == a - b */
static inline float4  float4_sub4(const float4 a, const float4 b)
{
    const float4  result = { a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w };
    return result;
}

/* float4_floor4(a): Round each component towards negative infinity */
static inline float4  float4_floor4(const float4 a)
{
    const float4  result = { floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w) };
    return result;
}

/* float4_mul1(a, b) = { a.x*b, a.y*b, a.z*b, a.w*b } */
static inline float4  float4_mul1(const float4 a, const float b)
{
    const float4  result = { a.x*b, a.y*b, a.z*b, a.w*b };
    return result;
}

/* float4_div1(a, b) = { a.x/b, a.y/b, a.z/b, a.w/b } */
static inline float4  float4_div1(const float4 a, const float b)
{
    const float4  result = { a.x/b, a.y/b, a.z/b, a.w/b };
    return result;
}

/* float4_div4(a, b) = { a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w } */
static inline float4  float4_div4(const float4 a, const float4 b)
{
    const float4  result = { a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w };
    return result;
}

/*
 * int4 support
*/

typedef struct {
    int     x;
    int     y;
    int     z;
    int     w;
} int4;

/* float4_int4(a): Component-wise floor, then cast to int. */
static inline int4  float4_int4(const float4 a)
{
    const int4  result = { (int)floorf(a.x), (int)floorf(a.y), (int)floorf(a.z), (int)floorf(a.w) };
    return result;
}

/*
 *  Voxel map
*/

float4           voxel_rgba[256][8];    /* Look-up table for voxel faces; all components (0..1). x is red, y is green, z is blue, w is opacity (0=transparent, 1=opaque). */
int4             voxel_size;
unsigned char   *voxel_cell = NULL;
size_t           voxel_xstride = 0;     /* Typically 1 */
size_t           voxel_ystride = 0;     /* Typically voxel_size.x */
size_t           voxel_zstride = 0;     /* Typically voxel_size.x * voxel_size.y */


/* Trace one voxel ray starting at (projection plane) pos, with eye/camera at eye.
 * Return the color and distance { .x=red, .y=green, .z=blue, .w=distance }
*/
float4 voxel_ray(float4 eye, float4 pos, float maxdist)
{
    const float  eps = 0.5f / maxdist;

    /* On input, eye and pos are really 3-component vectors; we need the fourth one to be zero,
       so that it won't affect the unit direction length vector below. */
    eye.w = 0.0f;
    pos.w = 0.0f;

    /* Ray unit direction vector */
    float4  dir = float4_normalize(float4_sub4(pos, eye));
    /* We rely on the .w component to track length, so set that one now. */
    dir.w = 1.0f;

    float4  posf = float4_sub4(pos, float4_floor4(pos));
    /* Note: 0 <= posf.x < 1,
             0 <= posf.y < 1,
             0 <= posf.z < 1. */

    /* Find first intersections with a voxel cell wall (*next),
       and the delta to the consecutive following intersections */
    float4  xnext,  ynext,  znext;
    float4  xdelta, ydelta, zdelta;

    if (dir.x > eps) {
        xnext  = float4_add4(pos, float4_mul1(dir, 1.0f - posf.x));
        xdelta = float4_div1(dir, dir.x);
    } else
    if (dir.x < -eps) {
        xnext  = float4_sub4(pos, float4_mul1(dir, 0.0f - posf.x));
        xdelta = float4_div1(dir, -dir.x);
    } else {
        xnext  = Float4(0.0f, 0.0f, 0.0f, maxdist);
        xdelta = Float4(0.0f, 0.0f, 0.0f, 0.0f);
    }

    if (dir.y > eps) {
        ynext  = float4_add4(pos, float4_mul1(dir, 1.0f - posf.y));
        ydelta = float4_div1(dir, dir.y);
    } else
    if (dir.y < -eps) {
        ynext  = float4_sub4(pos, float4_mul1(dir, 0.0f - posf.y));
        ydelta = float4_div1(dir, -dir.y);
    } else {
        ynext  = Float4(0.0f, 0.0f, 0.0f, maxdist);
        ydelta = Float4(0.0f, 0.0f, 0.0f, 0.0f);
    }

    if (dir.z > eps) {
        znext  = float4_add4(pos, float4_mul1(dir, 1.0f - posf.z));
        zdelta = float4_div1(dir, dir.z);
    } else
    if (dir.z < -eps) {
        znext  = float4_sub4(pos, float4_mul1(dir, 0.0f - posf.z));
        zdelta = float4_div1(dir, -dir.z);
    } else {
        znext  = Float4(0.0f, 0.0f, 0.0f, maxdist);
        zdelta = Float4(0.0f, 0.0f, 0.0f, 0.0f);
    }

    if (xnext.w < 0.0f || xdelta.w < 0.0f) fprintf(stderr, "Warning: xnext.w = %.6f, xdelta.w = %.6f\n", xnext.w, xdelta.w);
    if (ynext.w < 0.0f || ydelta.w < 0.0f) fprintf(stderr, "Warning: ynext.w = %.6f, ydelta.w = %.6f\n", ynext.w, ydelta.w);
    if (znext.w < 0.0f || zdelta.w < 0.0f) fprintf(stderr, "Warning: znext.w = %.6f, zdelta.w = %.6f\n", znext.w, zdelta.w);

    float4  color = { 0.0f, 0.0f, 0.0f, 0.0f };  /* Transparent! */

    while (1) {
        unsigned char  intersection = 0;    /* 1:x, 2:y, 4:z */

        if (pos.w >= maxdist)
            break;
        if (color.w >= 1.0f)
            break;

        /* Pick the closest next step first. */
        pos = xnext;
        if (pos.w > ynext.w) {
            pos = ynext;
        }
        if (pos.w > znext.w) {
            pos = znext;
        }

        /* Update intersection and prepare for the next step. */
        if (pos.w == xnext.w) {
            intersection |= 1;
            xnext = float4_add4(xnext, xdelta);
        }
        if (pos.w == ynext.w) {
            intersection |= 2;
            ynext = float4_add4(ynext, ydelta);
        }
        if (pos.w == znext.w) {
            intersection |= 4;
            znext = float4_add4(znext, zdelta);
        }

        /* If pos.w == INF, we have intersection = 0. */
        if (!intersection) {
            pos.w = maxdist;
            break;
        }

        /* Position within the wraparound voxel space. */
        float4  temp = float4_floor4(pos);
        int4    posi = float4_int4(temp);
        /* We could use the fractional positive sub-voxel coordinates posf,
              posf = float4_sub4(pos, temp);
           where 0 <= posf.x < 1, 0 <= posf.y < 1, 0 <= posf.z < 1
           and if (intersection & 1), posf.x = 0 (except for rounding errors),
               if (intersection & 2), posf.y = 0 (except for rounding errors),
               if (intersection & 4), posf.z = 0 (except for rounding errors),
           for interpolation etc.
        */

        /* Ensure posi is within the positive voxel space. */
        posi.x = posi.x % voxel_size.x; if (posi.x < 0) posi.x += voxel_size.x;
        posi.y = posi.y % voxel_size.y; if (posi.y < 0) posi.y += voxel_size.y;
        posi.z = posi.z % voxel_size.z; if (posi.z < 0) posi.z += voxel_size.z;

        /* Look up the voxel cell properties for this intersection. */
        float4  c = voxel_rgba[ voxel_cell[ (size_t)posi.x * voxel_xstride
                                          + (size_t)posi.y * voxel_ystride
                                          + (size_t)posi.z * voxel_zstride ] ][ intersection ];

        if (c.w >= 1.0f) {
            /* Opaque; good, ray ends here. Blend 'c' behind 'color'. */
            color = float4_add4(color, float4_mul1(c, 1.0f - color.w));
            break;
        } else
        if (c.w > 0.0f) {
            /* Blend color 'color' *behind* color 'c'. */
            color = float4_add4(color, float4_mul1(c, 1.0f - color.w));
        }
    }
    color.w = pos.w;
    return color;
}

void renderPPM(FILE *outppm, FILE *outpgm, int width, int height, const float3 eye, const float3 forward, const float3 right, const float maxdist)
{
    /* Assume 'right' is perpendicular to 'forward'.  'up' is perpendicular to both, with length (height/width) times that of 'right'. */
    const float3  up = float3_scale_to_length(float3_cross(forward, right), float3_length(right) * (float)height / (float)width);

    /* Image plane corner, rowstart = eye + forward - right + up */
    float3  rowstart = float3_add3(float3_sub3(float3_add3(eye, forward), right), up);

    /* Delta vectors per pixel for the image plane */
    const float3  dx = float3_mul1(right, 2.0f / (float)width);
    const float3  dy = float3_mul1(up,   -2.0f / (float)height);

    if (outppm) fprintf(outppm, "P6\n%d %d 255\n", width, height);
    if (outpgm) fprintf(outpgm, "P5\n%d %d 255\n", width, height);

    float  mind = +3.0f*maxdist;
    float  maxd = -3.0f*maxdist;

    for (int y = 0; y < height; y++, rowstart = float3_add3(rowstart, dy)) {
        float3  pos = rowstart;
        for (int x = 0; x < width; x++, pos = float3_add3(pos, dx)) {
            const float4  c = voxel_ray( Float4(eye.x, eye.y, eye.z, 0.0f),
                                         Float4(pos.x, pos.y, pos.z, 0.0f), maxdist);
            const int  r8 = (c.x <= 0.0f) ? 0 : (c.x < 1.0f) ? (int)(0.5f + 255.0f * c.x) : 255;
            const int  g8 = (c.y <= 0.0f) ? 0 : (c.y < 1.0f) ? (int)(0.5f + 255.0f * c.y) : 255;
            const int  b8 = (c.z <= 0.0f) ? 0 : (c.z < 1.0f) ? (int)(0.5f + 255.0f * c.z) : 255;
            const int  d8 = (c.w <= 0.0f) ? 0 : (c.w < maxdist) ? (int)(0.5f + 255.0f * c.w / maxdist) : 255;
            if (mind > c.w) mind = c.w;
            if (maxd < c.w) maxd = c.w;

            if (outppm) {
                fputc(r8, outppm);
                fputc(g8, outppm);
                fputc(b8, outppm);
            }
            if (outpgm) {
                fputc(d8, outpgm);
            }
        }
        fprintf(stderr, "\rRow %d of %d completed.", y + 1, height);
        fflush(stderr);
    }

    if (outppm) fflush(outppm);
    if (outpgm) fflush(outpgm);
    fprintf(stderr, "\rRendering complete. Distances varied between %.6f and %.6f.\n", mind, maxd);
    fflush(stderr);
}

int main(int argc, char *argv[])
{
    FILE *ppm, *pgm;

    if (argc != 3 || !strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
        const char *arg0 = (argc > 0 && argv && argv[0] && argv[0][0]) ? argv[0] : "(this)";
        fprintf(stderr, "\n");
        fprintf(stderr, "Usage: %s [ -h | --help ]\n", arg0);
        fprintf(stderr, "       %s OUT.ppm DEPTH.pgm\n", arg0);
        fprintf(stderr, "\n");
        return EXIT_FAILURE;
    }

    voxel_size.x = 64;
    voxel_size.y = 64;
    voxel_size.z = 64;

    voxel_xstride = 1;
    voxel_ystride = (size_t)voxel_size.x;
    voxel_zstride = voxel_ystride * (size_t)voxel_size.y;
    const size_t  size = voxel_zstride * (size_t)voxel_size.z;

    voxel_cell = (unsigned char *)malloc(size);
    if (!voxel_cell) {
        fprintf(stderr, "Not enough memory for a %d x %d x %d voxel map.\n", voxel_size.x, voxel_size.y, voxel_size.z);
        return EXIT_FAILURE;
    }
    memset(voxel_cell, 0, size);

    /* Make all cell values transparent, */
    for (int i = 0; i < 256; i++) {
        for (int k = 0; k < 8; k++) {
            voxel_rgba[i][k] = Float4(0.0f, 0.0f, 0.0f, 0.0f);
        }
    }

    /* Cell type 1 faces are blue, red, and green; edges and vertices their mix. */
    voxel_rgba[1][1] = Float4(0.0f, 0.0f, 1.0f, 1.0f);
    voxel_rgba[1][2] = Float4(0.0f, 1.0f, 0.0f, 1.0f);
    voxel_rgba[1][4] = Float4(1.0f, 0.0f, 0.0f, 1.0f);
    voxel_rgba[1][3] = Float4(0.0f, 0.8f, 0.8f, 1.0f);
    voxel_rgba[1][5] = Float4(0.8f, 0.0f, 0.8f, 1.0f);
    voxel_rgba[1][6] = Float4(0.8f, 0.8f, 0.0f, 1.0f);
    voxel_rgba[1][7] = Float4(0.6f, 0.6f, 0.6f, 1.0f);

    /* Create a shell at the center, minradius 10, maxradius 12 */
    {
        const int  cx = 32;
        const int  cy = 32;
        const int  cz = 32;
        const int  rrmin = 10*10;
        const int  rrmax = 12*12;

        for (int z = 0; z < voxel_size.z; z++) {
            const int  zz = (z-cz)*(z-cz);
            for (int y = 0; y < voxel_size.y; y++) {
                const int  zzyy = zz + (y-cy)*(y-cy);
                for (int x = 0; x < voxel_size.x; x++) {
                    const int  dd = zzyy + (x-cx)*(x-cx);
                    if (dd >= rrmin && dd < rrmax) {
                        voxel_cell[(size_t)x * voxel_xstride + (size_t)y * voxel_ystride + (size_t)z * voxel_zstride] = 1;
                    }
                }
            }
        }
    }

    fprintf(stderr, "Constructed a %d x %d x %d voxel map.\n", voxel_size.x, voxel_size.y, voxel_size.z);

    ppm = fopen(argv[1], "wb");
    if (!ppm) {
        fprintf(stderr, "%s: %s.\n", argv[1], strerror(errno));
        return EXIT_FAILURE;
    }
    pgm = fopen(argv[2], "wb");
    if (!pgm) {
        fprintf(stderr, "%s: %s.\n", argv[2], strerror(errno));
        fclose(ppm);
        remove(argv[1]);
        return EXIT_FAILURE;
    }

    renderPPM(ppm, pgm, 320, 160, Float3(68.0f, 32.0f, 68.0f), Float3(4.0f,4.0f,4.0f), Float3(4.0f, -4.0f, 0.0f), 256.0f);

    if (fclose(ppm)) {
        fprintf(stderr, "%s: Error closing file.\n", argv[1]);
        fclose(pgm);
        remove(argv[1]);
        remove(argv[2]);
        return EXIT_FAILURE;
    }
    if (fclose(pgm)) {
        fprintf(stderr, "%s: Error closing file.\n", argv[2]);
        remove(argv[1]);
        remove(argv[2]);
        return EXIT_FAILURE;
    }

    fprintf(stderr, "Saved PPM image as '%s', and depth graymap as PGM image '%s'.\n", argv[1], argv[2]);
    return EXIT_SUCCESS;
}

Most of the code is support for HLSL-like float3/float4/int4 types, so that the code is easier to port. (I don’t have anything that can compile and run HLSL, myself.)

Compile the program, then run it giving two file names as command-line parameters. The first one will be a NetPBM PPM image of the scene, and the second one a NetPBM PGM depth map:
Example rendering

The voxel space here is periodic, so although there is only one sphere in the voxel map, the rendering has multiple copies of it.

Note how the nearest sphere – in the lower right corner of the rendered image – has green faces on it, even though they obviously shouldn't be visible on that side of the sphere. (They're easier to see if you increase the image size.) What is going on?

Since each voxel cell has only three faces (and not six, like a cube has), we see the faces we expect to see only when viewing in a negative direction (where the unit ray direction vector has all nonpositive components). When we view from other directions, we do not see the surface face first, but the faces between nonempty voxel cells!

In 2D, the equivalent 2×2 block has extra protruding lines:

┌──┬──
│  │
├──┼──
│  │

In 3D, those extra faces are like vanes or plates standing up on the outside of the surface!

Simply put, if we want to draw a nice voxel box, we need to populate four cells:
$$\begin{array}{ccl}
\text{Cell} & \text{Value} & \text{Description} \\
\hline
(0,0,0) & 7 & \text{x, y, and z faces} \\
(1,0,0) & 1 & \text{x face} \\
(0,1,0) & 2 & \text{y face} \\
(0,0,1) & 4 & \text{z face} \\
\end{array}$$

Alternatively, you can adjust the cell index calculation by preparing an array of eight vectors (constant for each ray, since they depend only on the ray direction; each component is either 0 or -1) that are added to the current position just before the cell index calculation. (Essentially, each intersection type has its own adjustment.) That way the x faces, the y faces, and the z faces of the voxel grid can be shifted separately, depending on the ray direction, so that each cell effectively has SIX faces.
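
A minimal sketch of that adjustment, in the style of the code above and assuming the same convention (each cell stores the faces on its negative-coordinate walls):

/* Per-ray adjustment vectors, indexed by the intersection type bitmask (1:x, 2:y, 4:z).
   Each component is 0 or -1: when the ray travels in the negative direction along an axis,
   the face it crosses belongs to the cell on the other side of that wall, so the cell index
   is shifted by -1 along that axis.  Computed once per ray, after 'dir' is known. */
float4  adjust[8];
for (int m = 0; m < 8; m++)
    adjust[m] = Float4(((m & 1) && dir.x < 0.0f) ? -1.0f : 0.0f,
                       ((m & 2) && dir.y < 0.0f) ? -1.0f : 0.0f,
                       ((m & 4) && dir.z < 0.0f) ? -1.0f : 0.0f,
                       0.0f);

/* ... and inside the stepping loop, just before the cell index calculation: */
float4  temp = float4_floor4(float4_add4(pos, adjust[intersection]));
int4    posi = float4_int4(temp);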

unity – DDA voxel raycasting algorithm not working correctly

I am using Unity with a compute shader to render to a texture. So far I have been stepping along the rays in increments of 1 just for testing, so I know the setup works. Now I am trying to write a function that advances the ray so it stops at the next voxel face. I think I understand the algorithm, but my implementation is not working correctly.

This is the function I call in a loop to find how far I should advance the ray on each step.

float getRayIncrement(float3 dir, float3 position) {

    float3 d;
    d.x = 0;
    d.y = 0;
    d.z = 0;
    if (dir.x > 0) {
        d.x = (ceil(position.x) - position.x) / dir.x;
    }
    else if (dir.x < 0) {
        d.x = (position.x - floor(position.x)) / dir.x;
    }
    if (dir.y > 0) {
        d.y = (ceil(position.y) - position.y) / dir.y;
    }
    else if (dir.y < 0) {
        d.y = (position.y - floor(position.y)) / dir.y;
    }
    if (dir.z > 0) {
        d.z = (ceil(position.z) - position.z) / dir.z;
    }
    else if (dir.z < 0) {
        d.z = (position.z - floor(position.z)) / dir.z;
    }

    float smallestDistance = 1000;

    if (d.x > 0 && d.x < smallestDistance) {
        smallestDistance = d.x;
    }
    if (d.y > 0 && d.y < smallestDistance) {
        smallestDistance = d.y;
    }
    if (d.z > 0 && d.z < smallestDistance) {
        smallestDistance = d.z;
    }

    return smallestDistance;
}

Then I multiply the normalized direction vector of the ray by the smallestDistance to march the ray like:

        pointHolder += direction * smallestDistance;

But I get very strange results: some cube faces don't render at all, and some render as speckles of pixels. Here is the full compute shader:

#pragma kernel CSMain

RWTexture2D<float4> Result; // the actual array of pixels the player sees
float width; // in pixels
float height;

StructuredBuffer<int> voxelMaterials; // for now just getting a flat voxel array
int voxelBufferRowSize;
StructuredBuffer<float3> rayDirections; // A better name would be "projectedPixelScreenPositions"
float maxRayDistance;

float3 playerCameraPosition;
float3 playerWorldForward;
float3 playerWorldRight;
float3 playerWorldUp;


float3 transformDirectionFromPoint(float3 p) {

    float3 u1 = p.x * playerWorldRight;
    float3 u2 = p.y * playerWorldUp;
    float3 u3 = p.z * playerWorldForward;

    return u1 + u2 + u3; // the direction to that point
}

float getRayIncrement(float3 dir, float3 position) {

    float3 d;
    d.x = 0;
    d.y = 0;
    d.z = 0;
    if (dir.x > 0) {
        d.x = (ceil(position.x) - position.x) / dir.x;
    }
    else if (dir.x < 0) {
        d.x = (position.x - floor(position.x)) / dir.x;
    }
    if (dir.y > 0) {
        d.y = (ceil(position.y) - position.y) / dir.y;
    }
    else if (dir.y < 0) {
        d.y = (position.y - floor(position.y)) / dir.y;
    }
    if (dir.z > 0) {
        d.z = (ceil(position.z) - position.z) / dir.z;
    }
    else if (dir.z < 0) {
        d.z = (position.z - floor(position.z)) / dir.z;
    }

    float smallestDistance = 1000;
    if (d.x > 0 && d.x < smallestDistance) {
        smallestDistance = d.x;
    }
    if (d.y > 0 && d.y < smallestDistance) {
        smallestDistance = d.y;
    }
    if (d.z > 0 && d.z < smallestDistance) {
        smallestDistance = d.z;
    }

    return smallestDistance;
}





[numthreads(32, 32, 1)]
void CSMain(uint3 id : SV_DispatchThreadID)
{
    float3 pointHolder = playerCameraPosition;
    float3 direction = transformDirectionFromPoint( rayDirections[id.x + (id.y * width)] );
    Result[id.xy] = float4(0, 0, 0, 0.0);

    for (int i = 0; i < maxRayDistance; i++) {

        //pointHolder += direction;

        float increment = getRayIncrement(direction, pointHolder);
        pointHolder += direction * increment;

        //check if point is within bounds of the buffer
        if (pointHolder.x < voxelBufferRowSize && pointHolder.x >= 0
            && pointHolder.y < voxelBufferRowSize && pointHolder.y >= 0
            && pointHolder.z < voxelBufferRowSize && pointHolder.z >= 0) 
        {
            // convert the point into a voxel index and check if a voxel exists there

            bool outOfRange = false; // a secondary check for when the direction is negative and we need to check the voxel next to the hit point
            int voxelIndexX;
            if (direction.x >= 0) {
                voxelIndexX = floor(pointHolder.x);
            }
            else {
                voxelIndexX = floor(pointHolder.x) - 1;
                if (voxelIndexX < 0) {
                    outOfRange = true;
                }
            }

            int voxelIndexY;
            if (direction.y >= 0) {
                voxelIndexY = floor(pointHolder.y) - 1;
                if (voxelIndexY < 0) {
                    outOfRange = true;
                }
            }
            else {
                voxelIndexY = floor(pointHolder.y);
            }

            int voxelIndexZ;
            if (direction.z >= 0) {
                voxelIndexZ = floor(pointHolder.z);
            }
            else {
                voxelIndexZ = floor(pointHolder.z) - 1;
                if (voxelIndexZ < 0) {
                    outOfRange = true;
                }
            }

            if (!outOfRange) {

                //int voxelIndex = floor(pointHolder.x) + (floor(pointHolder.z) * voxelBufferRowSize) + (floor(pointHolder.y) * (voxelBufferRowSize * voxelBufferRowSize));

                int voxelIndex = voxelIndexX + (voxelIndexZ * voxelBufferRowSize) + (voxelIndexY * (voxelBufferRowSize * voxelBufferRowSize));

                if (voxelMaterials[voxelIndex] == 1) {
                    Result[id.xy] = float4(((float)i / maxRayDistance) * 2, (float)voxelIndex / (voxelBufferRowSize * voxelBufferRowSize * voxelBufferRowSize), pointHolder.z, 0.0);
                    break;
                }
            }   


        }


    }
}
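
For comparison, here is a rough HLSL-style sketch of the per-axis distance computation that getRayIncrement above is aiming for. Two details matter: when a direction component is negative, the (positive) fractional distance has to be divided by -dir rather than dir, otherwise the result comes out negative and is discarded by the d > 0 checks; and a position that lands exactly on a face must not produce a zero step.

float getRayIncrementSketch(float3 dir, float3 position) {
    // Distance along 'dir' to the next integer boundary on each axis.
    // frac(x) == x - floor(x): (1 - frac) is the distance to the boundary ahead when
    // moving in +x, frac is the distance to the boundary behind when moving in -x.
    float3 d = float3(1e30, 1e30, 1e30);    // effectively "never" for axes the ray does not move along

    if (dir.x > 0) d.x = (1.0 - frac(position.x)) / dir.x;
    else if (dir.x < 0) d.x = frac(position.x) / -dir.x;

    if (dir.y > 0) d.y = (1.0 - frac(position.y)) / dir.y;
    else if (dir.y < 0) d.y = frac(position.y) / -dir.y;

    if (dir.z > 0) d.z = (1.0 - frac(position.z)) / dir.z;
    else if (dir.z < 0) d.z = frac(position.z) / -dir.z;

    // Lower bound so the ray never returns a zero step when it sits exactly on a face;
    // a more robust DDA keeps running per-axis distances (the tMax/tDelta formulation) instead.
    return max(min(d.x, min(d.y, d.z)), 1e-4);
}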

raycasting – GLSL compute shader flickering squares/blocks artifact

I’m trying to write a bare minimum GPU raycaster using compute shaders in OpenGL. I’m confident the raycasting itself is functional, as I’ve gotten clean outlines of bounding boxes via a ray-box intersection algorithm.

However, when attempting ray-triangle intersection, I get strange artifacts. My shader is programmed to simply test for a ray-triangle intersection, and color the pixel white if an intersection was found and black otherwise. When the triangle should be visible onscreen, the screen is instead filled with black and white squares/blocks/tiles which flicker randomly like TV static. The squares are at most 8×8 pixels (the size of my compute shader blocks), although there are dots as small as single pixels as well. The white blocks generally lie in the expected area of my triangle, although sometimes they are spread out across the bottom of the screen as well.

Here is a video of the artifact. In my full shader the camera can be rotated around and the shape appears more triangle-like, but the flickering artifact is the key issue and still appears in this video which I generated from the following minimal version of my shader code:

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

uvec2 DIMS = gl_NumWorkGroups.xy*gl_WorkGroupSize.xy;
uvec2 UV = gl_GlobalInvocationID.xy;
vec2 uvf = vec2(UV) / vec2(DIMS);

layout(location = 1, rgba8) uniform writeonly image2D brightnessOut;

struct Triangle
{
    vec3 v0;
    vec3 v1;
    vec3 v2;
};

struct Ray
{
    vec3 origin;
    vec3 direction;
    vec3 inv;
};

// Wikipedia Moller-Trumbore algorithm, GLSL-ified
bool ray_triangle_intersection(vec3 rayOrigin, vec3 rayVector,
    in Triangle inTriangle, out vec3 outIntersectionPoint)
{
    const float EPSILON = 0.0000001;
    vec3 vertex0 = inTriangle.v0;
    vec3 vertex1 = inTriangle.v1;
    vec3 vertex2 = inTriangle.v2;

    vec3 edge1 = {0.0, 0.0, 0.0};
    vec3 edge2 = {0.0, 0.0, 0.0};
    vec3 h = {0.0, 0.0, 0.0};
    vec3 s = {0.0, 0.0, 0.0};
    vec3 q = {0.0, 0.0, 0.0};
    float a = 0.0, f = 0.0, u = 0.0, v = 0.0;
    edge1 = vertex1 - vertex0;
    edge2 = vertex2 - vertex0;
    h = cross(rayVector, edge2);
    a = dot(edge1, h);
    // Test if ray is parallel to this triangle.
    if (a > -EPSILON && a < EPSILON)
    {
        return false;
    }

    f = 1.0/a;
    s = rayOrigin - vertex0;
    u = f * dot(s, h);
    if (u < 0.0 || u > 1.0)
    {
        return false;
    }

    q = cross(s, edge1);
    v = f * dot(rayVector, q);
    if (v < 0.0 || u + v > 1.0)
    {
        return false;
    }

    // At this stage we can compute t to find out where the intersection point is on the line.
    float t = f * dot(edge2, q);
    if (t > EPSILON) // ray intersection
    {
        outIntersectionPoint = rayOrigin + rayVector * t;
        return true;
    }
    return false;
}

void main()
{
    // Generate rays by calculating the distance from the eye
    // point to the screen and combining it with the pixel indices
    // to produce a ray through this invocation's pixel
    const float HFOV = (3.14159265359/180.0)*45.0;
    const float WIDTH_PX = 1280.0;
    const float HEIGHT_PX = 720.0;
    float VIEW_PLANE_D = (WIDTH_PX/2.0)/tan(HFOV/2.0);
    vec2 rayXY = vec2(UV) - vec2(WIDTH_PX/2.0, HEIGHT_PX/2.0);

    // Rays have origin at (0, 0, 20) and generally point towards (0, 0, -1)
    Ray r;
    r.origin = vec3(0.0, 0.0, 20.0);
    r.direction = normalize(vec3(rayXY, -VIEW_PLANE_D));
    r.inv = 1.0 / r.direction;
    
    // Triangle in XY plane at Z=0
    Triangle debugTri;
    debugTri.v0 = vec3(-20.0, 0.0, 0.0);
    debugTri.v1 = vec3(20.0, 0.0, 0.0);
    debugTri.v0 = vec3(0.0, 40.0, 0.0);

    // Test triangle intersection; write 1.0 if hit, else 0.0
    vec3 hitPosDebug = vec3(0.0);
    bool hitDebug = ray_triangle_intersection(r.origin, r.direction, debugTri, hitPosDebug);

    imageStore(brightnessOut, ivec2(UV), vec4(vec3(float(hitDebug)), 1.0));
}

I render the image to a fullscreen triangle using a normal sampler2D and rasterized triangle UVs chosen to map to screen space.

None of this code should be time dependent, and I've tried multiple ray-triangle algorithms from various sources, including both branching and branch-free versions, and all exhibit the same problem, which leads me to suspect some sort of memory incoherency behavior I'm not familiar with, a driver issue, or a mistake I've made in configuring or dispatching my compute (I dispatch 160x90x1 of my 8x8x1 blocks to cover my 1280×720 framebuffer texture).

I’ve found a few similar issues like this one on SE and the general internet, but they seem to almost exclusively be caused by using uninitialized variables, which I am not doing as far as I can tell. They mention that the pattern continue to move when viewed in the NSight debugger; while RenderDoc doesn’t do that, the contents of the image do vary between draw calls even after the compute shader has finished. E.g. when inspecting the image in the compute draw call there is one pattern of artifacts, but when I scrub to the subsequent draw calls which use my image as input, the pattern in the image has changed.

I also found this post which seems very similar, but that one also seems to be caused by an uninitialized variable, which again I’ve been careful to avoid. I’ve also not been able to alleviate the issue by tweaking the code as they have done.

I’m running the latest NVidia drivers (461.92) on a GTX 1070. I’ve tried inserting glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT); (as well as some of the other barrier types) after my compute shader dispatch which I believe is the correct barrier to use if using a sampler2D to draw a texture that was previously modified by an image load/store operation, but it doesn’t seem to change anything.

Odds are that the cause of the problem lies somewhere between my chair and keyboard, but this kind of problem lies outside my usual shader debugging abilities as I’m relatively new to OpenGL. Any ideas would be appreciated! Thanks.

c – Implementation of the DDA algorithm. Raycasting

I am making a simple game like Wolfenstein 3D in C using raycasting. To calculate the length of the rays, I use the DDA algorithm. Below is the part of the code that calculates the length of the rays and the height of the wall slice on the vertical line where the ray hit. How can I optimize and improve my code?


/*
**      Function:           void        calculate()
**
**      Arguments:          main struct and i
**
**      return:             void
**
**      Description:        The raycasting loop is a for loop that goes through every x,
** so there is a calculation for every  vertical stripe of the screen.
*/

void            calculate(t_cub3d *cub)
{
    int i;

    i = 0;
    while (i < cub->window.res_width)
    {
        calculate_cam(cub, &i);
// field_x and field_y represent the current square of the map the ray is in.
        cub->field.field_x = cub->player.x_pos;
        cub->field.field_y = cub->player.y_pos;
        calculate_ray_dir(cub);
        calculate_step(cub);
        calculate_wall(cub);
        calculate_height(cub);
        draw(cub, i);
        i++;
    }
    calculate_sprite(cub);
}


/*
**      Function:           void        calculate_cam()
**
**      Arguments:          main struct, variable counter(width)
**
**      return:             void
**
**      Description:        x_camera is the x-coordinate on the camera plane
** that the current x-coordinate of the screen represents, done this way
** so that the right side of the screen will get coordinate 1, the center
** of the screen gets coordinate 0, and the left side of the screen gets coordinate -1
*/

void            calculate_cam(t_cub3d *cub, int *i)
{
    cub->camera.x_camera = 2 * *i / (double)(cub->window.res_width) - 1;
    cub->ray.dir_ray_x = cub->player.x_dir + cub->camera.x_plane * 
            cub->camera.x_camera;
    cub->ray.dir_ray_y = cub->player.y_dir + cub->camera.y_plane * 
            cub->camera.x_camera;
}


/*
**      Function:           void        calculate_ray_dir()
**
**      Arguments:          main struct
**
**      return:             void
**
**      Description:        x_deltaDist and y_deltaDist are the distance the ray
** has to travel to go from 1 x-side to the next x-side, or from 1 y-side
** to the next y-side.
*/

void            calculate_ray_dir(t_cub3d *cub)
{
    if (cub->ray.dir_ray_y == 0)
        cub->ray.x_deltadist = 0;
    else
    {
        if (cub->ray.dir_ray_x == 0)
            cub->ray.x_deltadist = 1;
        else
            cub->ray.x_deltadist = fabs(1 / cub->ray.dir_ray_x);
    }
    if (cub->ray.dir_ray_x == 0)
        cub->ray.y_deltadist = 0;
    else
    {
        if (cub->ray.dir_ray_y == 0)
            cub->ray.y_deltadist = 1;
        else
            cub->ray.y_deltadist = fabs(1 / cub->ray.dir_ray_y);
    }
}


/*
**      Function:           void        calculate_step()
**
**      Arguments:          main struct
**
**      return:             void
**
**      Description:        x_sideDist and y_sideDist are initially the distance
** the ray has to travel from its start position to the first x-side and
** the first y-side.
*/

void            calculate_step(t_cub3d *cub)
{
    if (cub->ray.dir_ray_x < 0)
    {
        cub->ray.x_ray_step = -1;
        cub->ray.x_sidedist = (cub->player.x_pos - (double)(cub->field.field_x))
                * cub->ray.x_deltadist;
    }
    else
    {
        cub->ray.x_ray_step = 1;
        cub->ray.x_sidedist = (((double)(cub->field.field_x) + 1.0 - 
                cub->player.x_pos) * cub->ray.x_deltadist);
    }
    if (cub->ray.dir_ray_y < 0)
    {
        cub->ray.y_ray_step = -1;
        cub->ray.y_sidedist = (cub->player.y_pos - (double)(cub->field.field_y))
                * cub->ray.y_deltadist;
    }
    else
    {
        cub->ray.y_ray_step = 1;
        cub->ray.y_sidedist = ((double)(cub->field.field_y) + 1.0 - 
                cub->player.y_pos) * cub->ray.y_deltadist;
    }
}


/*
**      Function:           void        calculate_wall()
**
**      Arguments:          main struct
**
**      return:             void
**
**      Description:         DDA algorithm. It's a loop that increments the ray with 1 square
**      every time, until a wall is hit.
*/

void            calculate_wall(t_cub3d *cub)
{
    int     is_wall;

    is_wall = 0;
    cub->window.side = 0;
    while (is_wall == 0)
    {
        if (cub->ray.x_sidedist < cub->ray.y_sidedist)
        {
            cub->ray.x_sidedist += cub->ray.x_deltadist;
            cub->field.field_x += cub->ray.x_ray_step;
            cub->window.side = 0;
        }
        else
        {
            cub->ray.y_sidedist += cub->ray.y_deltadist;
            cub->field.field_y += cub->ray.y_ray_step;
            cub->window.side = 1;
        }
        if (cub->field.map[cub->field.field_y][cub->field.field_x] == '1')
            is_wall = 1;
    }
    calculate_distto_wall(cub);
}


/*
**      Function:           void        calculate_distto_wall()
**
**      Arguments:          main struct
**
**      return:             void
**
**      Description:        calculate the distance of the ray to the wall
*/

void            calculate_distto_wall(t_cub3d *cub)
{
    if (cub->window.side == 0)
    {
        cub->ray.wall_dist = ((double)(cub->field.field_x) - cub->player.x_pos 
                + (1 - cub->ray.x_ray_step) / 2) / cub->ray.dir_ray_x;
    }
    else
    {
        cub->ray.wall_dist = ((double)(cub->field.field_y) - cub->player.y_pos 
                + (1 - cub->ray.y_ray_step) / 2) / cub->ray.dir_ray_y;
    }
}


/*
**      Function:           void        calculate_height()
**
**      Arguments:          main struct
**
**      return:             void
**
**      Description:        calculate the height of the line that has to be
**      drawn on screen
*/

void            calculate_height(t_cub3d *cub)
{
    cub->window.height_ln = (int)(cub->window.res_height / cub->ray.wall_dist);
    cub->window.top_wall = (cub->window.height_ln * -1) / 2 + 
            cub->window.res_height / 2;
    if (cub->window.top_wall < 0)
        cub->window.top_wall = 0;
    cub->window.bottom_wall = cub->window.height_ln / 2 + 
            cub->window.res_height / 2;
    if (cub->window.bottom_wall >= cub->window.res_height)
        cub->window.bottom_wall = cub->window.res_height - 1;
}

mathematics – Triangle area selection using BVH raycasting

Let’s assume I’m already storing a BVH acceleration structure per mesh on my engine and I’d like start
making area selection like on many DCC tools out there, for instance, blender.

With Blender you can do {rectangular, circle, lasso} selection to select faces, and I'd like to understand the maths behind these algorithms. If I were using color-picking techniques these sorts of algorithms would be obvious, but when using BVH + raycasting I'm still failing to understand the algorithm/maths behind it.

So, could you please explain the logic behind those? For instance, I assume rectangular selection should be easier to implement than circle/lasso, so how would you gather all screen-projected triangles lying inside the rectangular selection created by the mouse?
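
For the rectangular case specifically, one common approach (sketched below in plain C, with a minimal, hypothetical mat4/vec4 convention) is to transform each candidate triangle's vertices by the view-projection matrix, perspective-divide into normalized device coordinates, map to pixel coordinates, and test those points against the mouse rectangle. The BVH can then be used to reject whole subtrees whose projected bounds fall outside the rectangle; circle and lasso selection swap the point-in-rectangle test for a point-in-circle or point-in-polygon test.

typedef struct { float x, y, z, w; } vec4;
typedef struct { float m[4][4]; } mat4;                 /* row-major, hypothetical convention */
typedef struct { float xmin, ymin, xmax, ymax; } rect2; /* mouse selection rectangle, in pixels */

static vec4 mat4_mul_point(const mat4 M, const vec4 p)
{
    vec4 r;
    r.x = M.m[0][0]*p.x + M.m[0][1]*p.y + M.m[0][2]*p.z + M.m[0][3]*p.w;
    r.y = M.m[1][0]*p.x + M.m[1][1]*p.y + M.m[1][2]*p.z + M.m[1][3]*p.w;
    r.z = M.m[2][0]*p.x + M.m[2][1]*p.y + M.m[2][2]*p.z + M.m[2][3]*p.w;
    r.w = M.m[3][0]*p.x + M.m[3][1]*p.y + M.m[3][2]*p.z + M.m[3][3]*p.w;
    return r;
}

/* Project a world-space point (w forced to 1) to pixel coordinates; returns 0 if it is behind the camera. */
static int project_to_screen(const mat4 viewproj, vec4 p, float width, float height, float *sx, float *sy)
{
    p.w = 1.0f;
    const vec4 clip = mat4_mul_point(viewproj, p);
    if (clip.w <= 0.0f)
        return 0;
    const float ndc_x = clip.x / clip.w;                /* -1 .. +1 */
    const float ndc_y = clip.y / clip.w;
    *sx = (ndc_x * 0.5f + 0.5f) * width;
    *sy = (1.0f - (ndc_y * 0.5f + 0.5f)) * height;      /* flip so y grows downwards, like mouse coordinates */
    return 1;
}

/* Strict containment: selected only if all three projected vertices are inside the rectangle.
   (A looser rule would test for any overlap between the rectangle and the projected triangle.) */
static int triangle_in_rect(const mat4 viewproj, const vec4 v[3], float width, float height, const rect2 r)
{
    for (int i = 0; i < 3; i++) {
        float sx, sy;
        if (!project_to_screen(viewproj, v[i], width, height, &sx, &sy))
            return 0;
        if (sx < r.xmin || sx > r.xmax || sy < r.ymin || sy > r.ymax)
            return 0;
    }
    return 1;
}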

raycasting – Unity Raycast works only once – ignores NavMeshAgents

I’m new to unity and just trying to build a game.

I’ve been trying to use Raycast to detect if an enemy is next to the player and I can’t seem to get the Physics.Raycast working properly.

Everything works fine in the Scene view and at the beginning of Play mode, but then it stops responding.

I checked threads with similar issues, but it seems I have all of those covered (the most common issue is also addressed: the objects I am attempting to detect have rigidbodies and box colliders attached to them).

This is the script I’m working with.

public class hitPlayer : MonoBehaviour
{
   

    public GameObject player;
    public Transform m_transform;
    Animator playerAnimator;
    int layerMask;
    RaycastHit m_Hit;
    float maxDistance = 5f;


void Start()
{
    layerMask = 1 << 8;
    layerMask = ~layerMask;

 
    playerAnimator = player.GetComponent<Animator>();
}
public void onTouch()
{

    
    
    bool isHit = Physics.Raycast(m_transform.position, -m_transform.right, out m_Hit, maxDistance, layerMask);

    if (isHit)
    {
        playerAnimator.SetTrigger("leftHit");
    }
    else
    {
        playerAnimator.SetTrigger("rightHit");
    }

}
void OnDrawGizmos()
{


    bool isHit = Physics.Raycast(m_transform.position, -m_transform.right, out m_Hit, maxDistance, layerMask);
    Debug.Log(isHit);
    if (isHit)
    {
        Gizmos.color = Color.red;
        Gizmos.DrawRay(m_transform.position, -m_transform.right * m_Hit.distance);
    }
    else
    {
        Gizmos.color = Color.green;
        Gizmos.DrawRay(m_transform.position, -m_transform.right * maxDistance);
    }
}
}

Raycast stays green

Raycast stays green view2

I would highly appreciate your assistance in fixing this issue, i.e. getting the raycast to work correctly throughout the game. Thanks in advance.

javascript – Raycasting – weird lines on walls when ray is facing up or right

I am trying to make a raycasting game. Everything is rendered correctly, except that when the ray is facing up (angle > PI) or facing right (angle > 0.5PI and < 1.5PI), lines are drawn on the walls. I am not sure what is causing this, but I know that the lines are not affected by the rotation of the player, only by the player's position. I also tried rounding the ray's position, but that did not help.

Right and up ray walls.

The walls up close.

Left and down ray walls.

Code:

        let rayX, rayY, rayAngle, rayDeltaX, rayDeltaY

        for (let i = 0; i < this.screen.width; i ++) {
            rayAngle = this.angle - this.fov / 2 + i * (this.fov / this.screen.width)
            
            if (rayAngle < 0) {
                rayAngle += Math.PI * 2
            }
            else if (rayAngle > Math.PI * 2) {
                rayAngle -= Math.PI * 2
            }

            rayX = this.x
            rayY = this.y

            let stepY

            if (rayAngle > Math.PI) {
                stepY = -this.tileSize
                rayY = Math.floor(rayY / this.tileSize) * this.tileSize - 1
            }
            else {
                stepY = this.tileSize
                rayY = Math.floor(rayY / this.tileSize) * this.tileSize + this.tileSize
            }

            rayX = this.x  + (rayY - this.y) / Math.tan(rayAngle)
         
            rayDeltaY = stepY
            rayDeltaX = stepY / Math.tan(rayAngle)

            while(true) {
                if (this.Map.map[Math.floor(rayY / this.tileSize) * this.Map.width + Math.floor(rayX / this.tileSize)] == "#") {
                    break
                }

                rayX += rayDeltaX
                rayY += rayDeltaY
            }

            let rayHorizontalX = rayX
            let rayHorizontalY = rayY
            let rayDistanceHorizontal = Math.sqrt((this.x - rayHorizontalX) ** 2 + (this.y - rayHorizontalY) ** 2)

            rayX = this.x
            rayY = this.y

            let stepX

            if (rayAngle > 0.5 * Math.PI && rayAngle < 1.5 * Math.PI) {
                stepX = -this.tileSize
                rayX = Math.floor(rayX / this.tileSize) * this.tileSize - 1
            }
            else {
                stepX = this.tileSize
                rayX = Math.floor(rayX / this.tileSize) * this.tileSize + this.tileSize
            }

            rayY = this.y + (rayX - this.x) * Math.tan(rayAngle)
    
            rayDeltaY = stepX * Math.tan(rayAngle)
            rayDeltaX = stepX

            while(true) {
                if (this.Map.map[Math.floor(rayY / this.tileSize) * this.Map.width + Math.floor(rayX / this.tileSize)] == "#") {
                    break
                }
    
                rayX += rayDeltaX
                rayY += rayDeltaY
            }
            
            let rayVerticalX = rayX
            let rayVerticalY = rayY
            let rayDistanceVertical = Math.sqrt((this.x - rayVerticalX) ** 2 + (this.y - rayVerticalY) ** 2)
            
            let rayFinalDistance

            if (rayDistanceHorizontal < rayDistanceVertical) {
                rayFinalDistance = rayDistanceHorizontal
                ctx.fillStyle = 'darkblue'
            }
            else {
                rayFinalDistance = rayDistanceVertical
                ctx.fillStyle = 'blue'
            }

            let rayCorrectedDistance = rayFinalDistance * Math.cos(rayAngle - this.angle)

            let lineHeight = this.tileSize * (this.screen.width / 2 / Math.tan(this.fov / 2)) / rayCorrectedDistance
            let lineBottom = this.projectionPlane.centerY + lineHeight * 0.5
            let lineTop = this.projectionPlane.height - lineBottom

            ctx.fillRect(i, lineTop, 1, lineHeight)
        }

Any help would be appreciated.

raycasting – What's the mobile touch version of Raycast2D and Input.GetAxis Mouse X and Y in Unity?

I tried

      float pointer_x = Input.GetAxis("Mouse X");
      float pointer_y = Input.GetAxis("Mouse Y");
      if (Input.touchCount > 0)
      {
          pointer_x = Input.touches[0].deltaPosition.x;
          pointer_y = Input.touches[0].deltaPosition.y;
      } 


for Mouse X and Y, but it didn't give the results I wanted. I basically have shapes that I want to move on the X and Y axes: when I move up and down they should only move on the Y axis, and when I move left and right they should only move on the X axis.

I'm not sure about the mobile touch version of this Raycast2D code either.

         Vector2 worldPoint = Camera.main.ScreenToWorldPoint(Input.mousePosition);
 
         RaycastHit2D hit = Physics2D.Raycast(worldPoint, Vector2.zero);
 
         if (Input.GetMouseButtonDown(0))
         {
 
             for (int i = 0; i < empty.Length; i++)
             {
                 empty[i] = true;
             }
 
 
             if (hit.collider != null)
             {
                 startPos = transform.position;
 
                 offset = gameObject.transform.position -
                          Camera.main.ScreenToWorldPoint(
                          new Vector3(Input.mousePosition.x,
                          Input.mousePosition.y, screenPoint.z));
             }
         }
 
         if (Input.GetMouseButton(0))
         {
 
             if (hit.collider != null)
             {
 
                 Vector3 curScreenPoint = new Vector3(Input.mousePosition.x, Input.mousePosition.y, screenPoint.z);
                 Vector3 curPosition = Camera.main.ScreenToWorldPoint(curScreenPoint) + offset;
                 
 
                 if (!posY)
                 {
                     if (Input.GetAxis("Mouse X") > 0.1 ||
                         Input.GetAxis("Mouse X") < -0.1)
                     {
                         posX = true;
 
                         if (Mathf.Round(transform.position.y * 2f) * 0.5f == Mathf.Round(hit.transform.position.y * 2f) * 0.5f)
                         {
                             transform.position = new Vector3(curPosition.x, startPos.y, curPosition.z);
 
                         }
                     }
                 }
 
                 if (!posX)
                 {
                     if (Input.GetAxis("Mouse Y") > 0.1 ||
                         Input.GetAxis("Mouse Y") < -0.1)
                     {
                         posY = true;
                       
                         if (Mathf.Round(transform.position.x * 2f) * 0.5f == Mathf.Round(hit.transform.position.x * 2f) * 0.5f)
                         {
                             transform.position = new Vector3(startPos.x, curPosition.y, curPosition.z);
                         }
                        
                     }
                 }
 
                 Debug.Log(Mathf.Round(transform.position.y * 2f) * 0.5f + " " + Mathf.Round(hit.transform.position.y * 2f) * 0.5f);
             }
 
 
         }
 
         if (Input.GetMouseButtonUp(0))
         {
             posX = false;
             posY = false;
 
             if (hit.collider != null)
             {
                 transform.position = new Vector3(outputX, outputY);
                 Debug.Log(Mathf.Sign(hit.transform.position.x) * (Mathf.Abs((int)hit.transform.position.x) + 0.5f));
 
                 Debug.Log(Mathf.Round(transform.position.y * 2f) * 0.5f + " " + Mathf.Round(hit.transform.position.y * 2f) * 0.5f);
             }
 
             if (Mathf.Sign(transform.position.x) * (Mathf.Abs((int)transform.position.x) + 0.5f) >= 2.5f ||
                 Mathf.Sign(transform.position.x) * (Mathf.Abs((int)transform.position.x) + 0.5f) <= -2.5f ||
                 Mathf.Sign(transform.position.y) * (Mathf.Abs((int)transform.position.y) + 0.5f) >= 1.5f ||
                 Mathf.Sign(transform.position.y) * (Mathf.Abs((int)transform.position.y) + 0.5f) <= -4.5f)
             {
                 Destroy(gameObject);
             }
 
             clicked = false;
         }
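
For reference, a minimal sketch of the touch equivalents asked about above, assuming a single-touch drag: Touch.deltaPosition stands in for the Mouse X / Mouse Y axes (it is in pixels per frame, so a scale factor is usually needed to get comparable magnitudes), the Physics2D.Raycast simply uses the touch position instead of Input.mousePosition, and TouchPhase.Began/Moved/Ended take the roles of GetMouseButtonDown/GetMouseButton/GetMouseButtonUp.

using UnityEngine;

public class TouchDragProbe : MonoBehaviour
{
    void Update()
    {
        if (Input.touchCount == 0)
            return;

        Touch touch = Input.GetTouch(0);

        // Rough stand-in for Input.GetAxis("Mouse X") / ("Mouse Y"); the 0.1f scale is a placeholder.
        float pointerX = touch.deltaPosition.x * 0.1f;
        float pointerY = touch.deltaPosition.y * 0.1f;

        // Touch version of the mouse-position raycast from the question.
        Vector2 worldPoint = Camera.main.ScreenToWorldPoint(touch.position);
        RaycastHit2D hit = Physics2D.Raycast(worldPoint, Vector2.zero);

        if (touch.phase == TouchPhase.Began && hit.collider != null)
        {
            // Same role as Input.GetMouseButtonDown(0): remember the start position / offset here.
        }
        else if (touch.phase == TouchPhase.Moved && hit.collider != null)
        {
            // Same role as Input.GetMouseButton(0): decide the drag axis from the deltas.
            if (Mathf.Abs(pointerX) > Mathf.Abs(pointerY))
            {
                // constrain movement to the X axis
            }
            else
            {
                // constrain movement to the Y axis
            }
        }
        else if (touch.phase == TouchPhase.Ended)
        {
            // Same role as Input.GetMouseButtonUp(0): snap / finalize here.
        }
    }
}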

raycasting – Plotting a pseudo 3D globe

I would like to plot a small globe where the main character can walk, exactly like the Sonic & Knuckles Bonus Stage. For those who won't google it, here is a picture:

https://images.app.goo.gl/X16ouk1UKnMPUi1SA

I am familiar with Raycasting, used in Wolfenstein and other games to “simulate” a 3D perspective in a 2D map.

But I could not figure out how to program the globe effect, and I suppose it was not real 3D rendering, as Sonic was a 16-bit game.

Any idea how to do it without any sophisticated 3D engine?