Merge branch 'memory-tweaks' into 'master'

Hanicef's allocator changes, simplified memcpy, and fixed alignment

See merge request KartKrew/Kart!1530
Sal 2023-09-25 21:42:23 +00:00
commit 0ae5071bbb
13 changed files with 57 additions and 757 deletions
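
In short: M_Memcpy is no longer a function pointer that M_SetupMemcpy() aimed at hand-written MMX/3DNow/SSE routines; it is now an ordinary function that defers to the C runtime. A condensed before/after sketch, paraphrased from the hunks below rather than quoted verbatim:

/* before: an extern function pointer, selected at startup by M_SetupMemcpy() */
extern void *(*M_Memcpy)(void* dest, const void* src, size_t n);

/* after: a single plain definition in m_memcpy.c */
void *M_Memcpy(void* dest, const void* src, size_t n)
{
	memcpy(dest, src, n);
	return dest;
}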

View file

@@ -233,11 +233,6 @@ INT32 I_mkdir(const char *dirname, INT32 unixright)
return -1;
}
const CPUInfoFlags *I_CPUInfo(void)
{
return NULL;
}
const char *I_LocateWad(void)
{
return "/sdcard/srb2";

View file

@@ -493,7 +493,7 @@ extern char gpbackup[256];
#define M_GetText(x) (x)
#endif
void M_StartupLocale(void);
extern void *(*M_Memcpy)(void* dest, const void* src, size_t n) FUNCNONNULL;
void *M_Memcpy(void* dest, const void* src, size_t n);
char *va(const char *format, ...) FUNCPRINTF;
char *M_GetToken(const char *inputString);

View file

@@ -138,11 +138,6 @@ INT32 I_mkdir(const char *dirname, INT32 unixright)
return -1;
}
const CPUInfoFlags *I_CPUInfo(void)
{
return NULL;
}
const char *I_LocateWad(void)
{
return NULL;

View file

@@ -280,40 +280,6 @@ char *I_GetUserName(void);
*/
INT32 I_mkdir(const char *dirname, INT32 unixright);
struct CPUInfoFlags {
int FPU : 1; ///< FPU availabile
int CPUID : 1; ///< CPUID instruction
int RDTSC : 1; ///< RDTSC instruction
int MMX : 1; ///< MMX features
int MMXExt : 1; ///< MMX Ext. features
int CMOV : 1; ///< Pentium Pro's "cmov"
int AMD3DNow : 1; ///< 3DNow features
int AMD3DNowExt: 1; ///< 3DNow! Ext. features
int SSE : 1; ///< SSE features
int SSE2 : 1; ///< SSE2 features
int SSE3 : 1; ///< SSE3 features
int IA64 : 1; ///< Running on IA64
int AMD64 : 1; ///< Running on AMD64
int AltiVec : 1; ///< AltiVec features
int FPPE : 1; ///< floating-point precision error
int PFC : 1; ///< TBD?
int cmpxchg : 1; ///< ?
int cmpxchg16b : 1; ///< ?
int cmp8xchg16 : 1; ///< ?
int FPE : 1; ///< FPU Emu
int DEP : 1; ///< Data excution prevent
int PPCMM64 : 1; ///< PowerPC Movemem 64bit ok?
int ALPHAbyte : 1; ///< ?
int PAE : 1; ///< Physical Address Extension
int CPUs : 8;
};
/** \brief Info about CPU
\return CPUInfo in bits
*/
const CPUInfoFlags *I_CPUInfo(void);
/** \brief Find main WAD
\return path to main WAD
*/

View file

@@ -91,7 +91,7 @@ huddrawlist_h LUA_HUD_CreateDrawList(void)
{
huddrawlist_h drawlist;
drawlist = (huddrawlist_h) Z_CallocAlign(sizeof(struct huddrawlist_s), PU_STATIC, NULL, 64);
drawlist = (huddrawlist_h) Z_Calloc(sizeof(struct huddrawlist_s), PU_STATIC, NULL);
drawlist->items = NULL;
drawlist->items_capacity = 0;
drawlist->items_len = 0;
@@ -152,7 +152,7 @@ static size_t AllocateDrawItem(huddrawlist_h list)
{
if (list->items_capacity == 0) list->items_capacity = 128;
else list->items_capacity *= 2;
list->items = (drawitem_t *) Z_ReallocAlign(list->items, sizeof(struct drawitem_s) * list->items_capacity, PU_STATIC, NULL, 64);
list->items = (drawitem_t *) Z_Realloc(list->items, sizeof(struct drawitem_s) * list->items_capacity, PU_STATIC, NULL);
}
return list->items_len++;
@@ -171,7 +171,7 @@ static size_t CopyString(huddrawlist_h list, const char* str)
{
if (list->strbuf_capacity == 0) list->strbuf_capacity = 256;
else list->strbuf_capacity *= 2;
list->strbuf = (char*) Z_ReallocAlign(list->strbuf, sizeof(char) * list->strbuf_capacity, PU_STATIC, NULL, 8);
list->strbuf = (char*) Z_Realloc(list->strbuf, sizeof(char) * list->strbuf_capacity, PU_STATIC, NULL);
}
{

View file

@@ -9,432 +9,13 @@
// See the 'LICENSE' file for more details.
//-----------------------------------------------------------------------------
/// \file m_memcpy.c
/// \brief X86 optimized implementations of M_Memcpy
/// \brief (formerly) X86 optimized implementations of M_Memcpy
#include "doomdef.h"
#include "m_misc.h"
#if defined (__GNUC__) && defined (__i386__) // from libkwave, under GPL
// Alam: note libkwave memcpy code comes from mplayer's libvo/aclib_template.c, r699
/* for small memory blocks (<256 bytes) this version is faster */
#define small_memcpy(dest,src,n)\
{\
register unsigned long int dummy;\
__asm__ __volatile__(\
"cld\n\t"\
"rep; movsb"\
:"=&D"(dest), "=&S"(src), "=&c"(dummy)\
:"0" (dest), "1" (src),"2" (n)\
: "memory", "cc");\
}
/* linux kernel __memcpy (from: /include/asm/string.h) */
ATTRINLINE static FUNCINLINE void *__memcpy (void *dest, const void * src, size_t n)
void *M_Memcpy(void* dest, const void* src, size_t n)
{
int d0, d1, d2;
if ( n < 4 )
{
small_memcpy(dest, src, n);
}
else
{
__asm__ __volatile__ (
"rep ; movsl;"
"testb $2,%b4;"
"je 1f;"
"movsw;"
"1:\ttestb $1,%b4;"
"je 2f;"
"movsb;"
"2:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
:"0" (n/4), "q" (n),"1" ((long) dest),"2" ((long) src)
: "memory");
}
memcpy(dest, src, n);
return dest;
}
#define SSE_MMREG_SIZE 16
#define MMX_MMREG_SIZE 8
#define MMX1_MIN_LEN 0x800 /* 2K blocks */
#define MIN_LEN 0x40 /* 64-byte blocks */
/* SSE note: i tried to move 128 bytes a time instead of 64 but it
didn't make any measureable difference. i'm using 64 for the sake of
simplicity. [MF] */
static /*FUNCTARGET("sse2")*/ void *sse_cpy(void * dest, const void * src, size_t n)
{
void *retval = dest;
size_t i;
/* PREFETCH has effect even for MOVSB instruction ;) */
__asm__ __volatile__ (
"prefetchnta (%0);"
"prefetchnta 32(%0);"
"prefetchnta 64(%0);"
"prefetchnta 96(%0);"
"prefetchnta 128(%0);"
"prefetchnta 160(%0);"
"prefetchnta 192(%0);"
"prefetchnta 224(%0);"
"prefetchnta 256(%0);"
"prefetchnta 288(%0);"
: : "r" (src) );
if (n >= MIN_LEN)
{
register unsigned long int delta;
/* Align destinition to MMREG_SIZE -boundary */
delta = ((unsigned long int)dest)&(SSE_MMREG_SIZE-1);
if (delta)
{
delta=SSE_MMREG_SIZE-delta;
n -= delta;
small_memcpy(dest, src, delta);
}
i = n >> 6; /* n/64 */
n&=63;
if (((unsigned long)src) & 15)
/* if SRC is misaligned */
for (; i>0; i--)
{
__asm__ __volatile__ (
"prefetchnta 320(%0);"
"prefetchnta 352(%0);"
"movups (%0), %%xmm0;"
"movups 16(%0), %%xmm1;"
"movups 32(%0), %%xmm2;"
"movups 48(%0), %%xmm3;"
"movntps %%xmm0, (%1);"
"movntps %%xmm1, 16(%1);"
"movntps %%xmm2, 32(%1);"
"movntps %%xmm3, 48(%1);"
:: "r" (src), "r" (dest) : "memory");
src = (const unsigned char *)src + 64;
dest = (unsigned char *)dest + 64;
}
else
/*
Only if SRC is aligned on 16-byte boundary.
It allows to use movaps instead of movups, which required data
to be aligned or a general-protection exception (#GP) is generated.
*/
for (; i>0; i--)
{
__asm__ __volatile__ (
"prefetchnta 320(%0);"
"prefetchnta 352(%0);"
"movaps (%0), %%xmm0;"
"movaps 16(%0), %%xmm1;"
"movaps 32(%0), %%xmm2;"
"movaps 48(%0), %%xmm3;"
"movntps %%xmm0, (%1);"
"movntps %%xmm1, 16(%1);"
"movntps %%xmm2, 32(%1);"
"movntps %%xmm3, 48(%1);"
:: "r" (src), "r" (dest) : "memory");
src = ((const unsigned char *)src) + 64;
dest = ((unsigned char *)dest) + 64;
}
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory");
/* enables to use FPU */
__asm__ __volatile__ ("emms":::"memory");
}
/*
* Now do the tail of the block
*/
if (n) __memcpy(dest, src, n);
return retval;
}
static FUNCTARGET("mmx") void *mmx2_cpy(void *dest, const void *src, size_t n)
{
void *retval = dest;
size_t i;
/* PREFETCH has effect even for MOVSB instruction ;) */
__asm__ __volatile__ (
"prefetchnta (%0);"
"prefetchnta 32(%0);"
"prefetchnta 64(%0);"
"prefetchnta 96(%0);"
"prefetchnta 128(%0);"
"prefetchnta 160(%0);"
"prefetchnta 192(%0);"
"prefetchnta 224(%0);"
"prefetchnta 256(%0);"
"prefetchnta 288(%0);"
: : "r" (src));
if (n >= MIN_LEN)
{
register unsigned long int delta;
/* Align destinition to MMREG_SIZE -boundary */
delta = ((unsigned long int)dest)&(MMX_MMREG_SIZE-1);
if (delta)
{
delta=MMX_MMREG_SIZE-delta;
n -= delta;
small_memcpy(dest, src, delta);
}
i = n >> 6; /* n/64 */
n&=63;
for (; i>0; i--)
{
__asm__ __volatile__ (
"prefetchnta 320(%0);"
"prefetchnta 352(%0);"
"movq (%0), %%mm0;"
"movq 8(%0), %%mm1;"
"movq 16(%0), %%mm2;"
"movq 24(%0), %%mm3;"
"movq 32(%0), %%mm4;"
"movq 40(%0), %%mm5;"
"movq 48(%0), %%mm6;"
"movq 56(%0), %%mm7;"
"movntq %%mm0, (%1);"
"movntq %%mm1, 8(%1);"
"movntq %%mm2, 16(%1);"
"movntq %%mm3, 24(%1);"
"movntq %%mm4, 32(%1);"
"movntq %%mm5, 40(%1);"
"movntq %%mm6, 48(%1);"
"movntq %%mm7, 56(%1);"
:: "r" (src), "r" (dest) : "memory");
src = ((const unsigned char *)src) + 64;
dest = ((unsigned char *)dest) + 64;
}
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory");
__asm__ __volatile__ ("emms":::"memory");
}
/*
* Now do the tail of the block
*/
if (n) __memcpy(dest, src, n);
return retval;
}
static FUNCTARGET("mmx") void *mmx1_cpy(void *dest, const void *src, size_t n) //3DNOW
{
void *retval = dest;
size_t i;
/* PREFETCH has effect even for MOVSB instruction ;) */
__asm__ __volatile__ (
"prefetch (%0);"
"prefetch 32(%0);"
"prefetch 64(%0);"
"prefetch 96(%0);"
"prefetch 128(%0);"
"prefetch 160(%0);"
"prefetch 192(%0);"
"prefetch 224(%0);"
"prefetch 256(%0);"
"prefetch 288(%0);"
: : "r" (src));
if (n >= MMX1_MIN_LEN)
{
register unsigned long int delta;
/* Align destinition to MMREG_SIZE -boundary */
delta = ((unsigned long int)dest)&(MMX_MMREG_SIZE-1);
if (delta)
{
delta=MMX_MMREG_SIZE-delta;
n -= delta;
small_memcpy(dest, src, delta);
}
i = n >> 6; /* n/64 */
n&=63;
for (; i>0; i--)
{
__asm__ __volatile__ (
"prefetch 320(%0);"
"prefetch 352(%0);"
"movq (%0), %%mm0;"
"movq 8(%0), %%mm1;"
"movq 16(%0), %%mm2;"
"movq 24(%0), %%mm3;"
"movq 32(%0), %%mm4;"
"movq 40(%0), %%mm5;"
"movq 48(%0), %%mm6;"
"movq 56(%0), %%mm7;"
"movq %%mm0, (%1);"
"movq %%mm1, 8(%1);"
"movq %%mm2, 16(%1);"
"movq %%mm3, 24(%1);"
"movq %%mm4, 32(%1);"
"movq %%mm5, 40(%1);"
"movq %%mm6, 48(%1);"
"movq %%mm7, 56(%1);"
:: "r" (src), "r" (dest) : "memory");
src = ((const unsigned char *)src) + 64;
dest = ((unsigned char *)dest) + 64;
}
__asm__ __volatile__ ("femms":::"memory"); // same as mmx_cpy() but with a femms
}
/*
* Now do the tail of the block
*/
if (n) __memcpy(dest, src, n);
return retval;
}
#endif
// Alam: why? memcpy may be __cdecl/_System and our code may be not the same type
static void *cpu_cpy(void *dest, const void *src, size_t n)
{
if (src == NULL)
{
CONS_Debug(DBG_MEMORY, "Memcpy from 0x0?!: %p %p %s\n", dest, src, sizeu1(n));
return dest;
}
if(dest == NULL)
{
CONS_Debug(DBG_MEMORY, "Memcpy to 0x0?!: %p %p %s\n", dest, src, sizeu1(n));
return dest;
}
return memcpy(dest, src, n);
}
static /*FUNCTARGET("mmx")*/ void *mmx_cpy(void *dest, const void *src, size_t n)
{
#if defined (_MSC_VER) && defined (_X86_)
_asm
{
mov ecx, [n]
mov esi, [src]
mov edi, [dest]
shr ecx, 6 // mit mmx: 64bytes per iteration
jz lower_64 // if lower than 64 bytes
loop_64: // MMX transfers multiples of 64bytes
movq mm0, 0[ESI] // read sources
movq mm1, 8[ESI]
movq mm2, 16[ESI]
movq mm3, 24[ESI]
movq mm4, 32[ESI]
movq mm5, 40[ESI]
movq mm6, 48[ESI]
movq mm7, 56[ESI]
movq 0[EDI], mm0 // write destination
movq 8[EDI], mm1
movq 16[EDI], mm2
movq 24[EDI], mm3
movq 32[EDI], mm4
movq 40[EDI], mm5
movq 48[EDI], mm6
movq 56[EDI], mm7
add esi, 64
add edi, 64
dec ecx
jnz loop_64
emms // close mmx operation
lower_64:// transfer rest of buffer
mov ebx,esi
sub ebx,src
mov ecx,[n]
sub ecx,ebx
shr ecx, 3 // multiples of 8 bytes
jz lower_8
loop_8:
movq mm0, [esi] // read source
movq [edi], mm0 // write destination
add esi, 8
add edi, 8
dec ecx
jnz loop_8
emms // close mmx operation
lower_8:
mov ebx,esi
sub ebx,src
mov ecx,[n]
sub ecx,ebx
rep movsb
mov eax, [dest] // return dest
}
#elif defined (__GNUC__) && defined (__i386__)
void *retval = dest;
size_t i;
if (n >= MMX1_MIN_LEN)
{
register unsigned long int delta;
/* Align destinition to MMREG_SIZE -boundary */
delta = ((unsigned long int)dest)&(MMX_MMREG_SIZE-1);
if (delta)
{
delta=MMX_MMREG_SIZE-delta;
n -= delta;
small_memcpy(dest, src, delta);
}
i = n >> 6; /* n/64 */
n&=63;
for (; i>0; i--)
{
__asm__ __volatile__ (
"movq (%0), %%mm0;"
"movq 8(%0), %%mm1;"
"movq 16(%0), %%mm2;"
"movq 24(%0), %%mm3;"
"movq 32(%0), %%mm4;"
"movq 40(%0), %%mm5;"
"movq 48(%0), %%mm6;"
"movq 56(%0), %%mm7;"
"movq %%mm0, (%1);"
"movq %%mm1, 8(%1);"
"movq %%mm2, 16(%1);"
"movq %%mm3, 24(%1);"
"movq %%mm4, 32(%1);"
"movq %%mm5, 40(%1);"
"movq %%mm6, 48(%1);"
"movq %%mm7, 56(%1);"
:: "r" (src), "r" (dest) : "memory");
src = ((const unsigned char *)src) + 64;
dest = ((unsigned char *)dest) + 64;
}
__asm__ __volatile__ ("emms":::"memory");
}
/*
* Now do the tail of the block
*/
if (n) __memcpy(dest, src, n);
return retval;
#else
return cpu_cpy(dest, src, n);
#endif
}
void *(*M_Memcpy)(void* dest, const void* src, size_t n) = cpu_cpy;
/** Memcpy that uses MMX, 3DNow, MMXExt or even SSE
* Do not use on overlapped memory, use memmove for that
*/
void M_SetupMemcpy(void)
{
#if defined (__GNUC__) && defined (__i386__)
if (R_SSE2)
M_Memcpy = sse_cpy;
else if (R_MMXExt)
M_Memcpy = mmx2_cpy;
else if (R_3DNow)
M_Memcpy = mmx1_cpy;
else
#endif
if (R_MMX)
M_Memcpy = mmx_cpy;
#if 0
M_Memcpy = cpu_cpy;
#endif
}

View file

@@ -128,8 +128,6 @@ TMatrix *RotateZMatrix(angle_t rad);
// s1 = s2+s3+s1 (1024 lenghtmax)
void strcatbf(char *s1, const char *s2, const char *s3);
void M_SetupMemcpy(void);
const char *M_FileError(FILE *handle);
int M_PathParts (const char *path);

View file

@@ -328,12 +328,11 @@ static void AddInterpolator(levelinterpolator_t* interpolator)
levelinterpolators_size *= 2;
}
levelinterpolators = Z_ReallocAlign(
levelinterpolators = Z_Realloc(
(void*) levelinterpolators,
sizeof(levelinterpolator_t*) * levelinterpolators_size,
PU_LEVEL,
NULL,
sizeof(levelinterpolator_t*) * 8
NULL
);
}
@@ -343,11 +342,8 @@ static void AddInterpolator(levelinterpolator_t* interpolator)
static levelinterpolator_t *CreateInterpolator(levelinterpolator_type_e type, thinker_t *thinker)
{
levelinterpolator_t *ret = (levelinterpolator_t*) Z_CallocAlign(
sizeof(levelinterpolator_t),
PU_LEVEL,
NULL,
sizeof(levelinterpolator_t) * 8
levelinterpolator_t *ret = (levelinterpolator_t*) Z_Calloc(
sizeof(levelinterpolator_t), PU_LEVEL, NULL
);
ret->type = type;
@@ -662,12 +658,11 @@ void R_AddMobjInterpolator(mobj_t *mobj)
interpolated_mobjs_capacity *= 2;
}
interpolated_mobjs = Z_ReallocAlign(
interpolated_mobjs = Z_Realloc(
interpolated_mobjs,
sizeof(mobj_t *) * interpolated_mobjs_capacity,
PU_LEVEL,
NULL,
64
NULL
);
}

View file

@@ -319,50 +319,6 @@ void SCR_SetMode(void)
//
void SCR_Startup(void)
{
const CPUInfoFlags *RCpuInfo = I_CPUInfo();
if (!M_CheckParm("-NOCPUID") && RCpuInfo)
{
#if defined (__i386__) || defined (_M_IX86) || defined (__WATCOMC__)
R_486 = true;
#endif
if (RCpuInfo->RDTSC)
R_586 = true;
if (RCpuInfo->MMX)
R_MMX = true;
if (RCpuInfo->AMD3DNow)
R_3DNow = true;
if (RCpuInfo->MMXExt)
R_MMXExt = true;
if (RCpuInfo->SSE)
R_SSE = true;
if (RCpuInfo->SSE2)
R_SSE2 = true;
CONS_Printf("CPU Info: 486: %i, 586: %i, MMX: %i, 3DNow: %i, MMXExt: %i, SSE2: %i\n", R_486, R_586, R_MMX, R_3DNow, R_MMXExt, R_SSE2);
}
if (M_CheckParm("-noASM"))
R_ASM = false;
if (M_CheckParm("-486"))
R_486 = true;
if (M_CheckParm("-586"))
R_586 = true;
if (M_CheckParm("-MMX"))
R_MMX = true;
if (M_CheckParm("-3DNow"))
R_3DNow = true;
if (M_CheckParm("-MMXExt"))
R_MMXExt = true;
if (M_CheckParm("-SSE"))
R_SSE = true;
if (M_CheckParm("-noSSE"))
R_SSE = false;
if (M_CheckParm("-SSE2"))
R_SSE2 = true;
M_SetupMemcpy();
if (dedicated)
{
V_Init();

View file

@@ -2423,70 +2423,6 @@ UINT32 I_GetFreeMem(UINT32 *total)
#endif
}
const CPUInfoFlags *I_CPUInfo(void)
{
#if defined (_WIN32)
static CPUInfoFlags WIN_CPUInfo;
SYSTEM_INFO SI;
p_IsProcessorFeaturePresent pfnCPUID;
*(void**)&pfnCPUID = FUNCPTRCAST(GetProcAddress(GetModuleHandleA("kernel32.dll"), "IsProcessorFeaturePresent"));
ZeroMemory(&WIN_CPUInfo,sizeof (WIN_CPUInfo));
if (pfnCPUID)
{
WIN_CPUInfo.FPPE = pfnCPUID( 0); //PF_FLOATING_POINT_PRECISION_ERRATA
WIN_CPUInfo.FPE = pfnCPUID( 1); //PF_FLOATING_POINT_EMULATED
WIN_CPUInfo.cmpxchg = pfnCPUID( 2); //PF_COMPARE_EXCHANGE_DOUBLE
WIN_CPUInfo.MMX = pfnCPUID( 3); //PF_MMX_INSTRUCTIONS_AVAILABLE
WIN_CPUInfo.PPCMM64 = pfnCPUID( 4); //PF_PPC_MOVEMEM_64BIT_OK
WIN_CPUInfo.ALPHAbyte = pfnCPUID( 5); //PF_ALPHA_BYTE_INSTRUCTIONS
WIN_CPUInfo.SSE = pfnCPUID( 6); //PF_XMMI_INSTRUCTIONS_AVAILABLE
WIN_CPUInfo.AMD3DNow = pfnCPUID( 7); //PF_3DNOW_INSTRUCTIONS_AVAILABLE
WIN_CPUInfo.RDTSC = pfnCPUID( 8); //PF_RDTSC_INSTRUCTION_AVAILABLE
WIN_CPUInfo.PAE = pfnCPUID( 9); //PF_PAE_ENABLED
WIN_CPUInfo.SSE2 = pfnCPUID(10); //PF_XMMI64_INSTRUCTIONS_AVAILABLE
//WIN_CPUInfo.blank = pfnCPUID(11); //PF_SSE_DAZ_MODE_AVAILABLE
WIN_CPUInfo.DEP = pfnCPUID(12); //PF_NX_ENABLED
WIN_CPUInfo.SSE3 = pfnCPUID(13); //PF_SSE3_INSTRUCTIONS_AVAILABLE
WIN_CPUInfo.cmpxchg16b = pfnCPUID(14); //PF_COMPARE_EXCHANGE128
WIN_CPUInfo.cmp8xchg16 = pfnCPUID(15); //PF_COMPARE64_EXCHANGE128
WIN_CPUInfo.PFC = pfnCPUID(16); //PF_CHANNELS_ENABLED
}
#ifdef HAVE_SDLCPUINFO
else
{
WIN_CPUInfo.RDTSC = SDL_HasRDTSC();
WIN_CPUInfo.MMX = SDL_HasMMX();
WIN_CPUInfo.AMD3DNow = SDL_Has3DNow();
WIN_CPUInfo.SSE = SDL_HasSSE();
WIN_CPUInfo.SSE2 = SDL_HasSSE2();
WIN_CPUInfo.AltiVec = SDL_HasAltiVec();
}
WIN_CPUInfo.MMXExt = SDL_FALSE; //SDL_HasMMXExt(); No longer in SDL2
WIN_CPUInfo.AMD3DNowExt = SDL_FALSE; //SDL_Has3DNowExt(); No longer in SDL2
#endif
GetSystemInfo(&SI);
WIN_CPUInfo.CPUs = SI.dwNumberOfProcessors;
WIN_CPUInfo.IA64 = (SI.dwProcessorType == 2200); // PROCESSOR_INTEL_IA64
WIN_CPUInfo.AMD64 = (SI.dwProcessorType == 8664); // PROCESSOR_AMD_X8664
return &WIN_CPUInfo;
#elif defined (HAVE_SDLCPUINFO)
static CPUInfoFlags SDL_CPUInfo;
memset(&SDL_CPUInfo,0,sizeof (CPUInfoFlags));
SDL_CPUInfo.RDTSC = SDL_HasRDTSC();
SDL_CPUInfo.MMX = SDL_HasMMX();
SDL_CPUInfo.MMXExt = SDL_FALSE; //SDL_HasMMXExt(); No longer in SDL2
SDL_CPUInfo.AMD3DNow = SDL_Has3DNow();
SDL_CPUInfo.AMD3DNowExt = SDL_FALSE; //SDL_Has3DNowExt(); No longer in SDL2
SDL_CPUInfo.SSE = SDL_HasSSE();
SDL_CPUInfo.SSE2 = SDL_HasSSE2();
SDL_CPUInfo.AltiVec = SDL_HasAltiVec();
return &SDL_CPUInfo;
#else
return NULL; /// \todo CPUID asm
#endif
}
// note CPUAFFINITY code used to reside here
void I_RegisterSysCommands(void) {}

View file

@@ -159,7 +159,6 @@ TYPEDEF (bannednode_t);
// i_system.h
TYPEDEF (JoyFF_t);
TYPEDEF (CPUInfoFlags);
// i_time.h
TYPEDEF (timestate_t);

View file

@@ -25,6 +25,9 @@
/// allocator was fragmenting badly. Finally, this version is a bit
/// simpler (about half the lines of code).
#include <stddef.h>
#include <stdalign.h>
#include <tracy/tracy/TracyC.h>
#include "doomdef.h"
@@ -49,27 +52,12 @@ static boolean Z_calloc = false;
#define ZONEID 0xa441d13d
struct memblock_s;
typedef struct
{
struct memblock_s *block; // Describing this memory
UINT32 id; // Should be ZONEID
} ATTRPACK memhdr_t;
// Some code might want aligned memory. Assume it wants memory n bytes
// aligned -- then we allocate n-1 extra bytes and return a pointer to
// the first byte aligned as requested.
// Thus, "real" is the pointer we get from malloc() and will free()
// later, but "hdr" is where the memhdr_t starts.
// For non-aligned allocations they will be the same.
typedef struct memblock_s
{
void *real;
memhdr_t *hdr;
void **user;
INT32 tag; // purgelevel
UINT32 id; // Should be ZONEID
size_t size; // including the header and blocks
size_t realsize; // size of real data only
@@ -78,7 +66,11 @@ typedef struct memblock_s
INT32 ownerline;
struct memblock_s *next, *prev;
} ATTRPACK memblock_t;
} memblock_t;
#define ALIGNPAD (((sizeof (memblock_t) + (alignof (max_align_t) - 1)) & ~(alignof (max_align_t) - 1)) - sizeof (memblock_t))
#define MEMORY(x) (void *)((uintptr_t)(x) + sizeof(memblock_t) + ALIGNPAD)
#define MEMBLOCK(x) (memblock_t *)((uintptr_t)(x) - ALIGNPAD - sizeof(memblock_t))
// both the head and tail of the zone memory block list
static memblock_t head;
@@ -119,52 +111,6 @@ void Z_Init(void)
// Zone memory allocation
// ----------------------
/** Returns the corresponding memblock_t for a given memory block.
*
* \param ptr A pointer to allocated memory,
* assumed to have been allocated with Z_Malloc/Z_Calloc.
* \param func A string containing the name of the function that called this,
* to be printed if the function I_Errors
* \return A pointer to the memblock_t for the given memory.
* \sa Z_Free, Z_ReallocAlign
*/
#define Ptr2Memblock(s, f) Ptr2Memblock2(s, f, __FILE__, __LINE__)
static memblock_t *Ptr2Memblock2(void *ptr, const char* func, const char *file, INT32 line)
{
memhdr_t *hdr;
memblock_t *block;
if (ptr == NULL)
return NULL;
#ifdef ZDEBUG
CONS_Debug(DBG_MEMORY, "%s %s:%d\n", func, file, line);
#endif
hdr = (memhdr_t *)((UINT8 *)ptr - sizeof *hdr);
#ifdef VALGRIND_MAKE_MEM_DEFINED
VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof *hdr);
#endif
#ifdef VALGRIND_MEMPOOL_EXISTS
if (!VALGRIND_MEMPOOL_EXISTS(hdr->block))
{
I_Error("%s: bad memblock from %s:%d", func, file, line);
}
#endif
if (hdr->id != ZONEID)
{
I_Error("%s: wrong id from %s:%d", func, file, line);
}
block = hdr->block;
#ifdef VALGRIND_MAKE_MEM_NOACCESS
VALGRIND_MAKE_MEM_NOACCESS(hdr, sizeof *hdr);
#endif
return block;
}
/** Frees allocated memory.
*
* \param ptr A pointer to allocated memory,
@@ -185,7 +131,11 @@ void Z_Free2(void *ptr, const char *file, INT32 line)
#endif
*/
block = Ptr2Memblock2(ptr, "Z_Free", file, line);
block = MEMBLOCK(ptr);
#ifdef PARANOIA
if (block->id != ZONEID)
I_Error("Z_Free at %s:%d: wrong id", file, line);
#endif
// Write every Z_Free call to a debug file.
CONS_Debug(DBG_MEMORY, "Z_Free at %s:%d\n", file, line);
@@ -201,9 +151,6 @@ void Z_Free2(void *ptr, const char *file, INT32 line)
if (block->user != NULL)
*block->user = NULL;
// Free the memory and get rid of the block.
TracyCFree(block->real);
free(block->real);
#ifdef VALGRIND_DESTROY_MEMPOOL
VALGRIND_DESTROY_MEMPOOL(block);
#endif
@@ -256,35 +203,19 @@ static void *xm(size_t size)
void *Z_Malloc2(size_t size, INT32 tag, void *user, INT32 alignbits,
const char *file, INT32 line)
{
size_t extrabytes = (1<<alignbits) - 1;
size_t padsize = 0;
memblock_t *block;
void *ptr;
memhdr_t *hdr;
void *given;
size_t blocksize = extrabytes + sizeof *hdr + size;
(void)(alignbits); // no longer used, so silence warnings. TODO we should figure out a solution for this
#ifdef ZDEBUG
CONS_Debug(DBG_MEMORY, "Z_Malloc %s:%d\n", file, line);
#endif
if (blocksize < size)/* overflow check */
I_Error("You are allocating memory too large!");
block = xm(sizeof *block);
#ifdef HAVE_VALGRIND
padsize += (1<<sizeof(size_t))*2;
#endif
ptr = xm(blocksize + padsize*2);
TracyCAlloc(ptr, blocksize);
// This horrible calculation makes sure that "given" is aligned
// properly.
given = (void *)((size_t)((UINT8 *)ptr + extrabytes + sizeof *hdr + padsize/2)
& ~extrabytes);
// The mem header lives 'sizeof (memhdr_t)' bytes before given.
hdr = (memhdr_t *)((UINT8 *)given - sizeof *hdr);
block = xm(sizeof (memblock_t) + ALIGNPAD + size);
TracyCAlloc(block, sizeof (memblock_t) + ALIGNPAD + size);
ptr = MEMORY(block);
I_Assert((intptr_t)ptr % alignof (max_align_t) == 0);
#ifdef HAVE_VALGRIND
Z_calloc = false;
@@ -295,39 +226,29 @@ void *Z_Malloc2(size_t size, INT32 tag, void *user, INT32 alignbits,
head.next = block;
block->next->prev = block;
block->real = ptr;
block->hdr = hdr;
block->tag = tag;
block->user = NULL;
block->ownerline = line;
block->ownerfile = file;
block->size = blocksize;
block->size = sizeof (memblock_t) + size;
block->realsize = size;
#ifdef VALGRIND_CREATE_MEMPOOL
VALGRIND_CREATE_MEMPOOL(block, padsize, Z_calloc);
VALGRIND_CREATE_MEMPOOL(block, size, Z_calloc);
#endif
//#ifdef VALGRIND_MEMPOOL_ALLOC
// VALGRIND_MEMPOOL_ALLOC(block, hdr, size + sizeof *hdr);
//#endif
hdr->id = ZONEID;
hdr->block = block;
#ifdef VALGRIND_MAKE_MEM_NOACCESS
VALGRIND_MAKE_MEM_NOACCESS(hdr, sizeof *hdr);
#endif
block->id = ZONEID;
if (user != NULL)
{
block->user = user;
*(void **)user = given;
*(void **)user = ptr;
}
else if (tag >= PU_PURGELEVEL)
I_Error("Z_Malloc: attempted to allocate purgable block "
"(size %s) with no user", sizeu1(size));
return given;
return ptr;
}
/** The Z_CallocAlign function.
@@ -388,7 +309,11 @@ void *Z_Realloc2(void *ptr, size_t size, INT32 tag, void *user, INT32 alignbits,
return Z_Calloc2(size, tag, user, alignbits, file , line);
}
block = Ptr2Memblock2(ptr, "Z_Realloc", file, line);
block = MEMBLOCK(ptr);
#ifdef PARANOIA
if (block->id != ZONEID)
I_Error("Z_ReallocAlign at %s:%d: wrong id", file, line);
#endif
if (block == NULL)
return NULL;
@@ -430,9 +355,8 @@ void Z_FreeTags(INT32 lowtag, INT32 hightag)
for (block = head.next; block != &head; block = next)
{
next = block->next; // get link before freeing
if (block->tag >= lowtag && block->tag <= hightag)
Z_Free((UINT8 *)block->hdr + sizeof *block->hdr);
Z_Free(MEMORY(block));
}
}
@@ -455,7 +379,7 @@ void Z_IterateTags(INT32 lowtag, INT32 hightag, boolean (*iterfunc)(void *))
if (block->tag >= lowtag && block->tag <= hightag)
{
void *mem = (UINT8 *)block->hdr + sizeof *block->hdr;
void *mem = MEMORY(block);
boolean free = iterfunc(mem);
if (free)
Z_Free(mem);
@@ -500,15 +424,13 @@ void Z_CheckMemCleanup(void)
void Z_CheckHeap(INT32 i)
{
memblock_t *block;
memhdr_t *hdr;
UINT32 blocknumon = 0;
void *given;
for (block = head.next; block != &head; block = block->next)
{
blocknumon++;
hdr = block->hdr;
given = (UINT8 *)hdr + sizeof *hdr;
given = MEMORY(block);
#ifdef ZDEBUG
CONS_Debug(DBG_MEMORY, "block %u owned by %s:%d\n",
blocknumon, block->ownerfile, block->ownerline);
@@ -519,6 +441,7 @@ void Z_CheckHeap(INT32 i)
I_Error("Z_CheckHeap %d: block %u"
"(owned by %s:%d)"
" should not exist", i, blocknumon,
" should not exist", i, blocknumon,
block->ownerfile, block->ownerline
);
}
@@ -550,16 +473,7 @@ void Z_CheckHeap(INT32 i)
#ifdef VALGRIND_MAKE_MEM_DEFINED
VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof *hdr);
#endif
if (hdr->block != block)
{
I_Error("Z_CheckHeap %d: block %u"
"(owned by %s:%d)"
" doesn't have linkback from allocated memory",
i, blocknumon,
block->ownerfile, block->ownerline
);
}
if (hdr->id != ZONEID)
if (block->id != ZONEID)
{
I_Error("Z_CheckHeap %d: block %u"
"(owned by %s:%d)"
@@ -567,9 +481,6 @@ void Z_CheckHeap(INT32 i)
block->ownerfile, block->ownerline
);
}
#ifdef VALGRIND_MAKE_MEM_NOACCESS
VALGRIND_MAKE_MEM_NOACCESS(hdr, sizeof *hdr);
#endif
}
}
@@ -591,35 +502,14 @@ void Z_ChangeTag(void *ptr, INT32 tag)
#endif
{
memblock_t *block;
memhdr_t *hdr;
if (ptr == NULL)
return;
hdr = (memhdr_t *)((UINT8 *)ptr - sizeof *hdr);
block = MEMBLOCK(ptr);
#ifdef VALGRIND_MAKE_MEM_DEFINED
VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof *hdr);
#endif
#ifdef VALGRIND_MEMPOOL_EXISTS
if (!VALGRIND_MEMPOOL_EXISTS(hdr->block))
{
#ifdef PARANOIA
I_Error("Z_CT at %s:%d: bad memblock", file, line);
#else
I_Error("Z_CT: bad memblock");
#endif
}
#endif
#ifdef PARANOIA
if (hdr->id != ZONEID) I_Error("Z_CT at %s:%d: wrong id", file, line);
#endif
block = hdr->block;
#ifdef VALGRIND_MAKE_MEM_NOACCESS
VALGRIND_MAKE_MEM_NOACCESS(hdr, sizeof *hdr);
if (block->id != ZONEID) I_Error("Z_ChangeTag at %s:%d: wrong id", file, line);
#endif
if (tag >= PU_PURGELEVEL && block->user == NULL)
@@ -643,25 +533,14 @@ void Z_SetUser(void *ptr, void **newuser)
#endif
{
memblock_t *block;
memhdr_t *hdr;
if (ptr == NULL)
return;
hdr = (memhdr_t *)((UINT8 *)ptr - sizeof *hdr);
#ifdef VALGRIND_MAKE_MEM_DEFINED
VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof *hdr);
#endif
block = MEMBLOCK(ptr);
#ifdef PARANOIA
if (hdr->id != ZONEID) I_Error("Z_CT at %s:%d: wrong id", file, line);
#endif
block = hdr->block;
#ifdef VALGRIND_MAKE_MEM_NOACCESS
VALGRIND_MAKE_MEM_NOACCESS(hdr, sizeof *hdr);
if (block->id != ZONEID) I_Error("Z_SetUser at %s:%d: wrong id", file, line);
#endif
if (block->tag >= PU_PURGELEVEL && newuser == NULL)

View file

@ -95,10 +95,10 @@ void *Z_Malloc2(size_t size, INT32 tag, void *user, INT32 alignbits, const char
void *Z_Calloc2(size_t size, INT32 tag, void *user, INT32 alignbits, const char *file, INT32 line) FUNCALLOC(1);
void *Z_Realloc2(void *ptr, size_t size, INT32 tag, void *user, INT32 alignbits, const char *file, INT32 line) FUNCALLOC(2);
// Alloc with no alignment
#define Z_Malloc(s,t,u) Z_MallocAlign(s, t, u, 0)
#define Z_Calloc(s,t,u) Z_CallocAlign(s, t, u, 0)
#define Z_Realloc(p,s,t,u) Z_ReallocAlign(p, s, t, u, 0)
// Alloc with standard alignment
#define Z_Malloc(s,t,u) Z_MallocAlign(s, t, u, sizeof(void *))
#define Z_Calloc(s,t,u) Z_CallocAlign(s, t, u, sizeof(void *))
#define Z_Realloc(p,s,t,u) Z_ReallocAlign(p, s, t, u, sizeof(void *))
// Free all memory by tag
// these don't give line numbers currently though
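
Since every zone allocation is now max_align_t-aligned (note the I_Assert added in Z_Malloc2) and Z_Malloc2 ignores its alignbits argument, callers that previously asked for explicit alignment simply switch to the plain wrappers, as in the Lua HUD drawlist hunk earlier in this diff:

/* before */
drawlist = (huddrawlist_h) Z_CallocAlign(sizeof(struct huddrawlist_s), PU_STATIC, NULL, 64);

/* after */
drawlist = (huddrawlist_h) Z_Calloc(sizeof(struct huddrawlist_s), PU_STATIC, NULL);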