Implement guest critical sections using std::atomic.

This commit is contained in:
Skyth 2024-12-14 18:23:31 +03:00
parent 7f45cb942d
commit c4b21bde8b
3 changed files with 26 additions and 41 deletions

View file

@@ -1,13 +1,13 @@
#pragma once
// NOTE(review): this span is a rendered diff hunk — each pair below is the
// pre-change line followed by its post-change replacement. The commit renames
// the thread-local context pointer gPPCContext -> g_ppcContext (g_ prefix
// naming convention); behavior is unchanged.
inline thread_local PPCContext* gPPCContext;   // old (removed)
inline thread_local PPCContext* g_ppcContext;  // new (added)
// Returns the calling thread's guest PPC context (may be null before
// SetPPCContext has been called on this thread).
inline PPCContext* GetPPCContext()
{
return gPPCContext;    // old (removed)
return g_ppcContext;   // new (added)
}
// Registers ctx as the calling thread's guest PPC context. The caller keeps
// ownership; ctx must outlive any use of GetPPCContext() on this thread.
inline void SetPPCContext(PPCContext& ctx)
{
gPPCContext = &ctx;    // old (removed)
g_ppcContext = &ctx;   // new (added)
}

View file

@@ -4821,40 +4821,18 @@ struct PipelineStateQueueItem
static moodycamel::BlockingConcurrentQueue<PipelineStateQueueItem> g_pipelineStateQueue;
// NOTE(review): this struct is DELETED by this commit — its users below are
// switched to std::unique_ptr<GuestThreadContext>. Kept here as the diff's
// pre-change text.
//
// Lazily-initialized minimal guest thread state: a 0x4000-byte (16 KiB) guest
// stack plus a PPCContext registered as this thread's context.
struct MinimalGuestThreadContext
{
uint8_t* stack = nullptr;   // guest stack; allocated on first ensureValid()
PPCContext ppcContext{};
// Frees the guest stack if it was ever allocated (RAII cleanup).
~MinimalGuestThreadContext()
{
if (stack != nullptr)
g_userHeap.Free(stack);
}
// Allocates the stack and registers ppcContext on first call; subsequent
// calls are no-ops (guarded by the null check).
void ensureValid()
{
if (stack == nullptr)
{
stack = reinterpret_cast<uint8_t*>(g_userHeap.Alloc(0x4000));
ppcContext.fn = (uint8_t*)g_codeCache.bucket;
// r1 is set to the virtual mapping of the allocation's end
// (stack + 0x4000) — presumably the guest stack pointer, with the
// stack growing downward; TODO(review) confirm against PPCContext.
ppcContext.r1.u64 = g_memory.MapVirtual(stack + 0x4000);
SetPPCContext(ppcContext);
}
}
};
static void PipelineCompilerThread()
{
GuestThread::SetThreadName(GetCurrentThreadId(), "Pipeline Compiler Thread");
MinimalGuestThreadContext ctx;
std::unique_ptr<GuestThreadContext> ctx;
while (true)
{
PipelineStateQueueItem queueItem;
g_pipelineStateQueue.wait_dequeue(queueItem);
ctx.ensureValid();
if (ctx == nullptr)
ctx = std::make_unique<GuestThreadContext>(0);
auto pipeline = CreateGraphicsPipeline(queueItem.pipelineState);
#ifdef ASYNC_PSO_DEBUG
@@ -5499,7 +5477,7 @@ static void ModelConsumerThread()
GuestThread::SetThreadName(GetCurrentThreadId(), "Model Consumer Thread");
std::vector<boost::shared_ptr<Hedgehog::Database::CDatabaseData>> localPendingDataQueue;
MinimalGuestThreadContext ctx;
std::unique_ptr<GuestThreadContext> ctx;
while (true)
{
@@ -5508,7 +5486,8 @@ static void ModelConsumerThread()
while ((pendingDataCount = g_pendingDataCount.load()) == 0)
g_pendingDataCount.wait(pendingDataCount);
ctx.ensureValid();
if (ctx == nullptr)
ctx = std::make_unique<GuestThreadContext>(0);
if (g_pendingPipelineStateCache)
{

View file

@@ -561,8 +561,6 @@ struct Semaphore : KernelObject, HostObject<XKSEMAPHORE>
}
};
// https://devblogs.microsoft.com/oldnewthing/20160825-00/?p=94165
// Releases one level of recursive ownership; the outermost release clears
// OwningThread and wakes one waiter blocked on it in RtlEnterCriticalSection.
// This commit replaces the Win32 Interlocked/WakeByAddress pair (old lines)
// with a std::atomic_ref store + notify_one (new lines); old/new diff lines
// appear interleaved below.
void RtlLeaveCriticalSection(XRTL_CRITICAL_SECTION* cs)
{
cs->RecursionCount--;
@ -570,25 +568,29 @@ void RtlLeaveCriticalSection(XRTL_CRITICAL_SECTION* cs)
// Still held recursively by this thread — keep ownership.
if (cs->RecursionCount != 0)
return;
// old (removed): Win32 interlocked clear + wake-by-address
InterlockedExchange(&cs->OwningThread, 0);
WakeByAddressSingle(&cs->OwningThread);
// new (added): portable C++20 atomic_ref clear + notify
std::atomic_ref owningThread(cs->OwningThread);
owningThread.store(0);
owningThread.notify_one();
}
// Acquires the critical section, spinning/waiting until OwningThread can be
// claimed; re-entry by the current owner just bumps RecursionCount.
// This commit (old/new diff lines interleaved below) switches the thread
// identity from the host thread id to the guest context's r13 value, and the
// CAS/wait from Win32 Interlocked*/WaitOnAddress to std::atomic_ref.
void RtlEnterCriticalSection(XRTL_CRITICAL_SECTION* cs)
{
DWORD thisThread = GetCurrentThreadId();       // old (removed): host thread id
uint32_t thisThread = g_ppcContext->r13.u32;   // new (added): guest thread identity from r13
assert(thisThread != NULL);
std::atomic_ref owningThread(cs->OwningThread);
while (true)
{
// old (removed): acquire-CAS returning the prior owner
DWORD previousOwner = InterlockedCompareExchangeAcquire(&cs->OwningThread, thisThread, 0);
// new (added): expected value for the CAS; on failure
// compare_exchange_weak writes the actual owner back into it,
// which is then the value passed to wait() below.
uint32_t previousOwner = 0;
if (previousOwner == 0 || previousOwner == thisThread)
if (owningThread.compare_exchange_weak(previousOwner, thisThread) || previousOwner == thisThread)
{
// Acquired (or re-entered by the current owner).
cs->RecursionCount++;
return;
}
// Block until OwningThread changes from the observed owner, then retry.
WaitOnAddress(&cs->OwningThread, &previousOwner, sizeof(previousOwner), INFINITE);   // old (removed)
owningThread.wait(previousOwner);   // new (added)
}
}
@@ -1036,10 +1038,14 @@ void XexGetModuleHandle()
bool RtlTryEnterCriticalSection(XRTL_CRITICAL_SECTION* cs)
{
DWORD thisThread = GetCurrentThreadId();
DWORD previousOwner = InterlockedCompareExchangeAcquire(&cs->OwningThread, thisThread, 0);
uint32_t thisThread = g_ppcContext->r13.u32;
assert(thisThread != NULL);
if (previousOwner == 0 || previousOwner == thisThread)
std::atomic_ref owningThread(cs->OwningThread);
uint32_t previousOwner = 0;
if (owningThread.compare_exchange_weak(previousOwner, thisThread) || previousOwner == thisThread)
{
cs->RecursionCount++;
return true;