mirror of
https://github.com/wolfpld/tracy.git
synced 2025-03-20 07:40:02 +08:00
Only one callstack may be in-flight at any time.
The exception is the allocated callstack, but that will be handled in a different way in a follow-up change. There is no need to keep pending callstacks in a map.
This commit is contained in:
parent
afe2fad1a7
commit
ebf09bebae
@ -1705,7 +1705,7 @@ void Worker::Exec()
|
|||||||
if( m_terminate )
|
if( m_terminate )
|
||||||
{
|
{
|
||||||
if( m_pendingStrings != 0 || m_pendingThreads != 0 || m_pendingSourceLocation != 0 || m_pendingCallstackFrames != 0 ||
|
if( m_pendingStrings != 0 || m_pendingThreads != 0 || m_pendingSourceLocation != 0 || m_pendingCallstackFrames != 0 ||
|
||||||
!m_pendingCustomStrings.empty() || m_data.plots.IsPending() || !m_pendingCallstacks.empty() || m_pendingCallstackSubframes != 0 )
|
!m_pendingCustomStrings.empty() || m_data.plots.IsPending() || m_pendingCallstackPtr != 0 || m_pendingCallstackSubframes != 0 )
|
||||||
{
|
{
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@ -2086,7 +2086,7 @@ uint64_t Worker::GetCanonicalPointer( const CallstackFrameId& id ) const
|
|||||||
|
|
||||||
void Worker::AddCallstackPayload( uint64_t ptr, char* _data, size_t _sz )
|
void Worker::AddCallstackPayload( uint64_t ptr, char* _data, size_t _sz )
|
||||||
{
|
{
|
||||||
assert( m_pendingCallstacks.find( ptr ) == m_pendingCallstacks.end() );
|
assert( m_pendingCallstackPtr == 0 );
|
||||||
|
|
||||||
const auto sz = _sz / sizeof( uint64_t );
|
const auto sz = _sz / sizeof( uint64_t );
|
||||||
const auto memsize = sizeof( VarArray<CallstackFrameId> ) + sz * sizeof( CallstackFrameId );
|
const auto memsize = sizeof( VarArray<CallstackFrameId> ) + sz * sizeof( CallstackFrameId );
|
||||||
@ -2127,12 +2127,13 @@ void Worker::AddCallstackPayload( uint64_t ptr, char* _data, size_t _sz )
|
|||||||
m_slab.Unalloc( memsize );
|
m_slab.Unalloc( memsize );
|
||||||
}
|
}
|
||||||
|
|
||||||
m_pendingCallstacks.emplace( ptr, idx );
|
m_pendingCallstackPtr = ptr;
|
||||||
|
m_pendingCallstackId = idx;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Worker::AddCallstackAllocPayload( uint64_t ptr, char* data, size_t _sz )
|
void Worker::AddCallstackAllocPayload( uint64_t ptr, char* data, size_t _sz )
|
||||||
{
|
{
|
||||||
assert( m_pendingCallstacks.find( ptr ) == m_pendingCallstacks.end() );
|
//assert( m_pendingCallstacks.find( ptr ) == m_pendingCallstacks.end() );
|
||||||
|
|
||||||
CallstackFrameId stack[64];
|
CallstackFrameId stack[64];
|
||||||
const auto sz = *(uint32_t*)data; data += 4;
|
const auto sz = *(uint32_t*)data; data += 4;
|
||||||
@ -2200,7 +2201,7 @@ void Worker::AddCallstackAllocPayload( uint64_t ptr, char* data, size_t _sz )
|
|||||||
m_slab.Unalloc( memsize );
|
m_slab.Unalloc( memsize );
|
||||||
}
|
}
|
||||||
|
|
||||||
m_pendingCallstacks.emplace( ptr, idx );
|
//m_pendingCallstacks.emplace( ptr, idx );
|
||||||
}
|
}
|
||||||
|
|
||||||
void Worker::InsertPlot( PlotData* plot, int64_t time, double val )
|
void Worker::InsertPlot( PlotData* plot, int64_t time, double val )
|
||||||
@ -3164,29 +3165,27 @@ void Worker::ProcessMemFreeCallstack( const QueueMemFree& ev )
|
|||||||
|
|
||||||
void Worker::ProcessCallstackMemory( const QueueCallstackMemory& ev )
|
void Worker::ProcessCallstackMemory( const QueueCallstackMemory& ev )
|
||||||
{
|
{
|
||||||
auto it = m_pendingCallstacks.find( ev.ptr );
|
assert( m_pendingCallstackPtr == ev.ptr );
|
||||||
assert( it != m_pendingCallstacks.end() );
|
m_pendingCallstackPtr = 0;
|
||||||
|
|
||||||
if( m_lastMemActionCallstack != std::numeric_limits<uint64_t>::max() )
|
if( m_lastMemActionCallstack != std::numeric_limits<uint64_t>::max() )
|
||||||
{
|
{
|
||||||
auto& mem = m_data.memory.data[m_lastMemActionCallstack];
|
auto& mem = m_data.memory.data[m_lastMemActionCallstack];
|
||||||
if( m_lastMemActionWasAlloc )
|
if( m_lastMemActionWasAlloc )
|
||||||
{
|
{
|
||||||
mem.csAlloc = it->second;
|
mem.csAlloc = m_pendingCallstackId;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
mem.csFree = it->second;
|
mem.csFree = m_pendingCallstackId;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m_pendingCallstacks.erase( it );
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Worker::ProcessCallstack( const QueueCallstack& ev )
|
void Worker::ProcessCallstack( const QueueCallstack& ev )
|
||||||
{
|
{
|
||||||
auto it = m_pendingCallstacks.find( ev.ptr );
|
assert( m_pendingCallstackPtr == ev.ptr );
|
||||||
assert( it != m_pendingCallstacks.end() );
|
m_pendingCallstackPtr = 0;
|
||||||
|
|
||||||
auto nit = m_nextCallstack.find( ev.thread );
|
auto nit = m_nextCallstack.find( ev.thread );
|
||||||
assert( nit != m_nextCallstack.end() );
|
assert( nit != m_nextCallstack.end() );
|
||||||
@ -3195,28 +3194,24 @@ void Worker::ProcessCallstack( const QueueCallstack& ev )
|
|||||||
switch( next.type )
|
switch( next.type )
|
||||||
{
|
{
|
||||||
case NextCallstackType::Zone:
|
case NextCallstackType::Zone:
|
||||||
next.zone->callstack = it->second;
|
next.zone->callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
case NextCallstackType::Gpu:
|
case NextCallstackType::Gpu:
|
||||||
next.gpu->callstack = it->second;
|
next.gpu->callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
case NextCallstackType::Crash:
|
case NextCallstackType::Crash:
|
||||||
m_data.m_crashEvent.callstack = it->second;
|
m_data.m_crashEvent.callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
assert( false );
|
assert( false );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_pendingCallstacks.erase( it );
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Worker::ProcessCallstackAlloc( const QueueCallstackAlloc& ev )
|
void Worker::ProcessCallstackAlloc( const QueueCallstackAlloc& ev )
|
||||||
{
|
{
|
||||||
auto it = m_pendingCallstacks.find( ev.nativePtr );
|
assert( m_pendingCallstackPtr == ev.nativePtr );
|
||||||
assert( it != m_pendingCallstacks.end() );
|
m_pendingCallstackPtr = 0;
|
||||||
auto itAlloc = m_pendingCallstacks.find( ev.ptr );
|
|
||||||
assert( itAlloc != m_pendingCallstacks.end() );
|
|
||||||
|
|
||||||
auto nit = m_nextCallstack.find( ev.thread );
|
auto nit = m_nextCallstack.find( ev.thread );
|
||||||
assert( nit != m_nextCallstack.end() );
|
assert( nit != m_nextCallstack.end() );
|
||||||
@ -3225,21 +3220,18 @@ void Worker::ProcessCallstackAlloc( const QueueCallstackAlloc& ev )
|
|||||||
switch( next.type )
|
switch( next.type )
|
||||||
{
|
{
|
||||||
case NextCallstackType::Zone:
|
case NextCallstackType::Zone:
|
||||||
next.zone->callstack = it->second;
|
next.zone->callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
case NextCallstackType::Gpu:
|
case NextCallstackType::Gpu:
|
||||||
next.gpu->callstack = it->second;
|
next.gpu->callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
case NextCallstackType::Crash:
|
case NextCallstackType::Crash:
|
||||||
m_data.m_crashEvent.callstack = it->second;
|
m_data.m_crashEvent.callstack = m_pendingCallstackId;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
assert( false );
|
assert( false );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_pendingCallstacks.erase( it );
|
|
||||||
m_pendingCallstacks.erase( itAlloc );
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Worker::ProcessCallstackFrameSize( const QueueCallstackFrameSize& ev )
|
void Worker::ProcessCallstackFrameSize( const QueueCallstackFrameSize& ev )
|
||||||
|
|||||||
@ -484,7 +484,8 @@ private:
|
|||||||
|
|
||||||
GpuCtxData* m_gpuCtxMap[256];
|
GpuCtxData* m_gpuCtxMap[256];
|
||||||
flat_hash_map<uint64_t, StringLocation, nohash<uint64_t>> m_pendingCustomStrings;
|
flat_hash_map<uint64_t, StringLocation, nohash<uint64_t>> m_pendingCustomStrings;
|
||||||
flat_hash_map<uint64_t, uint32_t> m_pendingCallstacks;
|
uint64_t m_pendingCallstackPtr = 0;
|
||||||
|
uint32_t m_pendingCallstackId;
|
||||||
flat_hash_map<uint64_t, int32_t, nohash<uint64_t>> m_pendingSourceLocationPayload;
|
flat_hash_map<uint64_t, int32_t, nohash<uint64_t>> m_pendingSourceLocationPayload;
|
||||||
Vector<uint64_t> m_sourceLocationQueue;
|
Vector<uint64_t> m_sourceLocationQueue;
|
||||||
flat_hash_map<uint64_t, uint32_t, nohash<uint64_t>> m_sourceLocationShrink;
|
flat_hash_map<uint64_t, uint32_t, nohash<uint64_t>> m_sourceLocationShrink;
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user