tracy/server/TracyWorker.hpp

#ifndef __TRACYWORKER_HPP__
#define __TRACYWORKER_HPP__
#include <atomic>
#include <limits>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
#include "../common/tracy_lz4.hpp"
#include "../common/TracyForceInline.hpp"
#include "../common/TracyMutex.hpp"
#include "../common/TracyQueue.hpp"
#include "../common/TracyProtocol.hpp"
#include "../common/TracySocket.hpp"
#include "tracy_flat_hash_map.hpp"
#include "TracyEvent.hpp"
#include "TracySlab.hpp"
#include "TracyStringDiscovery.hpp"
#include "TracyVarArray.hpp"
namespace tracy
{
class FileRead;
class FileWrite;
namespace EventType
{
enum Type : uint32_t
{
Locks = 1 << 0,
Messages = 1 << 1,
Plots = 1 << 2,
Memory = 1 << 3,
None = 0,
All = std::numeric_limits<uint32_t>::max()
};
}
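// The event mask is a plain bitfield; a hypothetical load that skips memory data:
//   Worker w( f, EventType::Type( EventType::All & ~EventType::Memory ) );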
struct UnsupportedVersion : public std::exception
{
UnsupportedVersion( int version ) : version( version ) {}
int version;
};
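// Load progress is published through atomics so a UI thread can poll it (via the
// static GetLoadProgress() below) while Worker( FileRead& ) loads on another thread.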
struct LoadProgress
{
enum Stage
{
Initialization,
Locks,
Messages,
Zones,
GpuZones,
Plots,
Memory,
CallStacks
};
LoadProgress() : total( 0 ), progress( 0 ), subTotal( 0 ), subProgress( 0 ) {}
std::atomic<uint64_t> total;
std::atomic<uint64_t> progress;
std::atomic<uint64_t> subTotal;
std::atomic<uint64_t> subProgress;
};
class Worker
{
public:
#pragma pack( 1 )
struct ZoneThreadData
{
ZoneEvent* zone;
uint16_t thread;
};
#pragma pack()
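// Packed to 10 bytes (down from 16 on typical 64-bit targets); large traces can
// hold one of these records per zone, i.e. millions of them.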
private:
struct SourceLocationZones
{
Vector<ZoneThreadData> zones;
int64_t min = std::numeric_limits<int64_t>::max();
int64_t max = std::numeric_limits<int64_t>::min();
int64_t total = 0;
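// Sum of squared zone times; together with total and the zone count this allows
// deriving the standard deviation, e.g. sqrt( sumSq / n - mean * mean ).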
double sumSq = 0;
int64_t selfMin = std::numeric_limits<int64_t>::max();
int64_t selfMax = std::numeric_limits<int64_t>::min();
int64_t selfTotal = 0;
};
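// The hash_policy typedefs below opt these hashers into tracy_flat_hash_map's
// power-of-two table sizing, which selects buckets with a mask instead of a modulo.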
struct CallstackFrameIdHash
{
size_t operator()( const CallstackFrameId& id ) const { return id.data; }
typedef tracy::power_of_two_hash_policy hash_policy;
};
struct CallstackFrameIdCompare
{
bool operator()( const CallstackFrameId& lhs, const CallstackFrameId& rhs ) const { return lhs.data == rhs.data; }
};
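// Hashes the full frame contents with djb2-style mixing ( hash = hash * 33 ^ v ),
// so identical callstack frame data deduplicates through revFrameMap.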
struct RevFrameHash
{
size_t operator()( const CallstackFrameData* data ) const
{
size_t hash = data->size;
for( uint8_t i=0; i<data->size; i++ )
{
const auto& v = data->data[i];
hash = ( ( hash << 5 ) + hash ) ^ size_t( v.line );
hash = ( ( hash << 5 ) + hash ) ^ size_t( v.file.__data );
hash = ( ( hash << 5 ) + hash ) ^ size_t( v.name.__data );
}
return hash;
}
typedef tracy::power_of_two_hash_policy hash_policy;
};
struct RevFrameComp
{
bool operator()( const CallstackFrameData* lhs, const CallstackFrameData* rhs ) const
{
if( lhs->size != rhs->size ) return false;
for( uint8_t i=0; i<lhs->size; i++ )
{
if( memcmp( lhs->data + i, rhs->data + i, sizeof( CallstackFrame ) ) != 0 ) return false;
}
return true;
}
};
struct DataBlock
{
DataBlock() : zonesCnt( 0 ), lastTime( 0 ), frameOffset( 0 ), threadLast( std::numeric_limits<uint64_t>::max(), 0 ) {}
TracyMutex lock;
StringDiscovery<FrameData*> frames;
FrameData* framesBase;
Vector<GpuCtxData*> gpuData;
Vector<MessageData*> messages;
StringDiscovery<PlotData*> plots;
Vector<ThreadData*> threads;
MemData memory;
uint64_t zonesCnt;
int64_t lastTime;
uint64_t frameOffset;
flat_hash_map<uint64_t, const char*, nohash<uint64_t>> strings;
Vector<const char*> stringData;
flat_hash_map<charutil::StringKey, uint32_t, charutil::StringKey::HasherPOT, charutil::StringKey::Comparator> stringMap;
flat_hash_map<uint64_t, const char*, nohash<uint64_t>> threadNames;
flat_hash_map<uint64_t, SourceLocation, nohash<uint64_t>> sourceLocation;
Vector<SourceLocation*> sourceLocationPayload;
flat_hash_map<SourceLocation*, uint32_t, SourceLocationHasher, SourceLocationComparator> sourceLocationPayloadMap;
Vector<uint64_t> sourceLocationExpand;
#ifndef TRACY_NO_STATISTICS
flat_hash_map<int32_t, SourceLocationZones, nohash<int32_t>> sourceLocationZones;
bool sourceLocationZonesReady;
#else
flat_hash_map<int32_t, uint64_t> sourceLocationZonesCnt;
#endif
flat_hash_map<VarArray<CallstackFrameId>*, uint32_t, VarArrayHasherPOT<CallstackFrameId>, VarArrayComparator<CallstackFrameId>> callstackMap;
Vector<VarArray<CallstackFrameId>*> callstackPayload;
flat_hash_map<CallstackFrameId, CallstackFrameData*, CallstackFrameIdHash, CallstackFrameIdCompare> callstackFrameMap;
flat_hash_map<CallstackFrameData*, CallstackFrameId, RevFrameHash, RevFrameComp> revFrameMap;
flat_hash_map<uint32_t, LockMap*, nohash<uint32_t>> lockMap;
flat_hash_map<uint64_t, uint16_t, nohash<uint64_t>> threadMap;
Vector<uint64_t> threadExpand;
std::pair<uint64_t, uint16_t> threadLast;
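// Children vectors are stored out-of-line and referenced by index: per the
// author's measurements most zones are childless, so this saves 9 bytes for each
// such zone at the cost of 4 extra bytes for zones that do have children.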
Vector<Vector<ZoneEvent*>> zoneChildren;
Vector<Vector<GpuEvent*>> gpuChildren;
Vector<Vector<ZoneEvent*>> zoneVectorCache;
CrashEvent crashEvent;
};
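// Network statistics shared with the UI: a fixed window of recent transfer-rate
// samples, the current compression ratio, and the send queue size.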
struct MbpsBlock
{
MbpsBlock() : mbps( 64 ), compRatio( 1.0 ), queue( 0 ) {}
TracyMutex lock;
std::vector<float> mbps;
float compRatio;
size_t queue;
};
enum class NextCallstackType
{
Zone,
Gpu,
Crash
};
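// Tagged union: type selects the active member below; Crash entries carry no
// event pointer.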
struct NextCallstack
{
NextCallstackType type;
union
{
ZoneEvent* zone;
GpuEvent* gpu;
};
};
struct FailureData
{
uint64_t thread;
int32_t srcloc;
};
public:
enum class Failure
{
None,
ZoneStack,
ZoneEnd,
ZoneText,
ZoneName,
MemFree,
FrameEnd,
NUM_FAILURES
};
Worker( const char* addr );
Worker( FileRead& f, EventType::Type eventMask = EventType::All );
~Worker();
const std::string& GetAddr() const { return m_addr; }
const std::string& GetCaptureName() const { return m_captureName; }
const std::string& GetCaptureProgram() const { return m_captureProgram; }
uint64_t GetCaptureTime() const { return m_captureTime; }
const std::string& GetHostInfo() const { return m_hostInfo; }
int64_t GetDelay() const { return m_delay; }
int64_t GetResolution() const { return m_resolution; }
TracyMutex& GetDataLock() { return m_data.lock; }
size_t GetFrameCount( const FrameData& fd ) const { return fd.frames.size(); }
size_t GetFullFrameCount( const FrameData& fd ) const;
int64_t GetTimeBegin() const { return GetFrameBegin( *m_data.framesBase, 0 ); }
int64_t GetLastTime() const { return m_data.lastTime; }
uint64_t GetZoneCount() const { return m_data.zonesCnt; }
uint64_t GetLockCount() const;
uint64_t GetPlotCount() const;
uint64_t GetSrcLocCount() const { return m_data.sourceLocationPayload.size() + m_data.sourceLocation.size(); }
uint64_t GetCallstackPayloadCount() const { return m_data.callstackPayload.size() - 1; }
uint64_t GetCallstackFrameCount() const { return m_data.callstackFrameMap.size(); }
uint64_t GetFrameOffset() const { return m_data.frameOffset; }
const FrameData* GetFramesBase() const { return m_data.framesBase; }
const Vector<FrameData*>& GetFrames() const { return m_data.frames.Data(); }
int64_t GetFrameTime( const FrameData& fd, size_t idx ) const;
int64_t GetFrameBegin( const FrameData& fd, size_t idx ) const;
int64_t GetFrameEnd( const FrameData& fd, size_t idx ) const;
std::pair<int, int> GetFrameRange( const FrameData& fd, int64_t from, int64_t to );
const flat_hash_map<uint32_t, LockMap*, nohash<uint32_t>>& GetLockMap() const { return m_data.lockMap; }
const Vector<MessageData*>& GetMessages() const { return m_data.messages; }
const Vector<GpuCtxData*>& GetGpuData() const { return m_data.gpuData; }
const Vector<PlotData*>& GetPlots() const { return m_data.plots.Data(); }
const Vector<ThreadData*>& GetThreadData() const { return m_data.threads; }
const MemData& GetMemData() const { return m_data.memory; }
const VarArray<CallstackFrameId>& GetCallstack( uint32_t idx ) const { return *m_data.callstackPayload[idx]; }
const CallstackFrameData* GetCallstackFrame( const CallstackFrameId& ptr ) const;
uint64_t GetCanonicalPointer( const CallstackFrameId& id ) const;
const CrashEvent& GetCrashEvent() const { return m_data.crashEvent; }
// Some zones may have incomplete timing data (only start time is available, end hasn't arrived yet).
// GetZoneEnd() will try to infer the end time by looking at child zones (parent zone can't end
// before its children have ended).
// GetZoneEndDirect() will only return zone's direct timing data, without looking at children.
int64_t GetZoneEnd( const ZoneEvent& ev );
int64_t GetZoneEnd( const GpuEvent& ev );
static tracy_force_inline int64_t GetZoneEndDirect( const ZoneEvent& ev ) { return ev.end >= 0 ? ev.end : ev.start; }
static tracy_force_inline int64_t GetZoneEndDirect( const GpuEvent& ev ) { return ev.gpuEnd >= 0 ? ev.gpuEnd : ev.gpuStart; }
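// For example, an open zone ( end < 0 ) whose deepest last child finished at time
// t is reported by GetZoneEnd() as ending at t, while GetZoneEndDirect() falls
// back to the zone's start time.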
const char* GetString( uint64_t ptr ) const;
const char* GetString( const StringRef& ref ) const;
const char* GetString( const StringIdx& idx ) const;
const char* GetThreadString( uint64_t id ) const;
const SourceLocation& GetSourceLocation( int32_t srcloc ) const;
const char* GetZoneName( const SourceLocation& srcloc ) const;
const char* GetZoneName( const ZoneEvent& ev ) const;
const char* GetZoneName( const ZoneEvent& ev, const SourceLocation& srcloc ) const;
const char* GetZoneName( const GpuEvent& ev ) const;
const char* GetZoneName( const GpuEvent& ev, const SourceLocation& srcloc ) const;
tracy_force_inline const Vector<ZoneEvent*>& GetZoneChildren( int32_t idx ) const { return m_data.zoneChildren[idx]; }
tracy_force_inline const Vector<GpuEvent*>& GetGpuChildren( int32_t idx ) const { return m_data.gpuChildren[idx]; }
std::vector<int32_t> GetMatchingSourceLocation( const char* query, bool ignoreCase ) const;
#ifndef TRACY_NO_STATISTICS
const SourceLocationZones& GetZonesForSourceLocation( int32_t srcloc ) const;
const flat_hash_map<int32_t, SourceLocationZones, nohash<int32_t>>& GetSourceLocationZones() const { return m_data.sourceLocationZones; }
bool AreSourceLocationZonesReady() const { return m_data.sourceLocationZonesReady; }
#endif
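// Thread ids are compressed from 64-bit system ids to dense 16-bit indices
// ( threadExpand holds the inverse map ); threadLast caches the most recent hit,
// since consecutive events typically come from the same thread.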
tracy_force_inline uint16_t CompressThread( uint64_t thread )
{
if( m_data.threadLast.first == thread ) return m_data.threadLast.second;
return CompressThreadReal( thread );
}
tracy_force_inline uint64_t DecompressThread( uint16_t thread ) const { assert( thread < m_data.threadExpand.size() ); return m_data.threadExpand[thread]; }
TracyMutex& GetMbpsDataLock() { return m_mbpsData.lock; }
const std::vector<float>& GetMbpsData() const { return m_mbpsData.mbps; }
float GetCompRatio() const { return m_mbpsData.compRatio; }
size_t GetSendQueueSize() const { return m_mbpsData.queue; }
bool HasData() const { return m_hasData.load( std::memory_order_acquire ); }
bool IsConnected() const { return m_connected.load( std::memory_order_relaxed ); }
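// A worker with no network thread to join holds a static, file-loaded trace.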
bool IsDataStatic() const { return !m_thread.joinable(); }
void Shutdown() { m_shutdown.store( true, std::memory_order_relaxed ); }
void Disconnect() { Shutdown(); } // TODO: Needs proper implementation.
void Write( FileWrite& f );
int GetTraceVersion() const { return m_traceVersion; }
uint8_t GetHandshakeStatus() const { return m_handshake.load( std::memory_order_relaxed ); }
static const LoadProgress& GetLoadProgress() { return s_loadProgress; }
int64_t GetLoadTime() const { return m_loadTime; }
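// Malformed instrumentation ( e.g. unbalanced zone begin/end pairs ) is latched
// into m_failure / m_failureData; the UI reports it through the accessors below.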
void ClearFailure() { m_failure = Failure::None; }
Failure GetFailureType() const { return m_failure; }
const FailureData& GetFailureData() const { return m_failureData; }
static const char* GetFailureString( Failure failure );
private:
void Exec();
void Query( ServerQuery type, uint64_t data );
tracy_force_inline bool DispatchProcess( const QueueItem& ev, char*& ptr );
tracy_force_inline bool Process( const QueueItem& ev );
tracy_force_inline void ProcessZoneBegin( const QueueZoneBegin& ev );
tracy_force_inline void ProcessZoneBeginCallstack( const QueueZoneBegin& ev );
tracy_force_inline void ProcessZoneBeginAllocSrcLoc( const QueueZoneBegin& ev );
tracy_force_inline void ProcessZoneBeginAllocSrcLocCallstack( const QueueZoneBegin& ev );
tracy_force_inline void ProcessZoneEnd( const QueueZoneEnd& ev );
tracy_force_inline void ProcessZoneValidation( const QueueZoneValidation& ev );
tracy_force_inline void ProcessFrameMark( const QueueFrameMark& ev );
tracy_force_inline void ProcessFrameMarkStart( const QueueFrameMark& ev );
tracy_force_inline void ProcessFrameMarkEnd( const QueueFrameMark& ev );
tracy_force_inline void ProcessZoneText( const QueueZoneText& ev );
tracy_force_inline void ProcessZoneName( const QueueZoneText& ev );
tracy_force_inline void ProcessLockAnnounce( const QueueLockAnnounce& ev );
tracy_force_inline void ProcessLockTerminate( const QueueLockTerminate& ev );
tracy_force_inline void ProcessLockWait( const QueueLockWait& ev );
tracy_force_inline void ProcessLockObtain( const QueueLockObtain& ev );
tracy_force_inline void ProcessLockRelease( const QueueLockRelease& ev );
tracy_force_inline void ProcessLockSharedWait( const QueueLockWait& ev );
tracy_force_inline void ProcessLockSharedObtain( const QueueLockObtain& ev );
tracy_force_inline void ProcessLockSharedRelease( const QueueLockRelease& ev );
tracy_force_inline void ProcessLockMark( const QueueLockMark& ev );
tracy_force_inline void ProcessPlotData( const QueuePlotData& ev );
tracy_force_inline void ProcessMessage( const QueueMessage& ev );
tracy_force_inline void ProcessMessageLiteral( const QueueMessage& ev );
tracy_force_inline void ProcessGpuNewContext( const QueueGpuNewContext& ev );
tracy_force_inline void ProcessGpuZoneBegin( const QueueGpuZoneBegin& ev );
tracy_force_inline void ProcessGpuZoneBeginCallstack( const QueueGpuZoneBegin& ev );
tracy_force_inline void ProcessGpuZoneEnd( const QueueGpuZoneEnd& ev );
tracy_force_inline void ProcessGpuTime( const QueueGpuTime& ev );
tracy_force_inline void ProcessMemAlloc( const QueueMemAlloc& ev );
tracy_force_inline bool ProcessMemFree( const QueueMemFree& ev );
tracy_force_inline void ProcessMemAllocCallstack( const QueueMemAlloc& ev );
tracy_force_inline void ProcessMemFreeCallstack( const QueueMemFree& ev );
tracy_force_inline void ProcessCallstackMemory( const QueueCallstackMemory& ev );
tracy_force_inline void ProcessCallstack( const QueueCallstack& ev );
tracy_force_inline void ProcessCallstackAlloc( const QueueCallstackAlloc& ev );
tracy_force_inline void ProcessCallstackFrameSize( const QueueCallstackFrameSize& ev );
tracy_force_inline void ProcessCallstackFrame( const QueueCallstackFrame& ev );
tracy_force_inline void ProcessCrashReport( const QueueCrashReport& ev );
tracy_force_inline void ProcessSysTime( const QueueSysTime& ev );
tracy_force_inline void ProcessZoneBeginImpl( ZoneEvent* zone, const QueueZoneBegin& ev );
tracy_force_inline void ProcessZoneBeginAllocSrcLocImpl( ZoneEvent* zone, const QueueZoneBegin& ev );
tracy_force_inline void ProcessGpuZoneBeginImpl( GpuEvent* zone, const QueueGpuZoneBegin& ev );
void ZoneStackFailure( uint64_t thread, const ZoneEvent* ev );
void ZoneEndFailure( uint64_t thread );
void ZoneTextFailure( uint64_t thread );
void ZoneNameFailure( uint64_t thread );
void MemFreeFailure( uint64_t thread );
void FrameEndFailure();
tracy_force_inline void CheckSourceLocation( uint64_t ptr );
void NewSourceLocation( uint64_t ptr );
tracy_force_inline uint32_t ShrinkSourceLocation( uint64_t srcloc );
uint32_t NewShrinkedSourceLocation( uint64_t srcloc );
tracy_force_inline void MemAllocChanged( int64_t time );
void CreateMemAllocPlot();
void ReconstructMemAllocPlot();
void InsertMessageData( MessageData* msg, uint64_t thread );
ThreadData* NewThread( uint64_t thread );
ThreadData* NoticeThread( uint64_t thread );
tracy_force_inline void NewZone( ZoneEvent* zone, uint64_t thread );
void InsertLockEvent( LockMap& lockmap, LockEvent* lev, uint64_t thread );
void CheckString( uint64_t ptr );
void CheckThreadString( uint64_t id );
void AddSourceLocation( const QueueSourceLocation& srcloc );
void AddSourceLocationPayload( uint64_t ptr, char* data, size_t sz );
void AddString( uint64_t ptr, char* str, size_t sz );
void AddThreadString( uint64_t id, char* str, size_t sz );
void AddCustomString( uint64_t ptr, char* str, size_t sz );
tracy_force_inline void AddCallstackPayload( uint64_t ptr, char* data, size_t sz );
tracy_force_inline void AddCallstackAllocPayload( uint64_t ptr, char* data, size_t sz );
void InsertPlot( PlotData* plot, int64_t time, double val );
void HandlePlotName( uint64_t name, char* str, size_t sz );
void HandleFrameName( uint64_t name, char* str, size_t sz );
void HandlePostponedPlots();
StringLocation StoreString( char* str, size_t sz );
uint16_t CompressThreadReal( uint64_t thread );
uint16_t CompressThreadNew( uint64_t thread );
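// Since the 0.4.2 dump format, timestamps are stored as deltas rather than
// absolute values; refTime ( and, for GPU zones, refGpuTime ) accumulates them
// back while reading. The Pre042/Pre044 overloads handle older trace versions.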
tracy_force_inline void ReadTimeline( FileRead& f, ZoneEvent* zone, uint16_t thread, int64_t& refTime );
tracy_force_inline void ReadTimelinePre042( FileRead& f, ZoneEvent* zone, uint16_t thread, int fileVer );
tracy_force_inline void ReadTimeline( FileRead& f, GpuEvent* zone, int64_t& refTime, int64_t& refGpuTime );
tracy_force_inline void ReadTimelinePre044( FileRead& f, GpuEvent* zone, int64_t& refTime, int64_t& refGpuTime, int fileVer );
tracy_force_inline void ReadTimelineUpdateStatistics( ZoneEvent* zone, uint16_t thread );
void ReadTimeline( FileRead& f, Vector<ZoneEvent*>& vec, uint16_t thread, uint64_t size, int64_t& refTime );
void ReadTimelinePre042( FileRead& f, Vector<ZoneEvent*>& vec, uint16_t thread, uint64_t size, int fileVer );
Track separate time offset for GPU times. This is second version of 0.4.2 dump file format. Previous 0.4.2 format cannot be read anymore. 041/aa.tracy (0.4.1) {18987 KB} -> 042/aa.tracy (0.4.2) {10051 KB} 52.94% size change 041/android.tracy (0.4.1) {696753 KB} -> 042/android.tracy (0.4.2) {542738 KB} 77.90% size change 041/asset-new.tracy (0.4.1) {97163 KB} -> 042/asset-new.tracy (0.4.2) {78402 KB} 80.69% size change 041/asset-new-id.tracy (0.4.1) {105683 KB} -> 042/asset-new-id.tracy (0.4.2) {84341 KB} 79.81% size change 041/asset-old.tracy (0.4.1) {100205 KB} -> 042/asset-old.tracy (0.4.2) {80688 KB} 80.52% size change 041/big.tracy (0.4.1) {2246014 KB} -> 042/big.tracy (0.4.2) {939578 KB} 41.83% size change 041/crash.tracy (0.4.1) {143 KB} -> 042/crash.tracy (0.4.2) {131 KB} 91.37% size change 041/crash2.tracy (0.4.1) {3411 KB} -> 042/crash2.tracy (0.4.2) {1420 KB} 41.63% size change 041/darkrl.tracy (0.4.1) {31818 KB} -> 042/darkrl.tracy (0.4.2) {15762 KB} 49.54% size change 041/darkrl2.tracy (0.4.1) {18778 KB} -> 042/darkrl2.tracy (0.4.2) {7945 KB} 42.31% size change 041/darkrl-old.tracy (0.4.1) {151346 KB} -> 042/darkrl-old.tracy (0.4.2) {67449 KB} 44.57% size change 041/deadlock.tracy (0.4.1) {53 KB} -> 042/deadlock.tracy (0.4.2) {52 KB} 98.55% size change 041/gn-opengl.tracy (0.4.1) {45860 KB} -> 042/gn-opengl.tracy (0.4.2) {29005 KB} 63.25% size change 041/gn-vulkan.tracy (0.4.1) {45618 KB} -> 042/gn-vulkan.tracy (0.4.2) {29352 KB} 64.34% size change 041/long.tracy (0.4.1) {1583550 KB} -> 042/long.tracy (0.4.2) {1182800 KB} 74.69% size change 041/mem.tracy (0.4.1) {1243058 KB} -> 042/mem.tracy (0.4.2) {1369067 KB} 110.14% size change 041/multi.tracy (0.4.1) {14519 KB} -> 042/multi.tracy (0.4.2) {8000 KB} 55.10% size change 041/new.tracy (0.4.1) {1439 KB} -> 042/new.tracy (0.4.2) {1105 KB} 76.75% size change 041/q3bsp-mt.tracy (0.4.1) {1414323 KB} -> 042/q3bsp-mt.tracy (0.4.2) {949855 KB} 67.16% size change 041/q3bsp-st.tracy (0.4.1) {301334 KB} -> 042/q3bsp-st.tracy (0.4.2) {240347 KB} 79.76% size change 041/selfprofile.tracy (0.4.1) {399648 KB} -> 042/selfprofile.tracy (0.4.2) {197704 KB} 49.47% size change 041/tbrowser.tracy (0.4.1) {13052 KB} -> 042/tbrowser.tracy (0.4.2) {9503 KB} 72.81% size change 041/test.tracy (0.4.1) {60309 KB} -> 042/test.tracy (0.4.2) {40700 KB} 67.49% size change 041/virtualfile_hc.tracy (0.4.1) {108967 KB} -> 042/virtualfile_hc.tracy (0.4.2) {72424 KB} 66.46% size change 041/zfile_hc.tracy (0.4.1) {58814 KB} -> 042/zfile_hc.tracy (0.4.2) {39418 KB} 67.02% size change
2019-01-03 19:09:45 +00:00
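// Hypothetical illustration of the separate GPU offset: a GPU zone
// carries both a CPU-side and a GPU-side timestamp, and each series is
// delta-encoded against its own running reference (refTime vs refGpuTime
// in the overload below), since the two clocks run on different
// timebases. The function and parameter names are assumptions made for
// the sketch.
void ReadGpuTimePairSketch( FileRead& f, int64_t& refTime, int64_t& refGpuTime, int64_t& cpuStart, int64_t& gpuStart )
{
    int64_t cpuDelta, gpuDelta;
    f.Read( &cpuDelta, sizeof( cpuDelta ) );
    f.Read( &gpuDelta, sizeof( gpuDelta ) );
    refTime += cpuDelta;        // the CPU timeline advances its own reference
    refGpuTime += gpuDelta;     // the GPU timeline advances independently
    cpuStart = refTime;
    gpuStart = refGpuTime;
}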
void ReadTimeline( FileRead& f, Vector<GpuEvent*>& vec, uint64_t size, int64_t& refTime, int64_t& refGpuTime );
Don't decompress GpuZone threads while saving the trace. Saving the threads compressed in GPU zones and memory events has the following effect on trace dump sizes:
043/aa.tracy (0.4.3) {10055 KB} -> 044/aa.tracy (0.4.4) {9975 KB} 99.20% size change
043/android.tracy (0.4.3) {542739 KB} -> 044/android.tracy (0.4.4) {519248 KB} 95.67% size change
043/asset-new.tracy (0.4.3) {78403 KB} -> 044/asset-new.tracy (0.4.4) {75899 KB} 96.81% size change
043/asset-new-id.tracy (0.4.3) {84341 KB} -> 044/asset-new-id.tracy (0.4.4) {81771 KB} 96.95% size change
043/asset-old.tracy (0.4.3) {80688 KB} -> 044/asset-old.tracy (0.4.4) {78410 KB} 97.18% size change
043/big.tracy (0.4.3) {939577 KB} -> 044/big.tracy (0.4.4) {938427 KB} 99.88% size change
043/callstack.tracy (0.4.3) {14557 KB} -> 044/callstack.tracy (0.4.4) {14465 KB} 99.37% size change
043/callstack-linux.tracy (0.4.3) {6949 KB} -> 044/callstack-linux.tracy (0.4.4) {6942 KB} 99.90% size change
043/crash.tracy (0.4.3) {131 KB} -> 044/crash.tracy (0.4.4) {127 KB} 97.10% size change
043/crash2.tracy (0.4.3) {1422 KB} -> 044/crash2.tracy (0.4.4) {1412 KB} 99.29% size change
043/darkrl.tracy (0.4.3) {15767 KB} -> 044/darkrl.tracy (0.4.4) {15663 KB} 99.34% size change
043/darkrl2.tracy (0.4.3) {7947 KB} -> 044/darkrl2.tracy (0.4.4) {7886 KB} 99.23% size change
043/darkrl-old.tracy (0.4.3) {67448 KB} -> 044/darkrl-old.tracy (0.4.4) {67004 KB} 99.34% size change
043/deadlock.tracy (0.4.3) {5984 KB} -> 044/deadlock.tracy (0.4.4) {5986 KB} 100.03% size change
043/gn-opengl.tracy (0.4.3) {29005 KB} -> 044/gn-opengl.tracy (0.4.4) {28885 KB} 99.59% size change
043/gn-vulkan.tracy (0.4.3) {29352 KB} -> 044/gn-vulkan.tracy (0.4.4) {29257 KB} 99.68% size change
043/long.tracy (0.4.3) {1182800 KB} -> 044/long.tracy (0.4.4) {1176584 KB} 99.47% size change
043/mem.tracy (0.4.3) {1369067 KB} -> 044/mem.tracy (0.4.4) {1262406 KB} 92.21% size change
043/multi.tracy (0.4.3) {8004 KB} -> 044/multi.tracy (0.4.4) {7944 KB} 99.24% size change
043/new.tracy (0.4.3) {1108 KB} -> 044/new.tracy (0.4.4) {1099 KB} 99.18% size change
043/q3bsp-mt.tracy (0.4.3) {949855 KB} -> 044/q3bsp-mt.tracy (0.4.4) {937574 KB} 98.71% size change
043/q3bsp-st.tracy (0.4.3) {240347 KB} -> 044/q3bsp-st.tracy (0.4.4) {230092 KB} 95.73% size change
043/selfprofile.tracy (0.4.3) {197708 KB} -> 044/selfprofile.tracy (0.4.4) {197659 KB} 99.98% size change
043/tbrowser.tracy (0.4.3) {9503 KB} -> 044/tbrowser.tracy (0.4.4) {9503 KB} 100.00% size change
043/test.tracy (0.4.3) {40700 KB} -> 044/test.tracy (0.4.4) {40699 KB} 100.00% size change
043/virtualfile_hc.tracy (0.4.3) {72424 KB} -> 044/virtualfile_hc.tracy (0.4.4) {72304 KB} 99.83% size change
043/zfile_hc.tracy (0.4.3) {39419 KB} -> 044/zfile_hc.tracy (0.4.4) {39328 KB} 99.77% size change
2019-02-16 22:04:34 +00:00
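// A sketch of the thread compression this change relies on, assuming
// 64-bit OS thread ids are interned into small sequential indices so GPU
// zones and memory events can persist a compact index instead of the
// full id (cf. the uint16_t thread parameter above). Illustrative only;
// the name and the exact container choice are assumptions.
struct ThreadCompressSketch
{
    uint16_t Compress( uint64_t tid )
    {
        auto it = map.find( tid );
        if( it != map.end() ) return it->second;    // already interned
        const auto idx = uint16_t( list.size() );
        map.emplace( tid, idx );                    // first sighting: assign the next index
        list.push_back( tid );
        return idx;
    }
    uint64_t Decompress( uint16_t idx ) const { return list[idx]; }

    flat_hash_map<uint64_t, uint16_t, nohash<uint64_t>> map;
    Vector<uint64_t> list;
};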
void ReadTimelinePre044( FileRead& f, Vector<GpuEvent*>& vec, uint64_t size, int64_t& refTime, int64_t& refGpuTime, int fileVer );
void WriteTimeline( FileWrite& f, const Vector<ZoneEvent*>& vec, int64_t& refTime );
void WriteTimeline( FileWrite& f, const Vector<GpuEvent*>& vec, int64_t& refTime, int64_t& refGpuTime );
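// Convert a raw hardware timer (TSC) reading to trace time via the
// calibrated multiplier.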
int64_t TscTime( int64_t tsc ) { return int64_t( tsc * m_timerMul ); }
int64_t TscTime( uint64_t tsc ) { return int64_t( tsc * m_timerMul ); }
Socket m_sock;
std::string m_addr;
std::thread m_thread;
std::atomic<bool> m_connected = { false };
std::atomic<bool> m_hasData;
std::atomic<bool> m_shutdown = { false };
std::thread m_threadBackground;
int64_t m_delay;
int64_t m_resolution;
double m_timerMul;
std::string m_captureName;
std::string m_captureProgram;
uint64_t m_captureTime;
std::string m_hostInfo;
bool m_terminate = false;
bool m_crashed = false;
LZ4_streamDecode_t* m_stream;
char* m_buffer;
int m_bufferOffset;
bool m_onDemand;
GpuCtxData* m_gpuCtxMap[256];
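// Bookkeeping for payloads referenced by incoming events (strings,
// source locations, call stacks, thread names) that are still being
// resolved through queries to the client.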
flat_hash_map<uint64_t, StringLocation, nohash<uint64_t>> m_pendingCustomStrings;
uint64_t m_pendingCallstackPtr = 0;
uint32_t m_pendingCallstackId;
flat_hash_map<uint64_t, int32_t, nohash<uint64_t>> m_pendingSourceLocationPayload;
Vector<uint64_t> m_sourceLocationQueue;
flat_hash_map<uint64_t, uint32_t, nohash<uint64_t>> m_sourceLocationShrink;
flat_hash_map<uint64_t, ThreadData*, nohash<uint64_t>> m_threadMap;
flat_hash_map<uint64_t, NextCallstack, nohash<uint64_t>> m_nextCallstack;
uint32_t m_pendingStrings;
uint32_t m_pendingThreads;
uint32_t m_pendingSourceLocation;
uint32_t m_pendingCallstackFrames;
uint8_t m_pendingCallstackSubframes;
CallstackFrameData* m_callstackFrameStaging;
uint64_t m_callstackFrameStagingPtr;
uint64_t m_callstackAllocNextIdx = 0;
uint64_t m_lastMemActionCallstack;
bool m_lastMemActionWasAlloc;
Slab<64*1024*1024> m_slab;
DataBlock m_data;
MbpsBlock m_mbpsData;
int m_traceVersion;
std::atomic<uint8_t> m_handshake = { 0 };
static LoadProgress s_loadProgress;
int64_t m_loadTime;
Failure m_failure = Failure::None;
FailureData m_failureData;
PlotData* m_sysTimePlot = nullptr;
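// Query messages waiting to be sent to the client; m_serverQuerySpaceLeft
// presumably tracks the remaining send budget before the queue must drain.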
Vector<ServerQueryPacket> m_serverQueryQueue;
size_t m_serverQuerySpaceLeft;
};
}
#endif