Revert to non-shared mutex for data lock.

The main change here is that the UI will be unresponsive while a trace is being
saved, even when no capture is being performed. Note that the UI was always
frozen during saving if the capture was live, due to how the locks prevent
starvation.
Author: Bartosz Taudul
Date:   2021-02-07 18:11:24 +01:00
Parent: 417d526581
Commit: 9bb1d13afa
3 changed files with 14 additions and 14 deletions
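
In practice the change swaps reader-shared locking for plain mutual exclusion around the trace data. Below is a minimal sketch of the two sides involved (hypothetical names, not Tracy's actual classes; the real lock is reached through Worker::GetDataLock() as in the diff): with a plain std::mutex, the save thread holds the data lock for the entire write, so the per-frame draw cannot acquire it and the UI stalls until the save finishes.

    #include <mutex>

    struct DataBlockSketch            // hypothetical stand-in for the protected trace data
    {
        std::mutex lock;              // was std::shared_mutex before this revert
        // ... trace data ...
    };

    DataBlockSketch g_data;

    void DrawFrame()                  // UI thread, runs once per frame
    {
        std::lock_guard<std::mutex> lk( g_data.lock );   // was std::shared_lock
        // read the trace data and render it
    }

    void SaveTrace()                  // background save thread
    {
        std::lock_guard<std::mutex> lk( g_data.lock );   // held for the whole write
        // serialize the trace to disk; DrawFrame() blocks until this returns
    }

With the previous std::shared_mutex, both of these paths took std::shared_lock and could run concurrently; only the capture worker took the lock exclusively, which is why the freeze used to occur only while a live capture was running.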

@@ -730,7 +730,7 @@ bool View::DrawImpl()
ImGui::EndPopup();
}
}
-std::shared_lock<std::shared_mutex> lock( m_worker.GetDataLock() );
+std::lock_guard<std::mutex> lock( m_worker.GetDataLock() );
if( !m_worker.IsDataStatic() )
{
if( m_worker.IsConnected() )
@@ -1407,7 +1407,7 @@ bool View::DrawConnection()
ImGui::GetWindowDrawList()->AddCircleFilled( wpos + ImVec2( 1 + cs * 0.5, 3 + ty * 1.75 ), cs * 0.5, isConnected ? 0xFF2222CC : 0xFF444444, 10 );
{
-std::shared_lock<std::shared_mutex> lock( m_worker.GetDataLock() );
+std::lock_guard<std::mutex> lock( m_worker.GetDataLock() );
ImGui::SameLine();
TextFocused( "+", RealToString( m_worker.GetSendInFlight() ) );
const auto sz = m_worker.GetFrameCount( *m_frames );
@@ -1475,7 +1475,7 @@ bool View::DrawConnection()
m_userData.StateShouldBePreserved();
m_saveThreadState.store( SaveThreadState::Saving, std::memory_order_relaxed );
m_saveThread = std::thread( [this, f{std::move( f )}] {
-std::shared_lock<std::shared_mutex> lock( m_worker.GetDataLock() );
+std::lock_guard<std::mutex> lock( m_worker.GetDataLock() );
m_worker.Write( *f );
f->Finish();
const auto stats = f->GetCompressionStatistics();
@@ -1489,7 +1489,7 @@ bool View::DrawConnection()
ImGui::SameLine( 0, 2 * ty );
const char* stopStr = ICON_FA_PLUG " Stop";
-std::shared_lock<std::shared_mutex> lock( m_worker.GetDataLock() );
+std::lock_guard<std::mutex> lock( m_worker.GetDataLock() );
if( !m_disconnectIssued && m_worker.IsConnected() )
{
if( ImGui::Button( stopStr ) )

@@ -1785,7 +1785,7 @@ Worker::Worker( FileRead& f, EventType::Type eventMask, bool bgTasks )
}
for( auto& v : counts ) UpdateSampleStatistics( v.first, v.second, false );
}
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.callstackSamplesReady = true;
} ) );
@@ -1801,7 +1801,7 @@ Worker::Worker( FileRead& f, EventType::Type eventMask, bool bgTasks )
gcnt += AddGhostZone( GetCallstack( sd.callstack.Val() ), &t->ghostZones, sd.time.Val() );
}
}
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.ghostZonesReady = true;
m_data.ghostCnt = gcnt;
} ) );
@@ -1835,7 +1835,7 @@ Worker::Worker( FileRead& f, EventType::Type eventMask, bool bgTasks )
{
pdqsort_branchless( v.second.begin(), v.second.end(), []( const auto& lhs, const auto& rhs ) { return lhs.time.Val() < rhs.time.Val(); } );
}
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.symbolSamplesReady = true;
} ) );
}
@@ -1853,7 +1853,7 @@ Worker::Worker( FileRead& f, EventType::Type eventMask, bool bgTasks )
#endif
}
{
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.sourceLocationZonesReady = true;
}
@@ -2779,7 +2779,7 @@ void Worker::Exec()
const char* end = ptr + netbuf.size;
{
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
while( ptr < end )
{
auto ev = (const QueueItem*)ptr;
@@ -6223,7 +6223,7 @@ void Worker::ReconstructMemAllocPlot( MemData& mem )
PlotData* plot;
{
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
plot = m_slab.AllocInit<PlotData>();
}
@@ -6307,7 +6307,7 @@ void Worker::ReconstructMemAllocPlot( MemData& mem )
plot->min = 0;
plot->max = max;
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.plots.Data().insert( m_data.plots.Data().begin(), plot );
mem.plot = plot;
}
@@ -6403,7 +6403,7 @@ void Worker::ReconstructContextSwitchUsage()
}
}
-std::lock_guard<std::shared_mutex> lock( m_data.lock );
+std::lock_guard<std::mutex> lock( m_data.lock );
m_data.ctxUsageReady = true;
}

@@ -233,7 +233,7 @@ private:
struct DataBlock
{
-std::shared_mutex lock;
+std::mutex lock;
StringDiscovery<FrameData*> frames;
FrameData* framesBase;
Vector<GpuCtxData*> gpuData;
@@ -413,7 +413,7 @@ public:
uint32_t GetCpuId() const { return m_data.cpuId; }
const char* GetCpuManufacturer() const { return m_data.cpuManufacturer; }
-std::shared_mutex& GetDataLock() { return m_data.lock; }
+std::mutex& GetDataLock() { return m_data.lock; }
size_t GetFrameCount( const FrameData& fd ) const { return fd.frames.size(); }
size_t GetFullFrameCount( const FrameData& fd ) const;
int64_t GetLastTime() const { return m_data.lastTime; }
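
As a footnote on the starvation remark in the commit message: with the old std::shared_mutex, the UI and the save thread were readers while the live-capture worker was the writer, and common writer-preferring implementations stop admitting new shared locks once an exclusive lock is requested. A rough, implementation-dependent sketch of that interaction (hypothetical names):

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex dataLock;    // hypothetical stand-in for the old m_data.lock

    void UiFrame()                 // reader: acquires a fresh shared lock every frame
    {
        std::shared_lock<std::shared_mutex> lk( dataLock );
        // draw from the data
    }

    void SaveThread()              // reader: keeps one shared lock for the whole save
    {
        std::shared_lock<std::shared_mutex> lk( dataLock );
        // write the trace to disk
    }

    void WorkerExec()              // writer: live capture appending incoming events
    {
        std::lock_guard<std::shared_mutex> lk( dataLock );
        // while this exclusive lock is pending or held, new shared locks are
        // typically not granted, so UiFrame() stalls even though SaveThread()
        // only reads
    }

With the revert, that distinction disappears: every holder of GetDataLock() is exclusive, so saving blocks the UI regardless of whether a capture is live.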