OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/scheduler/renderer/task_queue_throttler.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
6 | 6 |
7 #include <cstdint> | 7 #include <cstdint> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
135 if (!task_queue->IsEmpty()) { | 135 if (!task_queue->IsEmpty()) { |
136 LazyNow lazy_now(tick_clock_); | 136 LazyNow lazy_now(tick_clock_); |
137 OnQueueNextWakeUpChanged(task_queue, | 137 OnQueueNextWakeUpChanged(task_queue, |
138 NextTaskRunTime(&lazy_now, task_queue).value()); | 138 NextTaskRunTime(&lazy_now, task_queue).value()); |
139 } | 139 } |
140 } | 140 } |
141 | 141 |
142 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 142 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
143 TaskQueueMap::iterator iter = queue_details_.find(task_queue); | 143 TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
144 | 144 |
145 if (iter == queue_details_.end() || | 145 if (iter == queue_details_.end()) |
146 --iter->second.throttling_ref_count != 0) { | |
147 return; | 146 return; |
148 } | 147 if (iter->second.throttling_ref_count == 0) |
| 148 return; |
| 149 if (--iter->second.throttling_ref_count != 0) |
| 150 return; |
149 | 151 |
150 TRACE_EVENT1("renderer.scheduler", "TaskQueueThrottler_TaskQueueUnthrottled", | 152 TRACE_EVENT1("renderer.scheduler", "TaskQueueThrottler_TaskQueueUnthrottled", |
151 "task_queue", task_queue); | 153 "task_queue", task_queue); |
152 | 154 |
153 task_queue->SetObserver(nullptr); | 155 task_queue->SetObserver(nullptr); |
154 | 156 |
155 MaybeDeleteQueueMetadata(iter); | 157 MaybeDeleteQueueMetadata(iter); |
156 | 158 |
157 if (!allow_throttling_) | 159 if (!allow_throttling_) |
158 return; | 160 return; |
(...skipping 10 matching lines...) Expand all Loading... |
169 if (find_it == queue_details_.end()) | 171 if (find_it == queue_details_.end()) |
170 return false; | 172 return false; |
171 return find_it->second.throttling_ref_count > 0; | 173 return find_it->second.throttling_ref_count > 0; |
172 } | 174 } |
173 | 175 |
174 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { | 176 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
175 auto find_it = queue_details_.find(task_queue); | 177 auto find_it = queue_details_.find(task_queue); |
176 if (find_it == queue_details_.end()) | 178 if (find_it == queue_details_.end()) |
177 return; | 179 return; |
178 | 180 |
179 LazyNow lazy_now(tick_clock_); | |
180 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; | 181 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
181 for (BudgetPool* budget_pool : budget_pools) { | 182 for (BudgetPool* budget_pool : budget_pools) { |
182 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); | 183 budget_pool->UnregisterQueue(task_queue); |
183 } | 184 } |
184 | 185 |
185 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't | 186 // Iterator may have been deleted by BudgetPool::UnregisterQueue, so don't |
186 // use it here. | 187 // use it here. |
187 queue_details_.erase(task_queue); | 188 queue_details_.erase(task_queue); |
188 | 189 |
189 // NOTE: Observer is automatically unregistered when unregistering task queue. | 190 // NOTE: Observer is automatically unregistered when unregistering task queue. |
190 } | 191 } |
191 | 192 |
192 void TaskQueueThrottler::OnQueueNextWakeUpChanged( | 193 void TaskQueueThrottler::OnQueueNextWakeUpChanged( |
193 TaskQueue* queue, | 194 TaskQueue* queue, |
194 base::TimeTicks next_wake_up) { | 195 base::TimeTicks next_wake_up) { |
195 if (!control_task_queue_->RunsTasksOnCurrentThread()) { | 196 if (!control_task_queue_->RunsTasksOnCurrentThread()) { |
196 control_task_queue_->PostTask( | 197 control_task_queue_->PostTask( |
197 FROM_HERE, | 198 FROM_HERE, |
198 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); | 199 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); |
199 return; | 200 return; |
200 } | 201 } |
201 | 202 |
202 TRACE_EVENT0("renderer.scheduler", | 203 TRACE_EVENT0("renderer.scheduler", |
203 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); | 204 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); |
204 | 205 |
205 // We don't expect this to get called for disabled queues, but we can't DCHECK | 206 // We don't expect this to get called for disabled queues, but we can't DCHECK |
206 // because of the above thread hop. Just bail out if the queue is disabled. | 207 // because of the above thread hop. Just bail out if the queue is disabled. |
207 if (!queue->IsQueueEnabled()) | 208 if (!queue->IsQueueEnabled()) |
208 return; | 209 return; |
209 | 210 |
210 base::TimeTicks now = tick_clock_->NowTicks(); | 211 base::TimeTicks now = tick_clock_->NowTicks(); |
| 212 |
| 213 auto find_it = queue_details_.find(queue); |
| 214 if (find_it == queue_details_.end()) |
| 215 return; |
| 216 |
| 217 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 218 budget_pool->OnQueueNextWakeUpChanged(queue, now, next_wake_up); |
| 219 } |
| 220 |
| 221 // TODO(altimin): This probably can be removed — budget pools should |
| 222 // schedule this. |
| 223 base::TimeTicks next_allowed_run_time = |
| 224 GetNextAllowedRunTime(queue, next_wake_up); |
211 MaybeSchedulePumpThrottledTasks( | 225 MaybeSchedulePumpThrottledTasks( |
212 FROM_HERE, now, | 226 FROM_HERE, now, std::max(next_wake_up, next_allowed_run_time)); |
213 std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); | |
214 } | 227 } |
215 | 228 |
216 void TaskQueueThrottler::PumpThrottledTasks() { | 229 void TaskQueueThrottler::PumpThrottledTasks() { |
217 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler::PumpThrottledTasks"); | 230 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler::PumpThrottledTasks"); |
218 pending_pump_throttled_tasks_runtime_.reset(); | 231 pending_pump_throttled_tasks_runtime_.reset(); |
219 | 232 |
220 LazyNow lazy_now(tick_clock_); | 233 LazyNow lazy_now(tick_clock_); |
221 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | 234 base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
222 | 235 |
| 236 for (const auto& pair : budget_pools_) |
| 237 pair.first->OnWakeUp(lazy_now.Now()); |
| 238 |
223 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 239 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
224 TaskQueue* task_queue = map_entry.first; | 240 TaskQueue* task_queue = map_entry.first; |
225 if (task_queue->IsEmpty() || !IsThrottled(task_queue)) | 241 UpdateQueueThrottlingStateInternal(lazy_now.Now(), task_queue, true); |
226 continue; | |
227 | |
228 // Don't enable queues whose budget pool doesn't allow them to run now. | |
229 base::TimeTicks next_allowed_run_time = | |
230 GetNextAllowedRunTime(lazy_now.Now(), task_queue); | |
231 base::Optional<base::TimeTicks> next_desired_run_time = | |
232 NextTaskRunTime(&lazy_now, task_queue); | |
233 | |
234 if (next_desired_run_time && | |
235 next_allowed_run_time > next_desired_run_time.value()) { | |
236 TRACE_EVENT1( | |
237 "renderer.scheduler", | |
238 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
239 "throttle_time_in_seconds", | |
240 (next_allowed_run_time - next_desired_run_time.value()).InSecondsF()); | |
241 | |
242 // Schedule a pump for queue which was disabled because of time budget. | |
243 next_scheduled_delayed_task = | |
244 Min(next_scheduled_delayed_task, next_allowed_run_time); | |
245 | |
246 continue; | |
247 } | |
248 | |
249 next_scheduled_delayed_task = | |
250 Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp()); | |
251 | |
252 if (next_allowed_run_time > lazy_now.Now()) | |
253 continue; | |
254 | |
255 // Remove previous fence and install a new one, allowing all tasks posted | |
256 // on |task_queue| up until this point to run and block all further tasks. | |
257 task_queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
258 } | |
259 | |
260 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is | |
261 // a pending delayed task or a throttled task ready to run. | |
262 // NOTE: posting a non-delayed task in the future will result in | |
263 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called. | |
264 if (next_scheduled_delayed_task) { | |
265 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(), | |
266 *next_scheduled_delayed_task); | |
267 } | 242 } |
268 } | 243 } |
269 | 244 |
270 /* static */ | 245 /* static */ |
271 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( | 246 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( |
272 base::TimeTicks unthrottled_runtime) { | 247 base::TimeTicks unthrottled_runtime) { |
273 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); | 248 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); |
274 return unthrottled_runtime + one_second - | 249 return unthrottled_runtime + one_second - |
275 ((unthrottled_runtime - base::TimeTicks()) % one_second); | 250 ((unthrottled_runtime - base::TimeTicks()) % one_second); |
276 } | 251 } |
(...skipping 29 matching lines...) Expand all Loading... |
306 } | 281 } |
307 | 282 |
308 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( | 283 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( |
309 const char* name) { | 284 const char* name) { |
310 CPUTimeBudgetPool* time_budget_pool = | 285 CPUTimeBudgetPool* time_budget_pool = |
311 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); | 286 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); |
312 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); | 287 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); |
313 return time_budget_pool; | 288 return time_budget_pool; |
314 } | 289 } |
315 | 290 |
| 291 WakeUpBudgetPool* TaskQueueThrottler::CreateWakeUpBudgetPool(const char* name) { |
| 292 WakeUpBudgetPool* wake_up_budget_pool = |
| 293 new WakeUpBudgetPool(name, this, tick_clock_->NowTicks()); |
| 294 budget_pools_[wake_up_budget_pool] = base::WrapUnique(wake_up_budget_pool); |
| 295 return wake_up_budget_pool; |
| 296 } |
| 297 |
316 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, | 298 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, |
317 base::TimeTicks start_time, | 299 base::TimeTicks start_time, |
318 base::TimeTicks end_time) { | 300 base::TimeTicks end_time) { |
319 if (!IsThrottled(task_queue)) | 301 if (!IsThrottled(task_queue)) |
320 return; | 302 return; |
321 | 303 |
322 auto find_it = queue_details_.find(task_queue); | 304 auto find_it = queue_details_.find(task_queue); |
323 if (find_it == queue_details_.end()) | 305 if (find_it == queue_details_.end()) |
324 return; | 306 return; |
325 | 307 |
326 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 308 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
327 budget_pool->RecordTaskRunTime(start_time, end_time); | 309 budget_pool->RecordTaskRunTime(task_queue, start_time, end_time); |
328 if (!budget_pool->HasEnoughBudgetToRun(end_time)) | |
329 budget_pool->BlockThrottledQueues(end_time); | |
330 } | 310 } |
331 } | 311 } |
332 | 312 |
333 void TaskQueueThrottler::BlockQueue(base::TimeTicks now, TaskQueue* queue) { | 313 void TaskQueueThrottler::UpdateQueueThrottlingState(base::TimeTicks now, |
334 if (!IsThrottled(queue)) | 314 TaskQueue* queue) { |
| 315 UpdateQueueThrottlingStateInternal(now, queue, false); |
| 316 } |
| 317 |
| 318 void TaskQueueThrottler::UpdateQueueThrottlingStateInternal(base::TimeTicks now, |
| 319 TaskQueue* queue, |
| 320 bool is_wake_up) { |
| 321 if (!queue->IsQueueEnabled() || !IsThrottled(queue)) { |
335 return; | 322 return; |
| 323 } |
336 | 324 |
337 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 325 LazyNow lazy_now(now); |
338 SchedulePumpQueue(FROM_HERE, now, queue); | 326 |
| 327 base::Optional<base::TimeTicks> next_desired_run_time = |
| 328 NextTaskRunTime(&lazy_now, queue); |
| 329 |
| 330 if (!next_desired_run_time) { |
| 331 // This queue is empty. Given that new task can arrive at any moment, |
| 332 // block the queue completely and update the state upon the notification |
| 333 // about a new task. |
| 334 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); |
| 335 return; |
| 336 } |
| 337 |
| 338 if (CanRunTasksAt(queue, now, false) && |
| 339 CanRunTasksAt(queue, next_desired_run_time.value(), false)) { |
| 340 // We can run up until the next task uninterrupted unless something changes. |
| 341 // Remove the fence to allow new tasks to run immediately and handle |
| 342 // the situation change in the notification about the said change. |
| 343 queue->RemoveFence(); |
| 344 |
| 345 // TaskQueueThrottler does not schedule wake-ups implicitly, we need |
| 346 // to be explicit. |
| 347 if (next_desired_run_time.value() != now) { |
| 348 time_domain_->SetNextTaskRunTime(next_desired_run_time.value()); |
| 349 } |
| 350 return; |
| 351 } |
| 352 |
| 353 if (CanRunTasksAt(queue, now, is_wake_up)) { |
| 354 // We can run task now, but we can't run until the next scheduled task. |
| 355 // Insert a fresh fence to unblock queue and schedule a pump for the |
| 356 // next wake-up. |
| 357 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); |
| 358 |
| 359 base::Optional<base::TimeTicks> next_wake_up = |
| 360 queue->GetNextScheduledWakeUp(); |
| 361 if (next_wake_up) { |
| 362 MaybeSchedulePumpThrottledTasks( |
| 363 FROM_HERE, now, GetNextAllowedRunTime(queue, next_wake_up.value())); |
| 364 } |
| 365 return; |
| 366 } |
| 367 |
| 368 base::TimeTicks next_run_time = |
| 369 GetNextAllowedRunTime(queue, next_desired_run_time.value()); |
| 370 |
| 371 // Insert a fence of an appropriate type. |
| 372 base::Optional<QueueBlockType> block_type = GetQueueBlockType(now, queue); |
| 373 DCHECK(block_type); |
| 374 |
| 375 switch (block_type.value()) { |
| 376 case QueueBlockType::kAllTasks: |
| 377 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
| 378 |
| 379 { |
| 380 // Braces limit the scope for a declared variable. Does not compile |
| 381 // otherwise. |
| 382 TRACE_EVENT1( |
| 383 "renderer.scheduler", |
| 384 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", |
| 385 "throttle_time_in_seconds", |
| 386 (next_run_time - next_desired_run_time.value()).InSecondsF()); |
| 387 } |
| 388 break; |
| 389 case QueueBlockType::kNewTasksOnly: |
| 390 if (!queue->HasFence()) { |
| 391 // Insert a new non-fully blocking fence only when there is no fence |
| 392 // already in order to avoid undesired unblocking of old tasks. |
| 393 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); |
| 394 } |
| 395 break; |
| 396 } |
| 397 |
| 398 // Schedule a pump. |
| 399 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_run_time); |
| 400 } |
| 401 |
| 402 base::Optional<QueueBlockType> TaskQueueThrottler::GetQueueBlockType( |
| 403 base::TimeTicks now, |
| 404 TaskQueue* queue) { |
| 405 auto find_it = queue_details_.find(queue); |
| 406 if (find_it == queue_details_.end()) |
| 407 return base::nullopt; |
| 408 |
| 409 bool has_new_tasks_only_block = false; |
| 410 |
| 411 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 412 if (!budget_pool->CanRunTasksAt(now, false)) { |
| 413 if (budget_pool->GetBlockType() == QueueBlockType::kAllTasks) |
| 414 return QueueBlockType::kAllTasks; |
| 415 DCHECK_EQ(budget_pool->GetBlockType(), QueueBlockType::kNewTasksOnly); |
| 416 has_new_tasks_only_block = true; |
| 417 } |
| 418 } |
| 419 |
| 420 if (has_new_tasks_only_block) |
| 421 return QueueBlockType::kNewTasksOnly; |
| 422 return base::nullopt; |
339 } | 423 } |
340 | 424 |
341 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, | 425 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, |
342 base::TimeTicks now) const { | 426 base::TimeTicks now) const { |
343 if (pending_pump_throttled_tasks_runtime_) { | 427 if (pending_pump_throttled_tasks_runtime_) { |
344 state->SetDouble( | 428 state->SetDouble( |
345 "next_throttled_tasks_pump_in_seconds", | 429 "next_throttled_tasks_pump_in_seconds", |
346 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); | 430 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); |
347 } | 431 } |
348 | 432 |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
389 | 473 |
390 find_it->second.budget_pools.erase(budget_pool); | 474 find_it->second.budget_pools.erase(budget_pool); |
391 | 475 |
392 MaybeDeleteQueueMetadata(find_it); | 476 MaybeDeleteQueueMetadata(find_it); |
393 } | 477 } |
394 | 478 |
395 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { | 479 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { |
396 budget_pools_.erase(budget_pool); | 480 budget_pools_.erase(budget_pool); |
397 } | 481 } |
398 | 482 |
399 void TaskQueueThrottler::UnblockQueue(base::TimeTicks now, TaskQueue* queue) { | 483 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime( |
400 SchedulePumpQueue(FROM_HERE, now, queue); | 484 TaskQueue* queue, |
401 } | 485 base::TimeTicks desired_run_time) { |
402 | 486 base::TimeTicks next_run_time = desired_run_time; |
403 void TaskQueueThrottler::SchedulePumpQueue( | |
404 const tracked_objects::Location& from_here, | |
405 base::TimeTicks now, | |
406 TaskQueue* queue) { | |
407 if (!IsThrottled(queue)) | |
408 return; | |
409 | |
410 LazyNow lazy_now(now); | |
411 base::Optional<base::TimeTicks> next_desired_run_time = | |
412 NextTaskRunTime(&lazy_now, queue); | |
413 if (!next_desired_run_time) | |
414 return; | |
415 | |
416 base::Optional<base::TimeTicks> next_run_time = | |
417 Max(next_desired_run_time, GetNextAllowedRunTime(now, queue)); | |
418 | |
419 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); | |
420 } | |
421 | |
422 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, | |
423 TaskQueue* queue) { | |
424 base::TimeTicks next_run_time = now; | |
425 | 487 |
426 auto find_it = queue_details_.find(queue); | 488 auto find_it = queue_details_.find(queue); |
427 if (find_it == queue_details_.end()) | 489 if (find_it == queue_details_.end()) |
428 return next_run_time; | 490 return next_run_time; |
429 | 491 |
430 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 492 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
431 next_run_time = | 493 next_run_time = std::max( |
432 std::max(next_run_time, budget_pool->GetNextAllowedRunTime()); | 494 next_run_time, budget_pool->GetNextAllowedRunTime(desired_run_time)); |
433 } | 495 } |
434 | 496 |
435 return next_run_time; | 497 return next_run_time; |
436 } | 498 } |
437 | 499 |
| 500 bool TaskQueueThrottler::CanRunTasksAt(TaskQueue* queue, |
| 501 base::TimeTicks moment, |
| 502 bool is_wake_up) { |
| 503 auto find_it = queue_details_.find(queue); |
| 504 if (find_it == queue_details_.end()) |
| 505 return true; |
| 506 |
| 507 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 508 if (!budget_pool->CanRunTasksAt(moment, is_wake_up)) |
| 509 return false; |
| 510 } |
| 511 |
| 512 return true; |
| 513 } |
| 514 |
438 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { | 515 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { |
439 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) | 516 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) |
440 queue_details_.erase(it); | 517 queue_details_.erase(it); |
441 } | 518 } |
442 | 519 |
443 void TaskQueueThrottler::DisableThrottling() { | 520 void TaskQueueThrottler::DisableThrottling() { |
444 if (!allow_throttling_) | 521 if (!allow_throttling_) |
445 return; | 522 return; |
446 | 523 |
447 allow_throttling_ = false; | 524 allow_throttling_ = false; |
(...skipping 26 matching lines...) Expand all Loading... |
474 for (const auto& map_entry : queue_details_) { | 551 for (const auto& map_entry : queue_details_) { |
475 if (map_entry.second.throttling_ref_count == 0) | 552 if (map_entry.second.throttling_ref_count == 0) |
476 continue; | 553 continue; |
477 | 554 |
478 TaskQueue* queue = map_entry.first; | 555 TaskQueue* queue = map_entry.first; |
479 | 556 |
480 // Throttling is enabled and task queue should be blocked immediately | 557 // Throttling is enabled and task queue should be blocked immediately |
481 // to enforce task alignment. | 558 // to enforce task alignment. |
482 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 559 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
483 queue->SetTimeDomain(time_domain_.get()); | 560 queue->SetTimeDomain(time_domain_.get()); |
484 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); | 561 UpdateQueueThrottlingState(lazy_now.Now(), queue); |
485 } | 562 } |
486 | 563 |
487 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler_EnableThrottling"); | 564 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler_EnableThrottling"); |
488 } | 565 } |
489 | 566 |
490 } // namespace scheduler | 567 } // namespace scheduler |
491 } // namespace blink | 568 } // namespace blink |
OLD | NEW |