[1/6] gdb: use schedlock_applies in user_visible_resume_ptid.
Checks

| Context                                        | Check   | Description    |
|------------------------------------------------|---------|----------------|
| linaro-tcwg-bot/tcwg_gdb_build--master-arm     | success | Testing passed |
| linaro-tcwg-bot/tcwg_gdb_build--master-aarch64 | success | Testing passed |
| linaro-tcwg-bot/tcwg_gdb_check--master-aarch64 | success | Testing passed |
Commit Message
This is a refactoring. The logic in user_visible_resume_ptid is very
similar to schedlock_applies, but uses the `step` parameter instead of
`tp->control.stepping_command`.
Refactor the schedlock_applies logic into the following two functions:
bool schedlock_applies_to_thread (thread_info *tp)
and
bool schedlock_applies (bool step)
such that they share the logic.
Update the call-sites accordingly: where we have only the thread, use
the former; where we have the bool step, use the latter.
---
gdb/infrun.c | 49 +++++++++++++++++++++++++++----------------------
1 file changed, 27 insertions(+), 22 deletions(-)
Comments
>>>>> "Natalia" == Natalia Saiapova <natalia.saiapova@intel.com> writes:
Natalia> This is a refactoring. The logic in user_visible_resume_ptid is very
Natalia> similar to schedlock_applies, but uses `step` parameter instead of
Natalia> `tp->control.stepping_command`.
Natalia> Refactor schedlock_applies logic into the following two methods:
Natalia> bool schedlock_applies_to_thread (thread_info *tp)
Natalia> and
Natalia> bool schedlock_applies (bool step)
Natalia> such that they share the logic.
Natalia> Update the call-sites accordingly, where we have only the thread, use
Natalia> the former, and where we have the bool step, use the latter.
Thank you for the patch.
Natalia> +static bool schedlock_applies_to_thread (thread_info *tp);
Natalia> +
Natalia> +static bool schedlock_applies (bool step);
I don't mind separate names for these, but at the same time overloading
seems fine and would make the patch smaller.
Natalia> user_visible_resume_ptid (int step)
Natalia> {
Natalia> ptid_t resume_ptid;
Natalia> + thread_info *tp = nullptr;
Natalia> + if (inferior_ptid != null_ptid)
Natalia> + tp = inferior_thread ();
This doesn't seem to be used in this function.
Tom
@@ -107,7 +107,9 @@ static bool start_step_over (void);
static bool step_over_info_valid_p (void);
-static bool schedlock_applies (struct thread_info *tp);
+static bool schedlock_applies_to_thread (thread_info *tp);
+
+static bool schedlock_applies (bool step);
/* Asynchronous signal handler registered as event loop source for
when we have pending events ready to be passed to the core. */
@@ -1970,7 +1972,7 @@ any_thread_needs_target_thread_events (process_stratum_target *target,
{
for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
if (displaced_step_in_progress_thread (tp)
- || schedlock_applies (tp)
+ || schedlock_applies_to_thread (tp)
|| tp->thread_fsm () != nullptr)
return true;
return false;
@@ -1983,7 +1985,7 @@ static void
update_thread_events_after_step_over (thread_info *event_thread,
const target_waitstatus &event_status)
{
- if (schedlock_applies (event_thread))
+ if (schedlock_applies_to_thread (event_thread))
{
/* If scheduler-locking applies, continue reporting
thread-created/thread-cloned events. */
@@ -2383,6 +2385,9 @@ ptid_t
user_visible_resume_ptid (int step)
{
ptid_t resume_ptid;
+ thread_info *tp = nullptr;
+ if (inferior_ptid != null_ptid)
+ tp = inferior_thread ();
if (non_stop)
{
@@ -2390,20 +2395,12 @@ user_visible_resume_ptid (int step)
individually. */
resume_ptid = inferior_ptid;
}
- else if ((scheduler_mode == schedlock_on)
- || (scheduler_mode == schedlock_step && step))
+ else if (schedlock_applies (step))
{
/* User-settable 'scheduler' mode requires solo thread
resume. */
resume_ptid = inferior_ptid;
}
- else if ((scheduler_mode == schedlock_replay)
- && target_record_will_replay (minus_one_ptid, execution_direction))
- {
- /* User-settable 'scheduler' mode requires solo thread resume in replay
- mode. */
- resume_ptid = inferior_ptid;
- }
else if (!sched_multi && target_supports_multi_process ())
{
/* Resume all threads of the current process (and none of other
@@ -2574,7 +2571,7 @@ do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
*/
if (step_over_info_valid_p ()
|| displaced_step_in_progress_thread (tp)
- || schedlock_applies (tp))
+ || schedlock_applies_to_thread (tp))
{
gdb_thread_options options
= GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
@@ -3185,15 +3182,23 @@ thread_still_needs_step_over (struct thread_info *tp)
return what;
}
+/* Returns true if scheduler locking applies to TP. */
+
+static bool
+schedlock_applies_to_thread (thread_info *tp)
+{
+ bool step = (tp != nullptr) && tp->control.stepping_command;
+ return schedlock_applies (step);
+}
+
/* Returns true if scheduler locking applies. STEP indicates whether
- we're about to do a step/next-like command to a thread. */
+ we're about to do a step/next-like command. */
static bool
-schedlock_applies (struct thread_info *tp)
+schedlock_applies (bool step)
{
return (scheduler_mode == schedlock_on
- || (scheduler_mode == schedlock_step
- && tp->control.stepping_command)
+ || (scheduler_mode == schedlock_step && step)
|| (scheduler_mode == schedlock_replay
&& target_record_will_replay (minus_one_ptid,
execution_direction)));
@@ -3690,7 +3695,7 @@ proceed (CORE_ADDR addr, enum gdb_signal siggnal)
/* If scheduler locking applies, we can avoid iterating over all
threads. */
- if (!non_stop && !schedlock_applies (cur_thr))
+ if (!non_stop && !schedlock_applies_to_thread (cur_thr))
{
for (thread_info *tp : all_non_exited_threads (resume_target,
resume_ptid))
@@ -5938,7 +5943,7 @@ handle_thread_exited (execution_control_state *ecs)
}
inferior *inf = ecs->event_thread->inf;
- bool slock_applies = schedlock_applies (ecs->event_thread);
+ bool slock_applies = schedlock_applies_to_thread (ecs->event_thread);
delete_thread (ecs->event_thread);
ecs->event_thread = nullptr;
@@ -6377,7 +6382,7 @@ handle_inferior_event (struct execution_control_state *ecs)
/* If resuming the child, mark it running. */
if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
- && !schedlock_applies (ecs->event_thread))
+ && !schedlock_applies_to_thread (ecs->event_thread))
|| (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
&& (follow_child
|| (!detach_fork && (non_stop || sched_multi)))))
@@ -6386,7 +6391,7 @@ handle_inferior_event (struct execution_control_state *ecs)
/* In non-stop mode, also resume the other branch. */
if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
&& target_is_non_stop_p ()
- && !schedlock_applies (ecs->event_thread))
+ && !schedlock_applies_to_thread (ecs->event_thread))
|| (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
&& (!detach_fork && (non_stop
|| (sched_multi
@@ -8249,7 +8254,7 @@ switch_back_to_stepped_thread (struct execution_control_state *ecs)
current thread is stepping. If some other thread not the
event thread is stepping, then it must be that scheduler
locking is not in effect. */
- if (schedlock_applies (ecs->event_thread))
+ if (schedlock_applies_to_thread (ecs->event_thread))
return false;
/* Otherwise, we no longer expect a trap in the current thread.