[RFA,5/8] Wrap xfer_partial and enable_btrace for Ravenscar

Message ID 20190326144404.6670-6-tromey@adacore.com

Commit Message

Tom Tromey March 26, 2019, 2:44 p.m. UTC
A gdb crash showed that the xfer_partial target method was not wrapped
for Ravenscar.  This caused remote.c to call
remote::set_general_thread with a Ravenscar "fake" ptid, which showed
up later as an event ptid.
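
For reference, the ptids this target synthesizes are meaningful only
to the ravenscar stratum: they have a zero lwp and carry the Ada task
id in the tid field (see is_ravenscar_task).  A rough sketch of the
leak, with hypothetical inf_pid/task_id values standing in for the
real ones:

   /* A Ravenscar "fake" ptid: lwp is 0 and tid holds the Ada task
      id, so only the ravenscar stratum can interpret it.  */
   ptid_t fake_ptid (inf_pid, 0, task_id);

   /* With xfer_partial unwrapped, delegation reached remote.c while
      inferior_ptid was still fake_ptid, so remote.c in effect did
      the following, and the stub later reported an event for a ptid
      gdb could not look up:  */
   set_general_thread (fake_ptid);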

I went through all the target methods and looked to see which ones
could call set_general_thread or set_continue_thread (but not
set_general_process, as I think Ravenscar targets aren't
multi-inferior).  This patch wraps the two that I found.
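
Both wrappers follow the same shape: map the Ravenscar task ptid to
the ptid of the base CPU thread, then delegate to the target beneath.
Sketched here using the enable_btrace override from the patch below:

   struct btrace_target_info *
   ravenscar_thread_target::enable_btrace (ptid_t ptid,
                                           const struct btrace_config *conf)
   {
     /* Translate the "fake" task ptid into the ptid of the
        underlying CPU thread before delegating.  */
     ptid = get_base_thread_from_ravenscar_task (ptid);
     return beneath ()->enable_btrace (ptid, conf);
   }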

xfer_partial requires special treatment, because it can be called
recursively via get_base_thread_from_ravenscar_task.  To avoid a
recursive call, this patch changes update_thread_list to record all
tasks in the cpu_map, and changes get_thread_base_cpu to prefer this
map.  This avoids some memory reads.
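
In outline, the new lookup order in get_thread_base_cpu is
(simplified from the patch; the non-task case is elided):

   /* Prefer the cpu_map cache; this path performs no inferior
      memory reads, so it cannot re-enter xfer_partial.  */
   auto iter = cpu_map.find (ptid.tid ());
   if (iter != cpu_map.end ())
     base_cpu = iter->second;
   else
     {
       /* Fallback for tasks not yet recorded in the map; this may
          read inferior memory.  */
       struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
       gdb_assert (task_info != NULL);
       base_cpu = task_info->base_cpu;
     }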

It was unclear to me whether enable_btrace really makes sense for
Ravenscar, but at the same time wrapping it here seemed harmless.

gdb/ChangeLog
2019-03-26  Tom Tromey  <tromey@adacore.com>

	* ravenscar-thread.c (xfer_partial, enable_btrace, add_thread):
	New methods.
	(ravenscar_thread_target::get_thread_base_cpu): Check cpu_map
	first.
	(ravenscar_thread_target::update_inferior_ptid): Qualify the
	call to the global add_thread.
	(ravenscar_thread_target::add_thread): Rename from
	ravenscar_add_thread.
	(ravenscar_thread_target::update_thread_list): Use a lambda.
---
 gdb/ChangeLog          | 11 +++++++
 gdb/ravenscar-thread.c | 66 +++++++++++++++++++++++++++++++++++-------
 2 files changed, 66 insertions(+), 11 deletions(-)
  

Patch

diff --git a/gdb/ravenscar-thread.c b/gdb/ravenscar-thread.c
index a3545847288..85a86f5698e 100644
--- a/gdb/ravenscar-thread.c
+++ b/gdb/ravenscar-thread.c
@@ -102,6 +102,13 @@  struct ravenscar_thread_target final : public target_ops
 
   bool stopped_data_address (CORE_ADDR *) override;
 
+  enum target_xfer_status xfer_partial (enum target_object object,
+					const char *annex,
+					gdb_byte *readbuf,
+					const gdb_byte *writebuf,
+					ULONGEST offset, ULONGEST len,
+					ULONGEST *xfered_len) override;
+
   bool thread_alive (ptid_t ptid) override;
 
   int core_of_thread (ptid_t ptid) override;
@@ -112,6 +119,14 @@  struct ravenscar_thread_target final : public target_ops
 
   ptid_t get_ada_task_ptid (long lwp, long thread) override;
 
+  struct btrace_target_info *enable_btrace (ptid_t ptid,
+					    const struct btrace_config *conf)
+    override
+  {
+    ptid = get_base_thread_from_ravenscar_task (ptid);
+    return beneath ()->enable_btrace (ptid, conf);
+  }
+
   void mourn_inferior () override;
 
   void close () override
@@ -133,6 +148,7 @@  private:
   bool runtime_initialized ();
   int get_thread_base_cpu (ptid_t ptid);
   ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
+  void add_thread (struct ada_task_info *task);
 
   /* This maps a TID to the CPU on which it was running.  This is
      needed because sometimes the runtime will report an active task
@@ -169,16 +185,18 @@  ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
 
   if (is_ravenscar_task (ptid))
     {
-      struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
+      /* Prefer to not read inferior memory if possible, to avoid
+	 reentrancy problems with xfer_partial.  */
+      auto iter = cpu_map.find (ptid.tid ());
 
-      if (task_info != NULL)
-	base_cpu = task_info->base_cpu;
+      if (iter != cpu_map.end ())
+	base_cpu = iter->second;
       else
 	{
-	  auto iter = cpu_map.find (ptid.tid ());
+	  struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
 
-	  gdb_assert (iter != cpu_map.end ());
-	  base_cpu = iter->second;
+	  gdb_assert (task_info != NULL);
+	  base_cpu = task_info->base_cpu;
 	}
     }
   else
@@ -254,7 +272,7 @@  ravenscar_thread_target::update_inferior_ptid ()
      may not always add it to the thread list.  Add it here.  */
   if (!find_thread_ptid (inferior_ptid))
     {
-      add_thread (inferior_ptid);
+      ::add_thread (inferior_ptid);
       cpu_map[inferior_ptid.tid ()] = base_cpu;
     }
 }
@@ -380,11 +398,14 @@  ravenscar_thread_target::wait (ptid_t ptid,
 /* Add the thread associated to the given TASK to the thread list
    (if the thread has already been added, this is a no-op).  */
 
-static void
-ravenscar_add_thread (struct ada_task_info *task)
+void
+ravenscar_thread_target::add_thread (struct ada_task_info *task)
 {
   if (find_thread_ptid (task->ptid) == NULL)
-    add_thread (task->ptid);
+    {
+      ::add_thread (task->ptid);
+      cpu_map[task->ptid.tid ()] = task->base_cpu;
+    }
 }
 
 void
@@ -395,7 +416,10 @@  ravenscar_thread_target::update_thread_list ()
      (m_base_ptid) and the running thread, that may not have been included
      to system.tasking.debug's list yet.  */
 
-  iterate_over_live_ada_tasks (ravenscar_add_thread);
+  iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
+			       {
+				 this->add_thread (task);
+			       });
 }
 
 ptid_t
@@ -537,6 +561,26 @@  ravenscar_thread_target::core_of_thread (ptid_t ptid)
   return beneath ()->core_of_thread (inferior_ptid);
 }
 
+/* Implement the target xfer_partial method.  */
+
+enum target_xfer_status
+ravenscar_thread_target::xfer_partial (enum target_object object,
+				       const char *annex,
+				       gdb_byte *readbuf,
+				       const gdb_byte *writebuf,
+				       ULONGEST offset, ULONGEST len,
+				       ULONGEST *xfered_len)
+{
+  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
+  /* Calling get_base_thread_from_ravenscar_task can read memory from
+     the inferior.  However, that function is written to prefer our
+     internal map, so it should not result in recursive calls in
+     practice.  */
+  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
+  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
+				   offset, len, xfered_len);
+}
+
 /* Observer on inferior_created: push ravenscar thread stratum if needed.  */
 
 static void