@@ -720,6 +720,7 @@ ALL_TARGET_OBS = \
iq2000-tdep.o \
linux-record.o \
linux-tdep.o \
+ lk-low.o \
lm32-tdep.o \
m32c-tdep.o \
m32r-linux-tdep.o \
@@ -1303,6 +1304,7 @@ HFILES_NO_SRCDIR = \
linux-nat.h \
linux-record.h \
linux-tdep.h \
+ lk-low.h \
location.h \
m2-lang.h \
m32r-tdep.h \
@@ -2251,6 +2253,7 @@ ALLDEPFILES = \
linux-fork.c \
linux-record.c \
linux-tdep.c \
+ lk-low.c \
lm32-tdep.c \
m32r-linux-nat.c \
m32r-linux-tdep.c \
@@ -41,6 +41,10 @@ x86_tobjs="x86-tdep.o"
i386_tobjs="i386-tdep.o arch/i386.o i387-tdep.o ${x86_tobjs}"
amd64_tobjs="amd64-tdep.o arch/amd64.o ${x86_tobjs}"
+# List of object files for Linux kernel support.  To be included into *-linux*
+# targets which support Linux kernel debugging.
+lk_tobjs="lk-low.o"
+
# Here are three sections to get a list of target specific object
# files according to target triplet $TARG.
@@ -483,6 +483,7 @@ enum gdb_osabi
GDB_OSABI_HURD,
GDB_OSABI_SOLARIS,
GDB_OSABI_LINUX,
+ GDB_OSABI_LINUX_KERNEL,
GDB_OSABI_FREEBSD,
GDB_OSABI_NETBSD,
GDB_OSABI_OPENBSD,
@@ -3,7 +3,7 @@
/* Dynamic architecture support for GDB, the GNU debugger.
- Copyright (C) 1998-2018 Free Software Foundation, Inc.
+ Copyright (C) 1998-2019 Free Software Foundation, Inc.
This file is part of GDB.
@@ -356,6 +356,7 @@ struct gdbarch
char ** disassembler_options;
const disasm_options_and_args_t * valid_disassembler_options;
gdbarch_type_align_ftype *type_align;
+ gdbarch_get_new_lk_ops_ftype *get_new_lk_ops;
};
/* Create a new ``struct gdbarch'' based on information provided by
@@ -717,6 +718,7 @@ verify_gdbarch (struct gdbarch *gdbarch)
/* Skip verify of disassembler_options, invalid_p == 0 */
/* Skip verify of valid_disassembler_options, invalid_p == 0 */
/* Skip verify of type_align, invalid_p == 0 */
+ /* Skip verify of get_new_lk_ops, has predicate. */
if (!log.empty ())
internal_error (__FILE__, __LINE__,
_("verify_gdbarch: the following are invalid ...%s"),
@@ -1062,6 +1064,12 @@ gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)
"gdbarch_dump: get_longjmp_target = <%s>\n",
host_address_to_string (gdbarch->get_longjmp_target));
fprintf_unfiltered (file,
+ "gdbarch_dump: gdbarch_get_new_lk_ops_p() = %d\n",
+ gdbarch_get_new_lk_ops_p (gdbarch));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: get_new_lk_ops = <%s>\n",
+ host_address_to_string (gdbarch->get_new_lk_ops));
+ fprintf_unfiltered (file,
"gdbarch_dump: gdbarch_get_siginfo_type_p() = %d\n",
gdbarch_get_siginfo_type_p (gdbarch));
fprintf_unfiltered (file,
@@ -5092,6 +5100,30 @@ set_gdbarch_type_align (struct gdbarch *gdbarch,
gdbarch->type_align = type_align;
}
+int
+gdbarch_get_new_lk_ops_p (struct gdbarch *gdbarch)
+{
+ gdb_assert (gdbarch != NULL);
+ return gdbarch->get_new_lk_ops != NULL;
+}
+
+linux_kernel_ops *
+gdbarch_get_new_lk_ops (struct gdbarch *gdbarch)
+{
+ gdb_assert (gdbarch != NULL);
+ gdb_assert (gdbarch->get_new_lk_ops != NULL);
+ if (gdbarch_debug >= 2)
+ fprintf_unfiltered (gdb_stdlog, "gdbarch_get_new_lk_ops called\n");
+ return gdbarch->get_new_lk_ops (gdbarch);
+}
+
+void
+set_gdbarch_get_new_lk_ops (struct gdbarch *gdbarch,
+ gdbarch_get_new_lk_ops_ftype get_new_lk_ops)
+{
+ gdbarch->get_new_lk_ops = get_new_lk_ops;
+}
+
/* Keep a registry of per-architecture data-pointers required by GDB
modules. */
@@ -3,7 +3,7 @@
/* Dynamic architecture support for GDB, the GNU debugger.
- Copyright (C) 1998-2018 Free Software Foundation, Inc.
+ Copyright (C) 1998-2019 Free Software Foundation, Inc.
This file is part of GDB.
@@ -66,6 +66,7 @@ struct mem_range;
struct syscalls_info;
struct thread_info;
struct ui_out;
+class linux_kernel_ops;
#include "regcache.h"
@@ -1595,6 +1596,14 @@ typedef ULONGEST (gdbarch_type_align_ftype) (struct gdbarch *gdbarch, struct typ
extern ULONGEST gdbarch_type_align (struct gdbarch *gdbarch, struct type *type);
extern void set_gdbarch_type_align (struct gdbarch *gdbarch, gdbarch_type_align_ftype *type_align);
+/* Return a new instance of a class inherited from linux_kernel_ops */
+
+extern int gdbarch_get_new_lk_ops_p (struct gdbarch *gdbarch);
+
+typedef linux_kernel_ops * (gdbarch_get_new_lk_ops_ftype) (struct gdbarch *gdbarch);
+extern linux_kernel_ops * gdbarch_get_new_lk_ops (struct gdbarch *gdbarch);
+extern void set_gdbarch_get_new_lk_ops (struct gdbarch *gdbarch, gdbarch_get_new_lk_ops_ftype *get_new_lk_ops);
+
extern struct gdbarch_tdep *gdbarch_tdep (struct gdbarch *gdbarch);
@@ -1174,6 +1174,9 @@ v;const disasm_options_and_args_t *;valid_disassembler_options;;;0;0;;0;host_add
# Type alignment.
m;ULONGEST;type_align;struct type *type;type;;default_type_align;;0
+# Return a new instance of a class inherited from linux_kernel_ops
+M;linux_kernel_ops *;get_new_lk_ops;void;;
+
EOF
}
@@ -1300,6 +1303,7 @@ struct mem_range;
struct syscalls_info;
struct thread_info;
struct ui_out;
+class linux_kernel_ops;
#include "regcache.h"
new file mode 100644
@@ -0,0 +1,226 @@
+/* Iterator for bitmaps from the Linux kernel.
+
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef __LK_BITMAP_H__
+#define __LK_BITMAP_H__
+
+#include "defs.h"
+
+#include "lk-low.h"
+
+/* Short hand access to frequently used bitmap. */
+#define lk_cpu_online_mask lk_bitmap ("cpu_online_mask", "cpumask->bits")
+
+/* Container class to handle bitmaps declared with DECLARE_BITMAP from
+ <linux>/include/linux/types.h. */
+
+class lk_bitmap
+{
+public:
+
+ template<class T>
+ class base_iterator
+ : public std::iterator<std::bidirectional_iterator_tag, T>
+ {
+ public:
+ base_iterator (const base_iterator<T> &it) = default;
+ base_iterator (std::vector<unsigned long>::const_iterator start,
+ size_t bit, size_t size)
+ : m_start (start), m_bit (bit), m_size (size)
+ { next (); }
+
+ base_iterator<T> &operator++ ()
+ { m_bit++; return next (); }
+
+ base_iterator<T> operator++ (int)
+ { base_iterator<T> retval = *this; ++(*this); return retval; }
+
+ base_iterator<T> &operator-- ()
+ { m_bit--; return prev (); }
+
+ base_iterator<T> operator-- (int)
+ { base_iterator<T> retval = *this; --(*this); return retval; }
+
+ bool operator== (base_iterator<T> other) const
+ { return (m_start == other.m_start && m_bit == other.m_bit
+ && m_size == other.m_size); }
+
+ bool operator!= (base_iterator<T> other) const
+ { return !(*this == other); }
+
+ T operator* () const
+ { return m_bit; }
+
+ private:
+ /* Start of the vector containing the bitmap. */
+ std::vector<unsigned long>::const_iterator m_start;
+
+ /* Last set bit returned. */
+ size_t m_bit;
+
+ /* Size of the bitmap in bit. */
+ size_t m_size;
+
+ /* Get next set bit. */
+ base_iterator<T> &next ();
+
+ /* Get previous set bit. */
+ base_iterator<T> &prev ();
+ }; /* class base_iterator */
+
+ /* Constructor for bitmaps defined as variable NAME. */
+ inline lk_bitmap (const std::string &name);
+
+ /* Constructor for bitmaps defined as field in variable NAME. */
+ inline lk_bitmap (const std::string &name, const std::string &alias);
+
+ typedef base_iterator<size_t> iterator;
+ typedef base_iterator<const size_t> const_iterator;
+
+ iterator begin () { return iterator (m_bitmap.cbegin (), 0, size ()); }
+ iterator end () { return iterator (m_bitmap.cbegin (), size (), size ()); }
+
+ const_iterator cbegin () const
+ { return const_iterator (m_bitmap.cbegin (), 0, size ()); }
+ const_iterator cend () const
+ { return const_iterator (m_bitmap.cbegin (), size (), size ()); }
+
+ const_iterator begin () const
+ { return this->cbegin (); }
+ const_iterator end () const
+ { return this->cend (); }
+
+ /* Returns size of bitmap in bits. */
+ inline size_t size () const;
+
+ /* Returns Hamming weight, i.e. number of set bits, of bitmap. */
+ inline size_t hweight () const;
+
+private:
+ /* Read content of bitmap NAME. */
+ inline void read (const std::string &name);
+
+ /* Returns number of unsigned longs needed to store N bytes. */
+ inline size_t byte_to_ulong (size_t n) const;
+
+ /* Storage for content of bitmap. */
+ std::vector<unsigned long> m_bitmap;
+}; /* class bitmap */
+
+/* see declaration. */
+
+template<class T>
+lk_bitmap::base_iterator<T> &
+lk_bitmap::base_iterator<T>::next ()
+{
+ size_t ulong_bits = lk_builtin_type_size (unsigned_long) * LK_BITS_PER_BYTE;
+ auto ulong = m_start + m_bit / ulong_bits;
+ while (m_bit < m_size)
+ {
+      if (*ulong & (1ULL << (m_bit % ulong_bits)))
+ return *this;
+
+ m_bit++;
+ if ((m_bit % ulong_bits) == 0)
+ ulong++;
+ }
+ return *this;
+}
+
+/* see declaration. */
+
+template<class T>
+lk_bitmap::base_iterator<T> &
+lk_bitmap::base_iterator<T>::prev ()
+{
+ size_t ulong_bits = lk_builtin_type_size (unsigned_long) * LK_BITS_PER_BYTE;
+ auto ulong = m_start + m_bit / ulong_bits;
+  while (m_bit < m_size)
+    {
+      if (*ulong & (1ULL << (m_bit % ulong_bits)))
+	return *this;
+
+      m_bit--;
+      if ((m_bit % ulong_bits) == (ulong_bits - 1))
+	ulong--;
+ }
+ return *this;
+}
+
+/* see declaration. */
+
+lk_bitmap::lk_bitmap (const std::string &name)
+{
+ symbol *sym = lookup_symbol (name.c_str (), NULL, VAR_DOMAIN, NULL).symbol;
+ size_t size = TYPE_LENGTH (check_typedef (SYMBOL_TYPE (sym)));
+
+ m_bitmap.resize (byte_to_ulong (size));
+ read (name);
+}
+
+/* see declaration. */
+
+lk_bitmap::lk_bitmap (const std::string &name, const std::string &alias)
+{
+ field *field = lk_field (alias);
+ m_bitmap.resize (byte_to_ulong (FIELD_SIZE (field)));
+ read (name);
+}
+
+/* see declaration. */
+
+void
+lk_bitmap::read (const std::string &name)
+{
+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
+ CORE_ADDR addr = lk_address (name);
+
+ for (size_t i = 0; i < m_bitmap.size (); i++)
+ m_bitmap[i] = lk_read_ulong (addr + i * ulong_size);
+}
+
+/* see declaration. */
+size_t
+lk_bitmap::byte_to_ulong (size_t n) const
+{
+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
+ return (n + ulong_size - 1) / ulong_size;
+}
+
+/* see declaration. */
+
+size_t
+lk_bitmap::size () const
+{
+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
+ return (m_bitmap.size () * ulong_size * LK_BITS_PER_BYTE);
+}
+
+/* see declaration. */
+
+size_t
+lk_bitmap::hweight () const
+{
+ size_t ret = 0;
+  for (auto bit : *this)
+    (void) bit, ret++;
+ return ret;
+}
+
+#endif /* __LK_BITMAP_H__ */
new file mode 100644
@@ -0,0 +1,200 @@
+/* Iterators for internal data structures of the Linux kernel.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef __LK_LIST_H__
+#define __LK_LIST_H__
+
+#include "defs.h"
+
+#include "inferior.h"
+#include "lk-low.h"
+
+/* Container class to handle doubly linked list using struct list_head from
+ <linux>/include/linux/types.h . */
+
+class lk_list
+{
+ template<class T>
+ class base_iterator
+ : public std::iterator<std::bidirectional_iterator_tag, T>
+ {
+ public:
+ base_iterator (const base_iterator<T> &it) = default;
+ base_iterator (CORE_ADDR start, CORE_ADDR offset, bool embedded)
+ : m_current (start), m_start (start), m_offset (offset)
+ {
+ if (!embedded)
+ next ();
+ }
+
+ base_iterator<T> &operator++ ()
+ { return next (); }
+
+ base_iterator<T> operator++ (int)
+ { base_iterator<T> retval = *this; ++(*this); return retval; }
+
+ base_iterator<T> &operator-- ()
+ { return next (false); }
+
+ base_iterator<T> operator-- (int)
+ { base_iterator<T> retval = *this; --(*this); return retval; }
+
+ bool operator== (base_iterator<T> &other) const
+ { return (m_start == other.m_start && m_current == other.m_current
+ && !m_first); }
+
+ bool operator!= (base_iterator<T> &other) const
+ { return !(*this == other); }
+
+ /* Return container of the list_head. */
+ T operator* () const
+ { return m_current - m_offset; }
+
+ private:
+ /* The list_head we are currently at. */
+ CORE_ADDR m_current;
+
+ /* First element of the list. */
+ CORE_ADDR m_start;
+
+ /* Offset of the list_head in the containing struct. */
+ CORE_ADDR m_offset;
+
+ /* For doubly linked lists start == end. Use m_first to track if we
+ just started. */
+ bool m_first = true;
+
+ /* Go to the next (forward) or prev (!forward) element. */
+ base_iterator<T> &next (bool forward = true);
+
+ /* We must always assume that the data we handle is corrupted. Use
+     curr->next->prev == curr (or ->prev->next if going back).  */
+ bool is_valid_next (CORE_ADDR next, bool forward) const;
+ }; /* class base_iterator */
+
+public:
+ /* Constructor for lists starting at address START. */
+ inline lk_list (CORE_ADDR start, const std::string &alias,
+ bool embedded = true);
+
+ /* Constructor for lists starting at variable NAME. */
+ inline lk_list (const std::string &name, const std::string &alias)
+ : lk_list (lk_address (name), alias, is_embedded (name))
+ {}
+
+ typedef base_iterator<CORE_ADDR> iterator;
+ typedef base_iterator<const CORE_ADDR> const_iterator;
+
+ /* Never advance to next element for end () --> embedded = true. */
+ iterator begin () { return iterator (m_start, m_offset, m_embedded); }
+ iterator end () { return iterator (m_start, m_offset, true); }
+
+ const_iterator cbegin () const
+ { return const_iterator (m_start, m_offset, m_embedded); }
+ const_iterator cend () const
+ { return const_iterator (m_start, m_offset, true); }
+
+ const_iterator begin () const
+ { return this->cbegin (); }
+ const_iterator end () const
+ { return this->cend (); }
+
+private:
+ /* First element of the list. */
+ CORE_ADDR m_start;
+
+ /* Offset of the list_head in the containing struct. */
+ CORE_ADDR m_offset;
+
+ /* Is the first list_head embedded in the containing struct, i.e. do we
+ have to consider m_start as a full element of the list or just an entry
+ point? */
+ bool m_embedded;
+
+ /* Check whether variable name is embedded, i.e. is not a list_head. */
+ inline bool is_embedded (const std::string &name) const;
+}; /* class lk_list */
+
+/* see declaration. */
+
+lk_list::lk_list (CORE_ADDR start, const std::string &alias, bool embedded)
+ : m_offset (lk_offset (alias)), m_embedded (embedded)
+{
+ m_start = start;
+ if (m_embedded)
+ m_start += m_offset;
+}
+
+/* see declaration. */
+
+bool
+lk_list::is_embedded (const std::string &name) const
+{
+ symbol *sym = lookup_symbol (name.c_str (), NULL, VAR_DOMAIN, NULL).symbol;
+ type *type = SYMBOL_TYPE (sym);
+
+ return !(TYPE_CODE (type) == TYPE_CODE_STRUCT
+ && streq ("list_head", TYPE_NAME (type)));
+}
+
+/* see declaration. */
+
+template<class T>
+bool
+lk_list::base_iterator<T>::is_valid_next (CORE_ADDR next, bool forward) const
+{
+ if (forward)
+ next += lk_offset ("list_head->prev");
+ else
+ next += lk_offset ("list_head->next");
+
+ return m_current == lk_read_addr (next);
+}
+
+/* see declaration. */
+
+template<class T>
+lk_list::base_iterator<T> &
+lk_list::base_iterator<T>::next (bool forward)
+{
+ CORE_ADDR next;
+
+ if (m_current == m_start && !m_first)
+ return *this;
+
+ m_first = false;
+
+ if (forward)
+ next = lk_read_addr (m_current + lk_offset ("list_head->next"));
+ else
+ next = lk_read_addr (m_current + lk_offset ("list_head->prev"));
+
+ if (!is_valid_next (next, forward))
+ {
+ error (_("Memory corruption detected while iterating list_head at "
+ "0x%s: list_head->%s != list_head."),
+ phex (m_current, lk_builtin_type_size (unsigned_long)),
+ forward ? "next->prev" : "prev->next");
+ }
+
+ m_current = next;
+
+ return *this;
+}
+#endif /* __LK_LIST_H__ */
new file mode 100644
@@ -0,0 +1,1126 @@
+/* Basic Linux kernel support, architecture independent.
+
+ Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include "defs.h"
+
+#include "block.h"
+#include "exceptions.h"
+#include "frame.h"
+#include "gdbarch.h"
+#include "gdbcore.h"
+#include "gdbthread.h"
+#include "gdbtypes.h"
+#include "inferior.h"
+#include "lk-bitmap.h"
+#include "lk-list.h"
+#include "lk-low.h"
+#include "objfiles.h"
+#include "observable.h"
+#include "solib.h"
+#include "target.h"
+#include "value.h"
+
+#include <algorithm>
+
+/* Helper function for declare_address. Returns address of variable NAME on
+ success or -1 on failure. */
+
+static CORE_ADDR
+lk_find_address (const std::string &name)
+{
+ bound_minimal_symbol bmsym = lookup_minimal_symbol (name.c_str (), NULL,
+ NULL);
+ if (bmsym.minsym == NULL)
+ return -1;
+
+ return BMSYMBOL_VALUE_ADDRESS (bmsym);
+}
+
+/* Helper function for try_declare_type. Returns type on success or NULL on
+ failure */
+
+static struct type *
+lk_find_type (const std::string &name)
+{
+ const struct block *global;
+ const struct symbol *sym;
+
+  global = block_global_block (get_selected_block (0));
+ sym = lookup_symbol (name.c_str (), global, STRUCT_DOMAIN, NULL).symbol;
+ if (sym != NULL)
+ return SYMBOL_TYPE (sym);
+
+  /* Check for "typedef struct { ... } name;"-like definitions.  */
+ sym = lookup_symbol (name.c_str (), global, VAR_DOMAIN, NULL).symbol;
+ if (sym == NULL)
+ return NULL;
+
+ struct type *type = check_typedef (SYMBOL_TYPE (sym));
+ if (TYPE_CODE (type) != TYPE_CODE_STRUCT)
+ return NULL;
+
+ return type;
+}
+
+/* Helper function for try_declare_field. Returns lk_symbol with field
+ belonging to TYPE on success or empty on failure. */
+
+static lk_symbol
+lk_find_field (const std::string &f_name, const struct type *type)
+{
+ struct field *field = TYPE_FIELDS (type);
+ struct field *last = field + TYPE_NFIELDS (type);
+
+ while (field != last)
+ {
+ if (streq (field->name, f_name.c_str ()))
+ return lk_symbol (field, FIELD_BYTEPOS (field));
+
+ /* Check if field is defined in anonymous struct within TYPE. */
+ if (streq (field->name, ""))
+ {
+ lk_symbol sym = lk_find_field (f_name, FIELD_TYPE (*field));
+ if (sym.field != NULL)
+ return lk_symbol (sym.field, FIELD_BYTEPOS (field) + sym.offset);
+ }
+ field++;
+ }
+ return lk_symbol ();
+}
+
+/* Helper class to parse C-like field names (type->field1->field2->...) and
+ generate aliases used in lk_ops->m_symbols. */
+
+class lk_field_parser
+{
+public:
+ lk_field_parser (const std::string &alias)
+ : m_alias (alias)
+ {
+ /* The alias must begin with s_name->f_name of the first field. */
+ m_end = m_alias.find (delim);
+ gdb_assert (m_end != std::string::npos);
+ m_end = m_alias.find (delim, m_end + delim.size ());
+ }
+
+ /* Return the struct, i.e. type name of the current field. */
+ std::string s_name () const
+ {
+ if (m_last_type == NULL)
+ return m_alias.substr (0, m_alias.find (delim));
+
+    /* NOTE(review): the former if/else on TYPE_CODE_TYPEDEF returned the
+       identical expression in both branches and the trailing "invalid"
+       return was unreachable; collapsed to a single return.  If typedefs
+       need special handling (e.g. check_typedef), that is still TODO.  */
+    return TYPE_NAME (m_last_type);
+ }
+
+ /* Return the field name of the current field. */
+ std::string f_name () const
+ {
+ size_t start;
+
+ if (m_last_type == NULL)
+ start = m_alias.find (delim) + delim.size ();
+ else
+ start = m_start;
+
+ return m_alias.substr (start, m_end - start);
+ }
+
+ /* Return the full name of the current field. */
+ std::string name () const
+ { return s_name () + delim + f_name (); }
+
+ /* Advance to the next field. */
+ lk_field_parser *next ()
+ {
+ gdb_assert (!empty ());
+
+ m_last_type = FIELD_TYPE (*lk_field (name ()));
+ m_start = m_end + delim.size ();
+ m_end = m_alias.find (delim, m_start);
+
+ return this;
+ }
+
+  /* True when all fields have been parsed.  */
+ bool empty () const
+ { return m_end == std::string::npos; }
+
+ /* Return the depth, i.e. number of fields, in m_alias. */
+ unsigned int depth () const
+ {
+ size_t pos = m_alias.find (delim);
+ unsigned int ret = 0;
+
+ while (pos != std::string::npos)
+ {
+ ret ++;
+ pos = m_alias.find (delim, pos + delim.size ());
+ }
+
+ return ret;
+ }
+
+private:
+ /* Alias originally passed to parser. */
+ std::string m_alias;
+
+ /* First index of current field in m_alias. */
+ size_t m_start = 0;
+
+ /* Last index of current field in m_alias. */
+ size_t m_end = 0;
+
+ /* Type of the last field found. Needed to get s_name of embedded
+ fields. */
+ struct type *m_last_type = NULL;
+
+  /* Delimiter used to separate fields.  */
+ const std::string delim = "->";
+};
+
+/* Helper functions to read and return basic types at a given ADDRess. */
+
+/* Read and return the integer value at address ADDR. */
+
+int
+lk_read_int (CORE_ADDR addr)
+{
+ size_t int_size = lk_builtin_type_size (int);
+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+ return read_memory_integer (addr, int_size, endian);
+}
+
+/* Read and return the unsigned integer value at address ADDR. */
+
+unsigned int
+lk_read_uint (CORE_ADDR addr)
+{
+ size_t uint_size = lk_builtin_type_size (unsigned_int);
+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+ return read_memory_unsigned_integer (addr, uint_size, endian);
+}
+
+/* Read and return the long integer value at address ADDR. */
+
+LONGEST
+lk_read_long (CORE_ADDR addr)
+{
+ size_t long_size = lk_builtin_type_size (long);
+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+ return read_memory_integer (addr, long_size, endian);
+}
+
+/* Read and return the unsigned long integer value at address ADDR. */
+
+ULONGEST
+lk_read_ulong (CORE_ADDR addr)
+{
+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+ return read_memory_unsigned_integer (addr, ulong_size, endian);
+}
+
+/* Read and return the address value at address ADDR. */
+
+CORE_ADDR
+lk_read_addr (CORE_ADDR addr)
+{
+ return (CORE_ADDR) lk_read_ulong (addr);
+}
+
+/* Pointer to Linux kernel ops for current target architecture. */
+
+linux_kernel_ops *lk_ops = NULL;
+
+/* See lk-low.h. */
+
+linux_kernel_ops::~linux_kernel_ops ()
+{
+ /* Delete all gdb threads which correspond to exited kernel tasks. */
+ for (auto& task : task_struct_ptids)
+ {
+ struct thread_info *tp = find_thread_ptid (task.second);
+ if (tp)
+ delete_thread (tp);
+ }
+}
+
+/* See lk-low.h. */
+
+bool
+linux_kernel_ops::try_declare_address (const std::string &alias,
+ const std::string &name)
+{
+ if (has_address (alias))
+ return true;
+
+ CORE_ADDR addr = lk_find_address (name);
+ if (addr == -1)
+ return false;
+
+ m_symbols[alias].addr = addr;
+ return true;
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_address (const std::string &alias,
+ const std::string &name,
+ const lk_kconfig config)
+{
+ if (!try_declare_address (alias, name))
+ {
+ m_kconfig |= config;
+ warning (_("Missing address: %s"), alias.c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_address (const std::string &alias,
+ const std::initializer_list<const std::string> names,
+ const lk_kconfig config)
+{
+ for (auto &name: names)
+ if (try_declare_address (alias, name))
+ break;
+
+ if (!has_address (alias))
+ {
+ m_kconfig |= config;
+ warning (_("Missing address: %s"), alias.c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+bool
+linux_kernel_ops::try_declare_type (const std::string &alias,
+ const std::string &name)
+{
+ if (has_type (alias))
+ return true;
+
+ struct type *type = lk_find_type (name);
+
+ if (type == NULL)
+ return false;
+
+ m_symbols[unique_type_alias (alias)].type = type;
+
+ /* Also add an entry with the name actually used to m_symbol. Needed to
+ support chained field lookup. */
+ if (alias != name)
+ m_symbols[unique_type_alias (name)].type = type;
+
+ return true;
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_type (const std::string &alias,
+ const std::string &name,
+ const lk_kconfig config)
+{
+ if (!try_declare_type (alias, name))
+ {
+ m_kconfig |= config;
+ warning (_("Missing type: %s"), unique_type_alias (alias).c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_type (const std::string &alias,
+ const std::initializer_list<const std::string> names,
+ const lk_kconfig config)
+{
+ for (auto &name: names)
+ if (try_declare_type (alias, name))
+ break;
+
+ if (!has_type (alias))
+ {
+ m_kconfig |= config;
+ warning (_("Missing type: %s"), unique_type_alias (alias).c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+bool
+linux_kernel_ops::try_declare_field (const std::string &orig_alias,
+ const std::string &orig_name)
+{
+ if (has_field (orig_alias))
+ return true;
+
+ lk_field_parser alias (orig_alias);
+ lk_field_parser name (orig_name);
+
+ /* Only allow declaration of one field at a time. */
+ gdb_assert (alias.depth () == 1);
+ gdb_assert (name.depth () == 1);
+
+ if (!try_declare_type (alias.s_name (), name.s_name ()))
+ return false;
+
+ lk_symbol field = lk_find_field (name.f_name (), type (alias.s_name ()));
+ if (field.field == NULL)
+ return false;
+
+ m_symbols[alias.name ()] = field;
+ return true;
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_field (const std::string &alias,
+ const std::string &name,
+ const lk_kconfig config)
+{
+ if (!try_declare_field (alias, name))
+ {
+ m_kconfig |= config;
+ warning (_("Missing field: %s"), alias.c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::declare_field (const std::string &alias,
+ const std::initializer_list<const std::string> names,
+ const lk_kconfig config)
+{
+ for (auto &name: names)
+ if (try_declare_field (alias, name))
+ break;
+
+ if (!has_field (alias))
+ {
+ m_kconfig |= config;
+ warning (_("Missing field: %s"), alias.c_str ());
+ }
+}
+
+/* See lk-low.h. */
+
+void
+linux_kernel_ops::read_symbols ()
+{
+ if (!m_symbols.empty ())
+ m_symbols.clear ();
+
+ declare_field ("task_struct->tasks", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->pid", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->tgid", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->thread_group", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->comm", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->on_cpu", LK_CONFIG_ALWAYS);
+ declare_field ("task_struct->thread", LK_CONFIG_ALWAYS);
+
+ declare_field ("list_head->next", LK_CONFIG_ALWAYS);
+ declare_field ("list_head->prev", LK_CONFIG_ALWAYS);
+
+ declare_field ("rq->curr", LK_CONFIG_ALWAYS);
+ declare_field ("rq->idle", LK_CONFIG_ALWAYS);
+
+ declare_field ("cpumask->bits", LK_CONFIG_ALWAYS);
+
+ declare_address ("init_task", LK_CONFIG_ALWAYS);
+ declare_address ("runqueues", LK_CONFIG_ALWAYS);
+ declare_address ("__per_cpu_offset", LK_CONFIG_ALWAYS);
+
+ declare_address ("cpu_online_mask", {"__cpu_online_mask", /* linux 4.5+ */
+ "cpu_online_bits"}, /* linux -4.4 */
+ LK_CONFIG_ALWAYS);
+
+ declare_address ("high_memory", LK_CONFIG_ALWAYS);
+ declare_address ("_text", LK_CONFIG_ALWAYS);
+
+ arch_read_symbols ();
+
+ if (!ifdef (LK_CONFIG_ALWAYS))
+ error (_("Could not find all symbols needed. Aborting."));
+}
+
+/* See lk-low.h. */
+
+CORE_ADDR
+linux_kernel_ops::offset (const std::string &orig_alias) const
+{
+ lk_field_parser alias (orig_alias);
+ CORE_ADDR ret = m_symbols.at (alias.name ()).offset;
+
+ while (!alias.empty ())
+ ret += m_symbols.at (alias.next ()->name ()).offset;
+
+ return ret;
+}
+
+/* See lk-low.h. */
+
+ptid_t
+linux_kernel_ops::beneath_to_kernel_ptid (ptid_t ptid)
+{
+  for (const auto &cpu_ptid : cpu_ptid_pair)
+ if (cpu_ptid.second.second == ptid)
+ return cpu_ptid.second.first;
+
+ return ptid;
+}
+
+/* See lk-low.h. */
+
+ptid_t
+linux_kernel_ops::cpu_to_beneath_ptid (unsigned int cpu)
+{
+ if (cpu_ptid_pair.count (cpu) > 0)
+ return cpu_ptid_pair [cpu].second;
+ else
+ return null_ptid;
+}
+
+/* See lk-low.h. */
+
+CORE_ADDR
+linux_kernel_ops::percpu_offset (unsigned int cpu)
+{
+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
+ CORE_ADDR percpu_elt = address ("__per_cpu_offset") + (ulong_size * cpu);
+ return lk_read_addr (percpu_elt);
+}
+
+/* See lk-low.h. */
+
+bool
+linux_kernel_ops::is_kernel_address (CORE_ADDR addr)
+{
+ return (addr >= address ("_text")
+ && addr < address ("high_memory")) ? true : false;
+}
+
+/* See lk-low.h. */
+
+unsigned int
+linux_kernel_ops::is_running_task (CORE_ADDR task)
+{
+  if (cpu_curr_task_struct_addr.empty ())
+ {
+ for (unsigned int cpu : lk_cpu_online_mask)
+ {
+ CORE_ADDR rq = lk_address ("runqueues") + lk_ops->percpu_offset (cpu);
+ CORE_ADDR curr = lk_read_addr (rq + lk_offset ("rq->curr"));
+
+ cpu_curr_task_struct_addr [cpu] = curr;
+ }
+ }
+
+ for (auto cpu_task : cpu_curr_task_struct_addr)
+ if (cpu_task.second == task)
+ return cpu_task.first;
+
+ return LK_CPU_INVAL;
+}
+
+/* See lk-low.h. */
+
+unsigned int
+linux_kernel_ops::is_idle_task (CORE_ADDR task)
+{
+  if (cpu_idle_task_struct_addr.empty ())
+ {
+ for (unsigned int cpu : lk_cpu_online_mask)
+ {
+ CORE_ADDR rq = lk_address ("runqueues") + lk_ops->percpu_offset (cpu);
+ CORE_ADDR idle = lk_read_addr (rq + lk_offset ("rq->idle"));
+
+ cpu_idle_task_struct_addr [cpu] = idle;
+ }
+ }
+
+ for (auto cpu_task : cpu_idle_task_struct_addr)
+ if (cpu_task.second == task)
+ return cpu_task.first;
+
+ return LK_CPU_INVAL;
+}
+
+/* See lk-low.h. */
+
+CORE_ADDR
+linux_kernel_ops::get_cpu_task_struct_addr (unsigned int task_cpu)
+{
+  if (cpu_curr_task_struct_addr.empty ())
+ {
+ for (unsigned int cpu : lk_cpu_online_mask)
+ {
+ CORE_ADDR rq = lk_address ("runqueues") + lk_ops->percpu_offset (cpu);
+ CORE_ADDR curr = lk_read_addr (rq + lk_offset ("rq->curr"));
+ CORE_ADDR idle = lk_read_addr (rq + lk_offset ("rq->idle"));
+
+ cpu_curr_task_struct_addr [cpu] = curr;
+ cpu_idle_task_struct_addr [cpu] = idle;
+ }
+ }
+
+ return cpu_curr_task_struct_addr [task_cpu];
+}
+
+/* See lk-low.h. */
+
+CORE_ADDR
+linux_kernel_ops::get_task_struct_addr (ptid_t ptid)
+{
+ long tid = ptid.tid();
+ if (tid_task_struct.count (tid) > 0)
+ return tid_task_struct [tid];
+ return 0;
+}
+
+/* See lk-low.h. */
+
+long
+linux_kernel_ops::get_task_struct_tid (CORE_ADDR task_struct_addr)
+{
+ long tid = lk_thread_count++;
+ tid_task_struct [tid] = task_struct_addr;
+ return tid;
+}
+
+/* See lk-low.h. */
+
+bool
+linux_kernel_ops::update_tasks ()
+{
+ lk_task_ptid_list now_running_ptids;
+ lk_task_ptid_map new_task_struct_ptids;
+ auto last_cpu_task_struct_addr = cpu_curr_task_struct_addr;
+ int inf_pid = current_inferior ()->pid;
+
+ /* Clear cpu_task_struct_addr and cpu_ptid_pair cache that we created
+ on last stop. */
+  cpu_curr_task_struct_addr.clear ();
+  cpu_ptid_pair.clear ();
+  tid_task_struct.clear ();
+ lk_thread_count = 1;
+
+ /* Iterate over all threads and register target beneath threads. */
+ for (thread_info *tp : all_threads_safe ())
+ {
+ /* Check if this task represents a CPU. */
+ if (tp->ptid.tid () == 0)
+ {
+ //TODO: Can we have a target beneath thread with lwp != cpu ???
+ unsigned int thread_cpu = tp->ptid.lwp() - 1;
+ CORE_ADDR task = get_cpu_task_struct_addr (thread_cpu);
+ int pid = lk_read_int (task + lk_offset ("task_struct->pid"));
+ long tid = get_task_struct_tid (task);
+ ptid_t kernel_ptid (tp->ptid.pid (), pid, tid);
+
+ /* If cpu is not idle and current cpu task has a sleeping
+ gdb thread created against it on last stop. */
+ CORE_ADDR idle = cpu_idle_task_struct_addr [thread_cpu];
+ if (idle != task && task_struct_ptids.count (task) > 0)
+ {
+ /* If idle task has a gdb thread created against it. */
+ long tid = get_task_struct_tid (idle);
+ ptid_t new_ptid (inf_pid, 0, tid);
+ if (task_struct_ptids.count (idle) > 0)
+ {
+ thread_change_ptid (task_struct_ptids [idle], new_ptid);
+ new_task_struct_ptids [idle] = new_ptid;
+ now_running_ptids.push_back(task_struct_ptids [task]);
+ task_struct_ptids.erase(task);
+ }
+ else
+ {
+ thread_change_ptid (task_struct_ptids [task], new_ptid);
+ new_task_struct_ptids [idle] = new_ptid;
+ task_struct_ptids.erase(task);
+ }
+ }
+
+ if (idle == task && task_struct_ptids.count (idle) > 0)
+ {
+ now_running_ptids.push_back(task_struct_ptids [idle]);
+ task_struct_ptids.erase(idle);
+ }
+
+ cpu_ptid_pair [thread_cpu] = std::pair<ptid_t, ptid_t> (kernel_ptid, tp->ptid);
+ thread_change_ptid (tp->ptid, kernel_ptid);
+ }
+ }
+
+ /* Create an updated map of Linux kernel task structs mapping to gdb ptid. */
+ for (CORE_ADDR task : lk_list ("init_task", "task_struct->tasks"))
+ {
+ for (CORE_ADDR thread : lk_list (task, "task_struct->thread_group"))
+ {
+ if (is_running_task (thread) != LK_CPU_INVAL)
+ continue;
+
+ if (is_idle_task (thread) != LK_CPU_INVAL)
+ continue;
+
+ int pid = lk_read_int (thread + lk_offset ("task_struct->pid"));
+ int tid = get_task_struct_tid (thread);
+ ptid_t ptid (inf_pid, pid, tid);
+ new_task_struct_ptids [thread] = ptid;
+
+ /* Check if we created a gdb thread against
+ this task struct address on last stop. */
+ if (task_struct_ptids.count (thread) > 0)
+ {
+ /* Check if ptid needs to be updated. */
+ if (task_struct_ptids [thread] != ptid)
+ thread_change_ptid (task_struct_ptids [thread], ptid);
+ task_struct_ptids.erase(thread);
+ }
+ else
+ {
+ /* If this task was running on last stop, try to replace
+ it with gdb thread that just started running. */
+ bool create_new_thread = true;
+ for (auto last_cpu_task : last_cpu_task_struct_addr)
+ if (last_cpu_task.second == thread
+ && !now_running_ptids.empty())
+ {
+ thread_change_ptid (now_running_ptids.back(), ptid);
+ last_cpu_task_struct_addr.erase(last_cpu_task.first);
+ now_running_ptids.pop_back();
+ create_new_thread = false;
+ break;
+ }
+
+ /* Create a new gdb thread against this kernel task,
+ if thread was not swapped above. */
+ if (create_new_thread)
+ add_thread_with_info (ptid, NULL);
+ }
+ }
+ }
+
+ /* Delete all gdb threads which correspond to exited kernel tasks. */
+ for (auto& task : task_struct_ptids)
+ {
+ struct thread_info *tp = find_thread_ptid (task.second);
+ if (tp)
+ delete_thread (tp);
+ }
+
+ /* Delete all gdb threads which correspond to exited kernel tasks. */
+ for (auto& ptid : now_running_ptids)
+ {
+ struct thread_info *tp = find_thread_ptid (ptid);
+ if (tp)
+ delete_thread (tp);
+ }
+
+ task_struct_ptids = new_task_struct_ptids;
+ lk_threads_refresh = false;
+ return true;
+}
+
+/* Linux kernel target info. */
+
+static const target_info linux_kernel_target_info = {
+  "linux-kernel",
+  N_("Linux kernel support"),
+  N_("Adds support to debug Linux kernel")
+};
+
+/* Definition of linux_kernel_target target_ops derived class.
+   Sits at thread_stratum and maps Linux kernel tasks to gdb threads,
+   delegating everything else to the target beneath.  */
+
+class linux_kernel_target final : public target_ops
+{
+public:
+  const target_info &info () const override
+  { return linux_kernel_target_info; }
+
+  strata stratum () const override { return thread_stratum; }
+
+  void close () override;
+  void mourn_inferior () override;
+  void detach (inferior *, int) override;
+
+  void resume (ptid_t, int, enum gdb_signal) override;
+  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
+
+  bool can_async_p () override;
+  bool is_async_p () override;
+
+  void fetch_registers (struct regcache *, int) override;
+
+  void update_thread_list () override;
+  bool thread_alive (ptid_t ptid) override;
+  const char *pid_to_str (ptid_t) override;
+  const char *thread_name (struct thread_info *) override;
+  const char *extra_thread_info (struct thread_info *) override;
+};
+
+/* The one and only linux_kernel_target instance, pushed on the target
+   stack by lk_try_push_target.  */
+
+static linux_kernel_target linux_kernel_target_ops;
+
+/* Implementation of linux_kernel_target->close method. */
+
+void
+linux_kernel_target::close ()
+{
+  /* Free the architecture specific state and clear the global pointer,
+     so a later mourn_inferior/close cannot double-delete it.  */
+  delete lk_ops;
+  lk_ops = nullptr;
+}
+
+/* Implementation of linux_kernel_target->mourn_inferior method. */
+
+void
+linux_kernel_target::mourn_inferior ()
+{
+  target_ops *beneath = this->beneath ();
+
+  /* Free the architecture specific state and clear the global pointer,
+     so a later close cannot double-delete it.  */
+  delete lk_ops;
+  lk_ops = nullptr;
+  beneath->mourn_inferior ();
+}
+
+/* Implementation of linux_kernel_target->detach method. */
+
+void
+linux_kernel_target::detach (inferior *inf, int from_tty)
+{
+  /* Remove ourselves from the target stack before delegating, so the
+     kernel-specific unwinding state does not survive the detach.  */
+  unpush_target (this);
+
+  reinit_frame_cache ();
+
+  if (from_tty)
+    printf_filtered (_("Linux kernel target detached.\n"));
+
+  beneath ()->detach (inf, from_tty);
+}
+
+/* Implementation of linux_kernel_target->resume method. */
+
+void
+linux_kernel_target::resume (ptid_t ptid, int step, enum gdb_signal sig)
+{
+  /* Revert the PTIDs of the cpu threads back to the PTIDs assigned by
+     the target beneath; it only understands its own PTIDs.  */
+  for (const auto &cpu_ptid_pair : lk_ops->cpu_ptid_pair)
+    thread_change_ptid (cpu_ptid_pair.second.first, cpu_ptid_pair.second.second);
+
+  lk_ops->cpu_ptid_pair.clear ();
+
+  /* Force a task rescan on the next stop.  */
+  lk_ops->lk_threads_refresh = true;
+
+  /* Pass the request to the layer beneath. */
+  beneath ()->resume (ptid, step, sig);
+}
+
+/* Implementation of linux_kernel_target->wait method. */
+
+ptid_t
+linux_kernel_target::wait (ptid_t ptid, struct target_waitstatus *status,
+                          int options)
+{
+  /* Pass the request to the layer beneath. */
+  ptid_t stop_ptid = beneath()->wait (ptid, status, options);
+
+  /* get PC of CPU. */
+  CORE_ADDR pc = regcache_read_pc (get_thread_regcache (stop_ptid));
+
+  /* Check if target is stopped at a kernel address before accessing kernel
+     memory. */
+
+  if (!lk_ops->is_kernel_address(pc))
+    {
+      /* Stopped in user space: disable ourselves rather than read
+         kernel structures through a user-space address space.
+         NOTE(review): lk_ops is not freed here, unlike in close and
+         mourn_inferior -- confirm whether that leak is intended.  */
+      fprintf_unfiltered (gdb_stdlog, "Target stopped in user space. "
+                          "Disabling Linux kernel target\n");
+
+      unpush_target (this);
+
+      reinit_frame_cache ();
+
+      return stop_ptid;
+    }
+
+  /* Mark register cache needs update. */
+  registers_changed ();
+
+  /* Rescan for running tasks and update corresponding ptid. */
+  lk_ops->update_tasks ();
+
+  /* Return thread ptid corresponding to linux kernel target. */
+  return lk_ops->beneath_to_kernel_ptid(stop_ptid);
+}
+
+/* Implementation of linux_kernel_target->can_async_p method.
+   The kernel target only supports synchronous execution.  */
+
+bool
+linux_kernel_target::can_async_p ()
+{
+  return false;
+}
+
+/* Implementation of linux_kernel_target->is_async_p method.
+   Always synchronous; see can_async_p.  */
+
+bool
+linux_kernel_target::is_async_p ()
+{
+  return false;
+}
+
+/* Implementation of linux_kernel_target->fetch_registers method. */
+
+void
+linux_kernel_target::fetch_registers (struct regcache *regcache, int regnum)
+{
+  CORE_ADDR task = lk_ops->get_task_struct_addr (regcache->ptid());
+
+  /* Are we called during init? */
+  if (task == 0)
+    return beneath ()->fetch_registers (regcache, regnum);
+
+  unsigned int cpu = lk_ops->is_running_task (task);
+
+  /* Let the target beneath fetch registers of running tasks. */
+  if (cpu != LK_CPU_INVAL)
+    {
+      /* Temporarily switch the regcache to the beneath target's PTID
+         for this cpu; restored automatically on scope exit.  */
+      scoped_restore_regcache_ptid restore_regcache (regcache);
+      regcache->set_ptid (lk_ops->cpu_to_beneath_ptid (cpu));
+      beneath ()->fetch_registers (regcache, regnum);
+    }
+  else
+    {
+      /* Sleeping task: let the architecture specific code supply the
+         registers (see linux_kernel_ops::get_registers).  */
+      lk_ops->get_registers(task, regcache, regnum);
+
+      /* Mark all registers not found as unavailable. */
+      for (int i = 0; i < gdbarch_num_regs (regcache->arch ()); i++)
+        {
+          if (regcache->get_register_status(i) != REG_VALID)
+            regcache->invalidate (i);
+        }
+    }
+}
+
+/* Implementation of linux_kernel_target->update_thread_list method.
+   Rescans kernel tasks only when resume marked the cached list stale.  */
+
+void
+linux_kernel_target::update_thread_list ()
+{
+  if (lk_ops->lk_threads_refresh)
+    lk_ops->update_tasks ();
+}
+
+/* Implementation of linux_kernel_target->thread_alive method. */
+
+bool
+linux_kernel_target::thread_alive (ptid_t ptid)
+{
+  /* PTIDs carrying a TID were created by this target; they are alive
+     as long as a task_struct is still registered for them.  */
+  if (ptid.tid_p ())
+    return lk_ops->get_task_struct_addr (ptid) != 0;
+
+  /* Pass the request to the target beneath. */
+  return beneath ()->thread_alive (ptid);
+}
+
+/* Implementation of linux_kernel_target->pid_to_str method.
+   A trailing '*' marks tasks currently running on a cpu.  The returned
+   pointer refers to a static buffer and is only valid until the next
+   call.  */
+
+const char *
+linux_kernel_target::pid_to_str (ptid_t ptid)
+{
+  CORE_ADDR task = lk_ops->get_task_struct_addr (ptid);
+
+  static std::string str;
+
+  if (lk_ops->is_running_task (task) != LK_CPU_INVAL)
+    str = string_printf ("LK_Thread PID: %li*", ptid.lwp ());
+  else
+    str = string_printf ("LK_Thread PID: %li", ptid.lwp ());
+
+  return str.c_str ();
+}
+
+/* Implementation of linux_kernel_target->thread_name method.
+   Reads task_struct->comm of TI's task.  The returned pointer refers
+   to a static buffer, valid until the next call.  */
+
+const char *
+linux_kernel_target::thread_name (struct thread_info *ti)
+{
+  static std::string str (LK_TASK_COMM_LEN, '\0');
+
+  /* Never read more than the kernel's comm field actually holds.  */
+  size_t size = std::min ((unsigned int) LK_TASK_COMM_LEN,
+                          LK_ARRAY_LEN(lk_field ("task_struct->comm")));
+
+  CORE_ADDR task = lk_ops->get_task_struct_addr (ti->ptid);
+  CORE_ADDR comm = task + lk_offset ("task_struct->comm");
+
+  /* NOTE(review): after the first call STR is the "%-16s" result, so
+     its capacity relies on SIZE never exceeding 16 -- confirm.  The
+     return value of target_read_memory is also ignored here.  */
+  target_read_memory (comm, (gdb_byte *) str.data (), size);
+  str = string_printf ("%-16s", str.c_str ());
+
+  return str.c_str ();
+}
+
+/* Implementation of linux_kernel_target->extra_thread_info method.
+   Returns a short per-thread annotation for "info threads".  */
+
+const char *
+linux_kernel_target::extra_thread_info(struct thread_info *info)
+{
+  CORE_ADDR task = lk_ops->get_task_struct_addr (info->ptid);
+
+  if (task)
+    {
+      /* get_print_cell returns a rotating static buffer owned by gdb;
+         no need to free it.  */
+      char *msg = get_print_cell ();
+
+      snprintf (msg, PRINT_CELL_SIZE, "pid: %li ", info->ptid.lwp());
+
+      return msg;
+    }
+
+  return "LinuxThread";
+}
+
+/* Initializes all private data and pushes the Linux kernel target,
+   if not done already.  Errors out if the current architecture does
+   not provide a linux_kernel_ops implementation.  */
+
+static void
+lk_try_push_target (struct objfile *objfile)
+{
+  struct gdbarch *gdbarch_p = current_inferior ()->gdbarch;
+
+  if (!(gdbarch_p && gdbarch_get_new_lk_ops_p (gdbarch_p)))
+    error (_("Linux kernel debugging not supported on %s."),
+           gdbarch_bfd_arch_info (gdbarch_p)->printable_name);
+
+  lk_ops = gdbarch_get_new_lk_ops (gdbarch_p);
+
+  lk_ops->read_symbols ();
+
+  CORE_ADDR pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
+
+  /* Only push the target when stopped at a kernel address; otherwise
+     kernel structures cannot be read through the current address
+     space.  */
+  if (!lk_ops->is_kernel_address(pc))
+    {
+      fprintf_unfiltered (gdb_stdlog, "Target stopped in user space. "
+                          "Disabling Linux kernel target\n");
+
+      /* Clear the global pointer so no stale lk_ops is used later.  */
+      delete lk_ops;
+      lk_ops = nullptr;
+      reinit_frame_cache ();
+    }
+  else if (!target_is_pushed (&linux_kernel_target_ops))
+    {
+      lk_ops->update_tasks ();
+      push_target (&linux_kernel_target_ops);
+    }
+}
+
+/* Check if OBJFILE is a Linux kernel.  The heuristic requires the
+   main objfile to contain all three well-known kernel symbols.  */
+
+static bool
+lk_is_linux_kernel (struct objfile *objfile)
+{
+  int ok = 0;
+
+  if (objfile == NULL || !(objfile->flags & OBJF_MAINLINE))
+    return false;
+
+  ok += lookup_minimal_symbol ("linux_banner", NULL, objfile).minsym != NULL;
+  ok += lookup_minimal_symbol ("_stext", NULL, objfile).minsym != NULL;
+  ok += lookup_minimal_symbol ("_etext", NULL, objfile).minsym != NULL;
+
+  /* All three symbols must be present.  */
+  return (ok > 2);
+}
+
+/* The open method of Linux kernel target.  Invoked by the user via
+   "target linux-kernel"; searches the loaded objfiles for a Linux
+   kernel image and pushes the target if one is found.  */
+
+static void
+lk_open (const char *args, int from_tty)
+{
+  if (target_is_pushed (&linux_kernel_target_ops))
+    {
+      printf_unfiltered (_("Linux kernel target already pushed. Aborting\n"));
+      return;
+    }
+
+  for (objfile *objfile : current_program_space->objfiles ())
+    {
+      /* Require a live inferior (pid != 0) before pushing.  */
+      if (lk_is_linux_kernel (objfile)
+          && inferior_ptid.pid () != 0)
+        {
+          lk_try_push_target (objfile);
+          return;
+        }
+    }
+
+  printf_unfiltered (_("Could not find a valid Linux kernel object file. "
+                       "Aborting.\n"));
+}
+
+/* Function for new_objfile observer.  Auto-enables the Linux kernel
+   target when a kernel image is loaded into a live inferior.  */
+
+static void
+lk_observer_new_objfile (struct objfile *objfile)
+{
+  if (lk_is_linux_kernel (objfile) && inferior_ptid.pid () != 0)
+    lk_try_push_target (objfile);
+}
+
+/* Function for inferior_created observer. */
+
+static void
+lk_observer_inferior_created (struct target_ops *ops, int from_tty)
+{
+  if (inferior_ptid.pid () == 0)
+    return;
+
+  for (objfile *objfile : current_program_space->objfiles ())
+    {
+      if (lk_is_linux_kernel (objfile))
+        {
+          lk_try_push_target (objfile);
+          return;
+        }
+    }
+}
+
+/* Module startup initialization function, automatically called by
+   init.c.  Registers the "target linux-kernel" command and the
+   observers that auto-enable the target.  */
+
+void
+_initialize_linux_kernel (void)
+{
+  add_target (linux_kernel_target_info, lk_open);
+
+  /* Notice when object files get loaded and unloaded. */
+  gdb::observers::new_objfile.attach (lk_observer_new_objfile);
+
+  /* Add Linux kernel target to inferior_created event chain.
+     This is needed to enable the Linux kernel target on "attach". */
+  gdb::observers::inferior_created.attach (lk_observer_inferior_created);
+}
new file mode 100644
@@ -0,0 +1,354 @@
+/* Basic Linux kernel support, architecture independent.
+
+ Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* Identifiers containing a double underscore are reserved for the
+   implementation (C++ [lex.name]); use a plain guard name.  */
+#ifndef LK_LOW_H
+#define LK_LOW_H
+
+#include "gdbtypes.h"
+#include "target.h"
+
+#include <map>
+#include <unordered_map>
+
+/* Copied constants defined in Linux kernel. */
+#define LK_TASK_COMM_LEN 16
+#define LK_BITS_PER_BYTE 8
+
+/* Definitions used in linux kernel target.  -1U is the "no such cpu"
+   marker returned by is_running_task/is_idle_task.  */
+#define LK_CPU_INVAL -1U
+
+/* Helper functions to read and return a value at a given ADDRess. */
+extern int lk_read_int (CORE_ADDR addr);
+extern unsigned int lk_read_uint (CORE_ADDR addr);
+extern LONGEST lk_read_long (CORE_ADDR addr);
+extern ULONGEST lk_read_ulong (CORE_ADDR addr);
+extern CORE_ADDR lk_read_addr (CORE_ADDR addr);
+
+/* Enum to track the config options used to build the kernel. Whenever
+   a symbol is declared (in linux_kernel_ops::{arch_}read_symbols) which
+   only exists if the kernel was built with a certain config option an entry
+   has to be added here. */
+enum lk_kconfig_values
+{
+  LK_CONFIG_ALWAYS = 1 << 0,
+  LK_CONFIG_SMT = 1 << 1,
+  LK_CONFIG_MODULES = 1 << 2,
+};
+DEF_ENUM_FLAGS_TYPE (enum lk_kconfig_values, lk_kconfig);
+
+/* We use the following convention for PTIDs:
+
+   ptid->pid = inferiors PID
+   ptid->lwp = PID from task_struct
+   ptid->tid = TID generated by Linux kernel target.
+
+   Linux kernel target generates a unique integer TID against each
+   task_struct. These TIDs map to their corresponding task_struct
+   addresses stored in a lk_tid_task_map.
+
+   We update PTIDs of running tasks to the ones generated by Linux
+   kernel target in order to map them over their corresponding
+   task_struct addresses. These PTIDs are reverted on target resume. */
+
+/* A std::vector that can hold a list of PTIDs. */
+typedef std::vector <ptid_t> lk_task_ptid_list;
+
+/* A std::map that uses task struct address as key and PTID as value. */
+typedef std::map <CORE_ADDR, ptid_t> lk_task_ptid_map;
+
+/* A std::map that uses TID as key and task struct address as value. */
+typedef std::map <long, CORE_ADDR> lk_tid_task_map;
+
+/* A std::map that uses cpu number as key and task struct address as value. */
+typedef std::map<unsigned int, CORE_ADDR> lk_cpu_task_struct_map;
+
+/* A std::map that uses cpu numbers as key and a std::pair of PTIDs as value. */
+typedef std::map<unsigned int, std::pair <ptid_t, ptid_t>> lk_task_ptid_pair_map;
+
+/* Cache for the value of a symbol. Used in linux_kernel_ops->m_symbols.
+   Exactly one member is meaningful per entry, depending on whether the
+   symbol was declared as an address, a type, or a field.  */
+
+union lk_symbol
+{
+  CORE_ADDR addr;
+  struct type *type;
+  struct
+  {
+    struct field *field;
+    CORE_ADDR offset;
+  };
+
+  lk_symbol () {field = NULL; offset = 0;}
+  lk_symbol (struct field *f, CORE_ADDR o) {field = f, offset = o;}
+};
+
+class linux_kernel_ops
+{
+public:
+  linux_kernel_ops ()
+  {}
+
+  /* Non default destructor as we need to clean-up gdb threads
+     created by this linux_kernel_ops object. */
+  virtual ~linux_kernel_ops ();
+
+  /* Read registers from the target and supply their content to regcache. */
+  virtual void get_registers (CORE_ADDR task, struct regcache *regcache,
+                              int regnum) = 0;
+
+  /* Return the per_cpu_offset of cpu CPU. Default uses __per_cpu_offset
+     array to determine the offset. */
+  virtual CORE_ADDR percpu_offset (unsigned int cpu);
+
+  /* Verify if we are stopped at a direct mapped address in kernel space. */
+  virtual bool is_kernel_address (CORE_ADDR addr);
+
+  /* Whether the cached Linux thread list needs refreshing.  Declared
+     bool (not int) since it is only ever assigned and tested as a
+     boolean flag.  */
+  bool lk_threads_refresh = true;
+
+  /* Return a previously declared address with key ALIAS.
+     Throws internal_error if requested symbol was not declared first. */
+  CORE_ADDR address (const std::string &alias) const
+  {
+    gdb_assert (has_address (alias));
+    return m_symbols.at (alias).addr;
+  }
+
+  /* Same like address but for types. */
+  struct type *type (const std::string &alias) const
+  {
+    gdb_assert (has_type (alias));
+    return m_symbols.at (unique_type_alias (alias)).type;
+  }
+
+  /* Same like address but for fields. */
+  struct field *field (const std::string &alias) const
+  {
+    gdb_assert (has_field (alias));
+    return m_symbols.at (alias).field;
+  }
+
+  /* Checks whether address ALIAS exists in m_symbols. */
+  bool has_address (const std::string &alias) const
+  { return has_symbol (alias); }
+
+  /* Same like has_address but for types. */
+  bool has_type (const std::string &alias) const
+  { return has_symbol (unique_type_alias (alias)); }
+
+  /* Same like has_address but for fields. */
+  bool has_field (const std::string &alias) const
+  { return has_symbol (alias); }
+
+  /* Return offset of field ALIAS (in byte). */
+  CORE_ADDR offset (const std::string &alias) const;
+
+  /* Check whether the kernel was build using this config option. */
+  bool ifdef (lk_kconfig config) const
+  { return !(m_kconfig & config); }
+
+  /* Holds Linux kernel target tids as key and
+     corresponding task struct address as value. */
+  lk_tid_task_map tid_task_struct;
+
+  /* Maps cpu number to linux kernel and target beneath ptids. */
+  lk_task_ptid_pair_map cpu_ptid_pair;
+
+  /* Maps task_struct addresses to ptids. */
+  lk_task_ptid_map task_struct_ptids;
+
+  /* Holds cpu to current running task struct address mappings. */
+  lk_cpu_task_struct_map cpu_curr_task_struct_addr;
+
+  /* Holds cpu to current idle task struct address mappings. */
+  lk_cpu_task_struct_map cpu_idle_task_struct_addr;
+
+  /* Update task_struct_ptids map by walking the task_structs starting from
+     init_task. */
+  bool update_task_struct_ptids ();
+
+  /* Update map of running tasks and create a mapping between
+     target beneath PTIDs and their linux kernel specific PTIDs. */
+  bool update_tasks ();
+
+  /* Declare and initialize all symbols needed. Must be called _after_
+     symbol tables were initialized. */
+  void read_symbols ();
+
+  /* Returns target beneath ptid corresponding to cpu number. */
+  ptid_t cpu_to_beneath_ptid (unsigned int cpu);
+
+  /* Returns linux kernel PTID which maps to passed target beneath PTID. */
+  ptid_t beneath_to_kernel_ptid (ptid_t ptid);
+
+  /* Tests if a given task TASK is running. Returns either the cpu-id
+     if running or LK_CPU_INVAL if not. */
+  unsigned int is_running_task (CORE_ADDR task);
+
+  /* Tests if a given task is idle task on current run queue.
+     Returns either the cpu number or LK_CPU_INVAL. */
+  unsigned int is_idle_task (CORE_ADDR task);
+
+  /* Returns Linux kernel task struct against the ptid. */
+  CORE_ADDR get_task_struct_addr (ptid_t ptid);
+
+  /* Returns Linux kernel thread tid against task struct address. */
+  long get_task_struct_tid (CORE_ADDR task);
+
+  /* Return task struct address of currently running task on cpu. */
+  CORE_ADDR get_cpu_task_struct_addr (unsigned int cpu);
+
+protected:
+  /* Virtual method to allow architectures to declare their own symbols.
+     Called by read_symbols. */
+  virtual void arch_read_symbols ()
+  {}
+
+  /* Helper function to declare_address. Returns true when symbol NAME
+     using key ALIAS was successfully declared, false otherwise. Try not to
+     use this function directly but use declare_address instead. */
+  bool try_declare_address (const std::string &alias,
+                            const std::string &names);
+
+  /* Same like try_declare_address but for types. */
+  bool try_declare_type (const std::string &alias, const std::string &name);
+
+  /* Same like try_declare_address but for fields. */
+  bool try_declare_field (const std::string &alias, const std::string &name);
+
+  /* Same like try_declare_field but with NAME = ALIAS. */
+  bool try_declare_field (const std::string &name)
+  { return try_declare_field (name, name); }
+
+  /* Declare symbol NAME using key ALIAS. If no symbol NAME could be found
+     mark CONFIG as missing. */
+  void declare_address (const std::string &alias, const std::string &name,
+                        const lk_kconfig config);
+
+  /* Same like above but with NAME = ALIAS. */
+  void declare_address (const std::string &name, const lk_kconfig config)
+  { declare_address (name, name, config); }
+
+  /* Same like above but only mark CONFIG as missing if none of the symbols
+     in NAMES could be found. */
+  void declare_address (const std::string &alias,
+                        const std::initializer_list<const std::string> names,
+                        const lk_kconfig config);
+
+  /* See declare_address. */
+  void declare_type (const std::string &alias, const std::string &name,
+                     const lk_kconfig config);
+
+  /* See declare_address. */
+  void declare_type (const std::string &name, const lk_kconfig config)
+  { declare_type (name, name, config); }
+
+  /* See declare_address. */
+  void declare_type (const std::string &alias,
+                     const std::initializer_list<const std::string> names,
+                     const lk_kconfig config);
+
+  /* See declare_address. */
+  void declare_field (const std::string &alias, const std::string &name,
+                      const lk_kconfig kconfig);
+
+  /* See declare_address. */
+  void declare_field (const std::string &name, const lk_kconfig kconfig)
+  { declare_field (name, name, kconfig); }
+
+  /* See declare_address. */
+  void declare_field (const std::string &alias,
+                      const std::initializer_list <const std::string> names,
+                      const lk_kconfig config);
+
+private:
+  /* The configuration used to build the kernel. To make the implementation
+     easier m_kconfig is inverse, i.e. it tracks the _missing_ config options
+     not the set ones. */
+  lk_kconfig m_kconfig = 0;
+
+  /* Linux kernel target thread id counter. Refreshed on every stop/resume. */
+  long lk_thread_count = 1;
+
+  /* Collection of all declared symbols (addresses, fields etc.). */
+  std::unordered_map<std::string, union lk_symbol> m_symbols;
+
+  /* Returns unique alias for struct ALIAS. */
+  const std::string unique_type_alias (const std::string &alias) const
+  {
+    std::string prefix ("struct ");
+    if (startswith (alias.c_str (), prefix.c_str ()))
+      return alias;
+    return prefix + alias;
+  }
+
+  /* Check if m_symbols contains ALIAS. */
+  bool has_symbol (const std::string &alias) const
+  { return m_symbols.count (alias) != 0; }
+};
+
+/* The global architecture specific kernel state; owned by the
+   linux-kernel target (allocated in lk_try_push_target, freed in
+   close/mourn_inferior).  */
+extern linux_kernel_ops *lk_ops;
+
+/* Short hand access to frequently used lk_ops methods. */
+
+static inline CORE_ADDR
+lk_address (const std::string &alias)
+{
+  return lk_ops->address (alias);
+}
+
+static inline struct field *
+lk_field (const std::string &alias)
+{
+  return lk_ops->field (alias);
+}
+
+static inline CORE_ADDR
+lk_offset (const std::string &alias)
+{
+  return lk_ops->offset (alias);
+}
+
+/* Short hand access to current gdbarchs builtin types and their
+   size (in byte). For TYPE replace spaces " " by underscore "_", e.g.
+   "unsigned int" => "unsigned_int". */
+#define lk_builtin_type(type) \
+  (builtin_type (current_inferior ()->gdbarch)->builtin_##type)
+
+#define lk_builtin_type_size(type) \
+  (lk_builtin_type (type)->length)
+
+/* If field FIELD is an array returns its length (in #elements).
+   Beware: FIELD is evaluated twice; do not pass an expression with
+   side effects.  */
+#define LK_ARRAY_LEN(field) \
+  (FIELD_SIZE (field) / FIELD_TARGET_SIZE (field))
+
+/* Additional access macros to fields in the style of gdbtypes.h */
+/* Returns the size of field FIELD (in bytes). If FIELD is an array returns
+   the size of the whole array. */
+#define FIELD_SIZE(field) \
+  TYPE_LENGTH (check_typedef (FIELD_TYPE ((*field))))
+
+/* Returns the size of the target type of field FIELD (in bytes). If FIELD is
+   an array returns the size of its elements. */
+#define FIELD_TARGET_SIZE(field) \
+  TYPE_LENGTH (check_typedef (TYPE_TARGET_TYPE (FIELD_TYPE ((*field)))))
+
+#define FIELD_BYTEPOS(field) \
+  (FIELD_BITPOS (*field) / LK_BITS_PER_BYTE)
+
+#endif /* LK_LOW_H */
@@ -65,6 +65,7 @@ static const struct osabi_names gdb_osabi_names[] =
{ "GNU/Hurd", NULL },
{ "Solaris", NULL },
{ "GNU/Linux", "linux(-gnu[^-]*)?" },
+ { "Linux kernel", NULL },
{ "FreeBSD", NULL },
{ "NetBSD", NULL },
{ "OpenBSD", NULL },