@@ -491,6 +491,15 @@ linux-amd64-ipa.o: linux-amd64-ipa.c
amd64-linux-ipa.o: amd64-linux.c
$(IPAGENT_COMPILE) $<
$(POSTCOMPILE)
+linux-ppc-ipa.o: linux-ppc-ipa.c
+ $(IPAGENT_COMPILE) $<
+ $(POSTCOMPILE)
+powerpc-64l-ipa.o: powerpc-64l.c
+ $(IPAGENT_COMPILE) $<
+ $(POSTCOMPILE)
+powerpc-32l-ipa.o: powerpc-32l.c
+ $(IPAGENT_COMPILE) $<
+ $(POSTCOMPILE)
tdesc-ipa.o: tdesc.c
$(IPAGENT_COMPILE) $<
$(POSTCOMPILE)
@@ -245,6 +245,7 @@ case "${target}" in
srv_linux_usrregs=yes
srv_linux_regsets=yes
srv_linux_thread_db=yes
+ ipa_obj="powerpc-64l-ipa.o powerpc-32l-ipa.o linux-ppc-ipa.o"
;;
powerpc-*-lynxos*) srv_regobj="powerpc-32.o"
srv_tgtobj="lynx-low.o lynx-ppc-low.o"
new file mode 100644
@@ -0,0 +1,120 @@
+/* GNU/Linux/PowerPC specific low level interface, for the in-process
+ agent library for GDB.
+
+ Copyright (C) 2010-2015 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include "server.h"
+#include "tracepoint.h"
+
+#if defined __PPC64__
+void init_registers_powerpc_64l (void);
+extern const struct target_desc *tdesc_powerpc_64l;
+#define REGSZ 8
+#else
+void init_registers_powerpc_32l (void);
+extern const struct target_desc *tdesc_powerpc_32l;
+#define REGSZ 4
+#endif
+
+/* These macros define the position of registers in the buffer collected
+ by the fast tracepoint jump pad. */
+#define FT_CR_PC 0
+#define FT_CR_R0 1
+#define FT_CR_CR 33
+#define FT_CR_XER 34
+#define FT_CR_LR 35
+#define FT_CR_CTR 36
+#define FT_CR_GPR(n) (FT_CR_R0 + (n))
+
+static const int ppc_ft_collect_regmap[] = {
+ /* GPRs */
+ FT_CR_GPR (0), FT_CR_GPR (1), FT_CR_GPR (2),
+ FT_CR_GPR (3), FT_CR_GPR (4), FT_CR_GPR (5),
+ FT_CR_GPR (6), FT_CR_GPR (7), FT_CR_GPR (8),
+ FT_CR_GPR (9), FT_CR_GPR (10), FT_CR_GPR (11),
+ FT_CR_GPR (12), FT_CR_GPR (13), FT_CR_GPR (14),
+ FT_CR_GPR (15), FT_CR_GPR (16), FT_CR_GPR (17),
+ FT_CR_GPR (18), FT_CR_GPR (19), FT_CR_GPR (20),
+ FT_CR_GPR (21), FT_CR_GPR (22), FT_CR_GPR (23),
+ FT_CR_GPR (24), FT_CR_GPR (25), FT_CR_GPR (26),
+ FT_CR_GPR (27), FT_CR_GPR (28), FT_CR_GPR (29),
+ FT_CR_GPR (30), FT_CR_GPR (31),
+ /* FPRs - not collected. */
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ FT_CR_PC, /* PC */
+ -1, /* MSR */
+ FT_CR_CR, /* CR */
+ FT_CR_LR, /* LR */
+ FT_CR_CTR, /* CTR */
+ FT_CR_XER, /* XER */
+ -1, /* FPSCR */
+};
+
+#define PPC_NUM_FT_COLLECT_GREGS \
+ (sizeof (ppc_ft_collect_regmap) / sizeof(ppc_ft_collect_regmap[0]))
+
+/* Supply registers collected by the fast tracepoint jump pad.
+ BUF is the second argument we pass to gdb_collect in jump pad. */
+
+void
+supply_fast_tracepoint_registers (struct regcache *regcache,
+ const unsigned char *buf)
+{
+ int i;
+
+ for (i = 0; i < PPC_NUM_FT_COLLECT_GREGS; i++)
+ {
+ if (ppc_ft_collect_regmap[i] == -1)
+ continue;
+ supply_register (regcache, i,
+ ((char *) buf)
+ + ppc_ft_collect_regmap[i] * REGSZ);
+ }
+}
+
+/* Return the value of register REGNUM. RAW_REGS is collected buffer
+ by jump pad. This function is called by emit_reg. */
+
+ULONGEST __attribute__ ((visibility("default"), used))
+gdb_agent_get_raw_reg (const unsigned char *raw_regs, int regnum)
+{
+ if (regnum >= PPC_NUM_FT_COLLECT_GREGS)
+ return 0;
+ if (ppc_ft_collect_regmap[regnum] == -1)
+ return 0;
+
+ return *(ULONGEST *) (raw_regs
+ + ppc_ft_collect_regmap[regnum] * REGSZ);
+}
+
+/* Initialize ipa_tdesc and others. */
+
+void
+initialize_low_tracepoint (void)
+{
+#if defined __PPC64__
+ init_registers_powerpc_64l ();
+ ipa_tdesc = tdesc_powerpc_64l;
+#else
+ init_registers_powerpc_32l ();
+ ipa_tdesc = tdesc_powerpc_32l;
+#endif
+}
@@ -24,6 +24,8 @@
#include <asm/ptrace.h>
#include "nat/ppc-linux.h"
+#include "ax.h"
+#include "tracepoint.h"
static unsigned long ppc_hwcap;
@@ -512,6 +514,1266 @@ ppc_breakpoint_at (CORE_ADDR where)
return 0;
}
+/* Implement supports_z_point_type target-ops.
+ Returns true if type Z_TYPE breakpoint is supported.
+
+ Handling software breakpoint at server side, so tracepoints
+ and breakpoints can be inserted at the same location. */
+
+static int
+ppc_supports_z_point_type (char z_type)
+{
+ switch (z_type)
+ {
+ case Z_PACKET_SW_BP:
+ return 1;
+ case Z_PACKET_HW_BP:
+ case Z_PACKET_WRITE_WP:
+ case Z_PACKET_ACCESS_WP:
+ default:
+ return 0;
+ }
+}
+
+/* Implement insert_point target-ops.
+ Returns 0 on success, -1 on failure and 1 on unsupported. */
+
+static int
+ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
+ int size, struct raw_breakpoint *bp)
+{
+ switch (type)
+ {
+ case raw_bkpt_type_sw:
+ return insert_memory_breakpoint (bp);
+
+ case raw_bkpt_type_hw:
+ case raw_bkpt_type_write_wp:
+ case raw_bkpt_type_access_wp:
+ default:
+ /* Unsupported. */
+ return 1;
+ }
+}
+
+/* Implement remove_point target-ops.
+ Returns 0 on success, -1 on failure and 1 on unsupported. */
+
+static int
+ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
+ int size, struct raw_breakpoint *bp)
+{
+ switch (type)
+ {
+ case raw_bkpt_type_sw:
+ return remove_memory_breakpoint (bp);
+
+ case raw_bkpt_type_hw:
+ case raw_bkpt_type_write_wp:
+ case raw_bkpt_type_access_wp:
+ default:
+ /* Unsupported. */
+ return 1;
+ }
+}
+
+/* Put a 32-bit INSN instruction in BUF in target endian. */
+
+static int
+put_i32 (unsigned char *buf, uint32_t insn)
+{
+ if (__BYTE_ORDER == __LITTLE_ENDIAN)
+ {
+ buf[3] = (insn >> 24) & 0xff;
+ buf[2] = (insn >> 16) & 0xff;
+ buf[1] = (insn >> 8) & 0xff;
+ buf[0] = insn & 0xff;
+ }
+ else
+ {
+ buf[0] = (insn >> 24) & 0xff;
+ buf[1] = (insn >> 16) & 0xff;
+ buf[2] = (insn >> 8) & 0xff;
+ buf[3] = insn & 0xff;
+ }
+
+ return 4;
+}
+
+/* Return a 32-bit value in target endian from BUF.  */
+
+__attribute__((unused)) /* Maybe unused due to conditional compilation. */
+static uint32_t
+get_i32 (unsigned char *buf)
+{
+ uint32_t r;
+
+ if (__BYTE_ORDER == __LITTLE_ENDIAN)
+ r = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ else
+ r = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+ return r;
+}
+
+/* Generate a ds-form instruction in BUF and return the number of bytes written
+
+ 0 6 11 16 30 32
+ | OPCD | RST | RA | DS |XO| */
+
+__attribute__((unused)) /* Maybe unused due to conditional compilation. */
+static int
+gen_ds_form (unsigned char *buf, int opcd, int rst, int ra, int ds, int xo)
+{
+ uint32_t insn = opcd << 26;
+
+ insn |= (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used ds-form instructions. */
+
+#define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
+#define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
+#define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
+#define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
+
+/* Generate a d-form instruction in BUF.
+
+ 0 6 11 16 32
+ | OPCD | RST | RA | D | */
+
+static int
+gen_d_form (unsigned char *buf, int opcd, int rst, int ra, int si)
+{
+ uint32_t insn = opcd << 26;
+
+ insn |= (rst << 21) | (ra << 16) | (si & 0xffff);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used d-form instructions. */
+
+#define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
+#define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
+#define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
+#define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
+#define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
+#define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
+#define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
+#define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
+
+/* Generate a xfx-form instruction in BUF and return the number of bytes
+ written.
+
+ 0 6 11 21 31 32
+ | OPCD | RST | RI | XO |/| */
+
+static int
+gen_xfx_form (unsigned char *buf, int opcd, int rst, int ri, int xo)
+{
+ uint32_t insn = opcd << 26;
+ unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
+
+ insn |= (rst << 21) | (n << 11) | (xo << 1);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used xfx-form instructions. */
+
+#define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
+#define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
+
+/* Generate a x-form instruction in BUF and return the number of bytes written.
+
+ 0 6 11 16 21 31 32
+ | OPCD | RST | RA | RB | XO |RC| */
+
+static int
+gen_x_form (unsigned char *buf, int opcd, int rst, int ra, int rb,
+ int xo, int rc)
+{
+ uint32_t insn = opcd << 26;
+
+ insn |= (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used x-form instructions. */
+
+#define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
+#define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
+#define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
+#define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
+/* Assume bf = cr7. */
+#define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
+
+/* Generate a md-form instruction in BUF and return the number of bytes written.
+
+ 0 6 11 16 21 27 30 31 32
+ | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
+
+static int
+gen_md_form (unsigned char *buf, int opcd, int rs, int ra, int sh, int mb,
+ int xo, int rc)
+{
+ uint32_t insn = opcd << 26;
+ unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
+ unsigned int sh0_4 = sh & 0x1f;
+ unsigned int sh5 = (sh >> 5) & 1;
+
+  insn |= (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5) | (sh5 << 1)
+	  | (xo << 2) | (rc & 1);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used md-form instructions. */
+
+#define GEN_RLDICL(buf, ra, rs ,sh, mb) \
+ gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
+#define GEN_RLDICR(buf, ra, rs ,sh, mb) \
+ gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
+
+/* Generate a i-form instruction in BUF and return the number of bytes written.
+
+ 0 6 30 31 32
+ | OPCD | LI |AA|LK| */
+
+static int
+gen_i_form (unsigned char *buf, int opcd, int li, int aa, int lk)
+{
+ uint32_t insn = opcd << 26;
+
+ insn |= (li & 0x3fffffc) | (aa & 1) | (lk & 1);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used i-form instructions. */
+
+#define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
+#define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
+
+/* Generate a b-form instruction in BUF and return the number of bytes written.
+
+ 0 6 11 16 30 31 32
+ | OPCD | BO | BI | BD |AA|LK| */
+
+static int
+gen_b_form (unsigned char *buf, int opcd, int bo, int bi, int bd,
+ int aa, int lk)
+{
+ uint32_t insn = opcd << 26;
+
+ insn |= (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
+ return put_i32 (buf, insn);
+}
+
+/* The following are frequently used b-form instructions. */
+/* Assume bi = cr7. */
+#define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
+
+/* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
+ respectively. They are primary used for save/restore GPRs in jump-pad,
+ not used for bytecode compiling. */
+
+#if defined __PPC64__
+#define GEN_LOAD(buf, rt, ra, si) GEN_LD (buf, rt, ra, si)
+#define GEN_STORE(buf, rt, ra, si) GEN_STD (buf, rt, ra, si)
+#else
+#define GEN_LOAD(buf, rt, ra, si) GEN_LWZ (buf, rt, ra, si)
+#define GEN_STORE(buf, rt, ra, si) GEN_STW (buf, rt, ra, si)
+#endif
+
+/* Generate a sequence of instructions to load IMM in the register REG.
+ Write the instructions in BUF and return the number of bytes written. */
+
+static int
+gen_limm (unsigned char *buf, int reg, uint64_t imm)
+{
+ int i = 0;
+
+ if ((imm >> 8) == 0)
+ {
+ /* li reg, imm[7:0] */
+ i += GEN_LI (buf + i, reg, imm);
+ }
+ else if ((imm >> 16) == 0)
+ {
+ /* li reg, 0
+ ori reg, reg, imm[15:0] */
+ i += GEN_LI (buf + i, reg, 0);
+ i += GEN_ORI (buf + i, reg, reg, imm);
+ }
+ else if ((imm >> 32) == 0)
+ {
+ /* lis reg, imm[31:16]
+ ori reg, reg, imm[15:0]
+ rldicl reg, reg, 0, 32 */
+ i += GEN_LIS (buf + i, reg, (imm >> 16) & 0xffff);
+ i += GEN_ORI (buf + i, reg, reg, imm & 0xffff);
+ i += GEN_RLDICL (buf + i, reg, reg, 0, 32);
+ }
+ else
+ {
+ /* lis reg, <imm[63:48]>
+     ori reg, reg, <imm[47:32]>
+ rldicr reg, reg, 32, 31
+ oris reg, reg, <imm[31:16]>
+ ori reg, reg, <imm[15:0]> */
+ i += GEN_LIS (buf + i, reg, ((imm >> 48) & 0xffff));
+ i += GEN_ORI (buf + i, reg, reg, ((imm >> 32) & 0xffff));
+ i += GEN_RLDICR (buf + i, reg, reg, 32, 31);
+ i += GEN_ORIS (buf + i, reg, reg, ((imm >> 16) & 0xffff));
+ i += GEN_ORI (buf + i, reg, reg, (imm & 0xffff));
+ }
+
+ return i;
+}
+
+/* Generate a sequence for atomically exchange at location LOCK.
+ This code sequence clobbers r6, r7, r8, r9. */
+
+static int
+gen_atomic_xchg (unsigned char *buf, CORE_ADDR lock, int old_value, int new_value)
+{
+ int i = 0;
+ const int r_lock = 6;
+ const int r_old = 7;
+ const int r_new = 8;
+ const int r_tmp = 9;
+
+ /*
+ 1: lwsync
+ 2: lwarx TMP, 0, LOCK
+ cmpwi TMP, OLD
+ bne 1b
+ stwcx. NEW, 0, LOCK
+ bne 2b */
+
+ i += gen_limm (buf + i, r_lock, lock);
+ i += gen_limm (buf + i, r_new, new_value);
+ i += gen_limm (buf + i, r_old, old_value);
+
+ i += put_i32 (buf + i, 0x7c2004ac); /* lwsync */
+ i += GEN_LWARX (buf + i, r_tmp, 0, r_lock);
+ i += GEN_CMPW (buf + i, r_tmp, r_old);
+ i += GEN_BNE (buf + i, -12);
+ i += GEN_STWCX (buf + i, r_new, 0, r_lock);
+ i += GEN_BNE (buf + i, -16);
+
+ return i;
+}
+
+/* Generate a sequence of instructions for calling a function
+ at address of FN. Return the number of bytes are written in BUF.
+
+ FIXME: For ppc64be, FN should be the address to the function
+ descriptor, so we should load 8(FN) to R2, 16(FN) to R11
+ and then call the function-entry at 0(FN). However, current GDB
+ implicitly convert the address from function descriptor to the actual
+   function address.  See qSymbol handling in remote.c.  Although the
+   call itself seems to succeed, things go wrong when the callee
+   tries to access global variables.  */
+
+static int
+gen_call (unsigned char *buf, CORE_ADDR fn)
+{
+ int i = 0;
+
+  /* Call via r12 so the callee can compute its TOC address.  */
+ i += gen_limm (buf + i, 12, fn);
+ i += GEN_MTSPR (buf + i, 12, 9); /* mtctr r12 */
+ i += put_i32 (buf + i, 0x4e800421); /* bctrl */
+
+ return i;
+}
+
+/* Implement supports_tracepoints hook of target_ops.
+ Always return true. */
+
+static int
+ppc_supports_tracepoints (void)
+{
+#if defined (__PPC64__) && _CALL_ELF == 2
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/* Implement install_fast_tracepoint_jump_pad of target_ops.
+ See target.h for details. */
+
+static int
+ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+ CORE_ADDR collector,
+ CORE_ADDR lockaddr,
+ ULONGEST orig_size,
+ CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn,
+ ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr,
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
+{
+ unsigned char buf[1028];
+ int i, j, offset;
+ CORE_ADDR buildaddr = *jump_entry;
+#if __PPC64__
+ const int rsz = 8;
+#else
+ const int rsz = 4;
+#endif
+ const int frame_size = (((37 * rsz) + 112) + 0xf) & ~0xf;
+
+ /* Stack frame layout for this jump pad,
+
+ High CTR -8(sp)
+ LR -16(sp)
+ XER
+ CR
+ R31
+ R29
+ ...
+ R1
+ R0
+ Low PC/<tpaddr>
+
+     The code flow of this jump pad,
+
+     1. Save GPR and SPR
+     2. Adjust SP
+     3. Prepare argument
+     4. Call gdb_collector
+     5. Restore SP
+     6. Restore GPR and SPR
+     7. Build a jump for back to the program
+     8. Copy/relocate original instruction
+     9. Build a jump for replacing original instruction.  */
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ i += GEN_STORE (buf + i, j, 1, (-rsz * 36 + j * rsz));
+
+ /* Save PC<tpaddr> */
+ i += gen_limm (buf + i, 3, tpaddr);
+ i += GEN_STORE (buf + i, 3, 1, (-rsz * 37));
+
+ /* Save CR, XER, LR, and CTR. */
+ i += put_i32 (buf + i, 0x7c600026); /* mfcr r3 */
+ i += GEN_MFSPR (buf + i, 4, 1); /* mfxer r4 */
+ i += GEN_MFSPR (buf + i, 5, 8); /* mflr r5 */
+ i += GEN_MFSPR (buf + i, 6, 9); /* mfctr r6 */
+ i += GEN_STORE (buf + i, 3, 1, -4 * rsz); /* std r3, -32(r1) */
+ i += GEN_STORE (buf + i, 4, 1, -3 * rsz); /* std r4, -24(r1) */
+ i += GEN_STORE (buf + i, 5, 1, -2 * rsz); /* std r5, -16(r1) */
+ i += GEN_STORE (buf + i, 6, 1, -1 * rsz); /* std r6, -8(r1) */
+
+ /* Adjust stack pointer. */
+ i += GEN_ADDI (buf + i, 1, 1, -frame_size); /* subi r1,r1,FRAME_SIZE */
+
+ /* Setup arguments to collector. */
+
+ /* Set r4 to collected registers. */
+ i += GEN_ADDI (buf + i, 4, 1, frame_size - rsz * 37);
+ /* Set r3 to TPOINT. */
+ i += gen_limm (buf + i, 3, tpoint);
+
+ i += gen_atomic_xchg (buf + i, lockaddr, 0, 1);
+ /* Call to collector. */
+ i += gen_call (buf + i, collector);
+ i += gen_atomic_xchg (buf + i, lockaddr, 1, 0);
+
+ /* Restore stack and registers. */
+ i += GEN_ADDI (buf + i, 1, 1, frame_size); /* addi r1,r1,FRAME_SIZE */
+ i += GEN_LOAD (buf + i, 3, 1, -4 * rsz); /* ld r3, -32(r1) */
+ i += GEN_LOAD (buf + i, 4, 1, -3 * rsz); /* ld r4, -24(r1) */
+ i += GEN_LOAD (buf + i, 5, 1, -2 * rsz); /* ld r5, -16(r1) */
+ i += GEN_LOAD (buf + i, 6, 1, -1 * rsz); /* ld r6, -8(r1) */
+ i += put_i32 (buf + i, 0x7c6ff120); /* mtcr r3 */
+ i += GEN_MTSPR (buf + i, 4, 1); /* mtxer r4 */
+ i += GEN_MTSPR (buf + i, 5, 8); /* mtlr r5 */
+ i += GEN_MTSPR (buf + i, 6, 9); /* mtctr r6 */
+ for (j = 0; j < 32; j++)
+ i += GEN_LOAD (buf + i, j, 1, (-rsz * 36 + j * rsz));
+
+ /* Flush instructions to inferior memory. */
+ write_inferior_memory (buildaddr, buf, i);
+
+ /* Now, insert the original instruction to execute in the jump pad. */
+ *adjusted_insn_addr = buildaddr + i;
+ *adjusted_insn_addr_end = *adjusted_insn_addr;
+ relocate_instruction (adjusted_insn_addr_end, tpaddr);
+
+  /* Verify the relocation size.  It should be 4 for normal copy, or 8
+     for some conditional branch.  */
+ if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
+ || (*adjusted_insn_addr_end - *adjusted_insn_addr > 8))
+ {
+      sprintf (err, "E.Unexpected instruction length = %d "
+	       "when relocating instruction.",
+	       (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
+ return 1;
+ }
+
+ buildaddr = *adjusted_insn_addr_end;
+ i = 0;
+ /* Finally, write a jump back to the program. */
+ offset = (tpaddr + 4) - (buildaddr + i);
+  if (offset >= (1 << 25) || offset < -(1 << 25))
+ {
+ sprintf (err, "E.Jump back from jump pad too far from tracepoint "
+ "(offset 0x%x > 26-bit).", offset);
+ return 1;
+ }
+ /* b <tpaddr+4> */
+ i += GEN_B (buf + i, offset);
+ write_inferior_memory (buildaddr, buf, i);
+
+ /* The jump pad is now built. Wire in a jump to our jump pad. This
+ is always done last (by our caller actually), so that we can
+ install fast tracepoints with threads running. This relies on
+ the agent's atomic write support. */
+ offset = *jump_entry - tpaddr;
+ if (offset >= (1 << 25) || offset < -(1 << 25))
+ {
+      sprintf (err, "E.Jump to jump pad too far from tracepoint "
+	       "(offset 0x%x > 26-bit).", offset);
+ return 1;
+ }
+ /* b <jentry> */
+ i += GEN_B (jjump_pad_insn, offset);
+ *jjump_pad_insn_size = 4;
+
+ *jump_entry = buildaddr + i;
+
+ gdb_assert (i < sizeof (buf));
+ return 0;
+}
+
+/* Returns the minimum instruction length for installing a tracepoint. */
+
+static int
+ppc_get_min_fast_tracepoint_insn_len (void)
+{
+ return 4;
+}
+
+#if __PPC64__
+
+static void
+emit_insns (unsigned char *buf, int n)
+{
+ write_inferior_memory (current_insn_ptr, buf, n);
+ current_insn_ptr += n;
+}
+
+#define EMIT_ASM(NAME, INSNS) \
+ do \
+ { \
+ extern unsigned char start_bcax_ ## NAME []; \
+ extern unsigned char end_bcax_ ## NAME []; \
+ emit_insns (start_bcax_ ## NAME, \
+ end_bcax_ ## NAME - start_bcax_ ## NAME); \
+ __asm__ (".section .text.__ppcbcax\n\t" \
+ "start_bcax_" #NAME ":\n\t" \
+ INSNS "\n\t" \
+ "end_bcax_" #NAME ":\n\t" \
+ ".previous\n\t"); \
+ } while (0)
+
+/*
+
+ Bytecode execution stack frame
+
+ | Parameter save area (SP + 48) [8 doublewords]
+ | TOC save area (SP + 40)
+ | link editor doubleword (SP + 32)
+ | compiler doubleword (SP + 24) save TOP here during call
+ | LR save area (SP + 16)
+ | CR save area (SP + 8)
+ SP' -> +- Back chain (SP + 0)
+ | Save r31
+ | Save r30
+ | Save r4 for *value
+ | Save r3 for CTX
+ r30 -> +- Bytecode execution stack
+ |
+ | 64-byte (8 doublewords) at initial. Expand stack as needed.
+ |
+ r31 -> +-
+
+ initial frame size
+ = (48 + 8 * 8) + (4 * 8) + 64
+ = 112 + 96
+ = 208
+
+ r31 is the frame-base for restoring stack-pointer.
+ r30 is the stack-pointer for bytecode machine.
+ It should point to next-empty, so we can use LDU for pop.
+ r3 is used for cache of TOP value.
+ It is the first argument, pointer to CTX.
+ r4 is the second argument, pointer to the result.
+ SP+24 is used for saving TOP during call.
+
+ Note:
+ * To restore stack at epilogue
+ => sp = r31 + 208
+ * To check stack is big enough for bytecode execution.
+ => r30 - 8 > SP + 112
+ * To return execution result.
+ => 0(r4) = TOP
+
+ */
+
+enum { bc_framesz = 208 };
+
+/* Emit prologue in inferior memory. See above comments. */
+
+static void
+ppc64_emit_prologue (void)
+{
+ EMIT_ASM (ppc64_prologue,
+ "mflr 0 \n"
+ "std 0, 16(1) \n"
+ "std 31, -8(1) \n"
+ "std 30, -16(1) \n"
+ "std 4, -24(1) \n"
+ "std 3, -32(1) \n"
+ "addi 30, 1, -40 \n"
+ "li 3, 0 \n"
+ "stdu 1, -208(1) \n"
+ "mr 31, 1 \n");
+}
+
+/* Emit epilogue in inferior memory. See above comments. */
+
+static void
+ppc64_emit_epilogue (void)
+{
+ EMIT_ASM (ppc64_epilogue,
+ /* Restore SP. */
+ "addi 1, 31, 208 \n"
+ /* *result = TOP */
+ "ld 4, -24(1) \n"
+ "std 3, 0(4) \n"
+	    /* Return 0 for no-error.  */
+ "li 3, 0 \n"
+ "ld 0, 16(1) \n"
+ "ld 31, -8(1) \n"
+ "ld 30, -16(1) \n"
+ "mtlr 0 \n"
+ "blr \n");
+}
+
+/* TOP = stack[--sp] + TOP */
+
+static void
+ppc64_emit_add (void)
+{
+ EMIT_ASM (ppc64_add,
+ "ldu 4, 8(30) \n"
+ "add 3, 4, 3 \n");
+}
+
+/* TOP = stack[--sp] - TOP */
+
+static void
+ppc64_emit_sub (void)
+{
+ EMIT_ASM (ppc64_sub,
+ "ldu 4, 8(30) \n"
+ "sub 3, 4, 3 \n");
+}
+
+/* TOP = stack[--sp] * TOP */
+
+static void
+ppc64_emit_mul (void)
+{
+ EMIT_ASM (ppc64_mul,
+ "ldu 4, 8(30) \n"
+ "mulld 3, 4, 3 \n");
+}
+
+/* TOP = stack[--sp] << TOP */
+
+static void
+ppc64_emit_lsh (void)
+{
+ EMIT_ASM (ppc64_lsh,
+ "ldu 4, 8(30) \n"
+ "sld 3, 4, 3 \n");
+}
+
+/* Top = stack[--sp] >> TOP
+ (Arithmetic shift right) */
+
+static void
+ppc64_emit_rsh_signed (void)
+{
+ EMIT_ASM (ppc64_rsha,
+ "ldu 4, 8(30) \n"
+ "srad 3, 4, 3 \n");
+}
+
+/* Top = stack[--sp] >> TOP
+ (Logical shift right) */
+
+static void
+ppc64_emit_rsh_unsigned (void)
+{
+ EMIT_ASM (ppc64_rshl,
+ "ldu 4, 8(30) \n"
+ "srd 3, 4, 3 \n");
+}
+
+/* Emit code for signed-extension specified by ARG. */
+
+static void
+ppc64_emit_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (ppc64_ext8, "extsb 3, 3 \n");
+ break;
+ case 16:
+ EMIT_ASM (ppc64_ext16, "extsh 3, 3 \n");
+ break;
+ case 32:
+ EMIT_ASM (ppc64_ext32, "extsw 3, 3 \n");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+/* Emit code for zero-extension specified by ARG. */
+
+static void
+ppc64_emit_zero_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (ppc64_zext8, "rldicl 3,3,0,56 \n");
+ break;
+ case 16:
+ EMIT_ASM (ppc64_zext16, "rldicl 3,3,0,48 \n");
+ break;
+ case 32:
+ EMIT_ASM (ppc64_zext32, "rldicl 3,3,0,32 \n");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+/* TOP = !TOP
+ i.e., TOP = (TOP == 0) ? 1 : 0; */
+
+static void
+ppc64_emit_log_not (void)
+{
+ EMIT_ASM (ppc64_log_not,
+ "cntlzd 3, 3 \n"
+ "srdi 3, 3, 6 \n");
+}
+
+/* TOP = stack[--sp] & TOP */
+
+static void
+ppc64_emit_bit_and (void)
+{
+ EMIT_ASM (ppc64_bit_and,
+ "ldu 4, 8(30) \n"
+ "and 3, 4, 3 \n");
+}
+
+/* TOP = stack[--sp] | TOP */
+
+static void
+ppc64_emit_bit_or (void)
+{
+ EMIT_ASM (ppc64_bit_or,
+ "ldu 4, 8(30) \n"
+ "or 3, 4, 3 \n");
+}
+
+/* TOP = stack[--sp] ^ TOP */
+
+static void
+ppc64_emit_bit_xor (void)
+{
+ EMIT_ASM (ppc64_bit_xor,
+ "ldu 4, 8(30) \n"
+ "xor 3, 4, 3 \n");
+}
+
+/* TOP = ~TOP
+ i.e., TOP = ~(TOP | TOP) */
+
+static void
+ppc64_emit_bit_not (void)
+{
+ EMIT_ASM (ppc64_bit_not,
+ "nor 3, 3, 3 \n");
+}
+
+/* TOP = stack[--sp] == TOP */
+
+static void
+ppc64_emit_equal (void)
+{
+ EMIT_ASM (ppc64_equal,
+ "ldu 4, 8(30) \n"
+ "xor 3, 3, 4 \n"
+ "cntlzd 3, 3 \n"
+ "srdi 3, 3, 6 \n");
+}
+
+/* TOP = stack[--sp] < TOP
+ (Signed comparison) */
+
+static void
+ppc64_emit_less_signed (void)
+{
+ EMIT_ASM (ppc64_less_signed,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "mfocrf 3, 1 \n"
+ "rlwinm 3, 3, 29, 31, 31 \n");
+}
+
+/* TOP = stack[--sp] < TOP
+ (Unsigned comparison) */
+
+static void
+ppc64_emit_less_unsigned (void)
+{
+ EMIT_ASM (ppc64_less_unsigned,
+ "ldu 4, 8(30) \n"
+ "cmpld 7, 4, 3 \n"
+ "mfocrf 3, 1 \n"
+ "rlwinm 3, 3, 29, 31, 31 \n");
+}
+
+/* Access the memory address in TOP in size of SIZE.
+ Zero-extend the read value. */
+
+static void
+ppc64_emit_ref (int size)
+{
+ switch (size)
+ {
+ case 1:
+ EMIT_ASM (ppc64_ref8, "lbz 3, 0(3) \n");
+ break;
+ case 2:
+ EMIT_ASM (ppc64_ref16, "lhz 3, 0(3) \n");
+ break;
+ case 4:
+ EMIT_ASM (ppc64_ref32, "lwz 3, 0(3) \n");
+ break;
+ case 8:
+ EMIT_ASM (ppc64_ref64, "ld 3, 0(3) \n");
+ break;
+ }
+}
+
+/* TOP = NUM */
+
+static void
+ppc64_emit_const (LONGEST num)
+{
+ unsigned char buf[5 * 4];
+ int i = 0;
+
+ i += gen_limm (buf + i, 3, num);
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+}
+
+/* Set TOP to the value of register REG by calling get_raw_reg function
+ with two argument, collected buffer and register number. */
+
+static void
+ppc64_emit_reg (int reg)
+{
+ unsigned char buf[8 * 8];
+ int i = 0;
+
+ i += GEN_LD (buf + i, 3, 31, bc_framesz - 32);
+ i += GEN_LD (buf + i, 3, 3, 48);
+  i += GEN_LI (buf + i, 4, reg);	/* li r4, reg */
+ i += gen_call (buf + i, get_raw_reg_func_addr ());
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+}
+
+/* TOP = stack[--sp] */
+
+static void
+ppc64_emit_pop (void)
+{
+ EMIT_ASM (ppc64_pop, "ldu 3, 8(30) \n");
+}
+
+/* stack[sp++] = TOP
+
+ Because we may use up bytecode stack, expand 8 doublewords more
+ if needed. */
+
+static void
+ppc64_emit_stack_flush (void)
+{
+ /* Make sure bytecode stack is big enough before push.
+ Otherwise, expand 64-byte more. */
+
+ EMIT_ASM (ppc64_stack_flush,
+ " std 3, 0(30) \n"
+ " addi 4, 30, -(112 + 8) \n"
+ " cmpd 7, 4, 1 \n"
+ " bgt 1f \n"
+ " ld 4, 0(1) \n"
+ " addi 1, 1, -64 \n"
+ " std 4, 0(1) \n"
+ "1:addi 30, 30, -8 \n"
+ );
+}
+
+/* Swap TOP and stack[sp-1] */
+
+static void
+ppc64_emit_swap (void)
+{
+ EMIT_ASM (ppc64_swap,
+ "ld 4, 8(30) \n"
+ "std 3, 8(30) \n"
+ "mr 3, 4 \n");
+}
+
+/* Discard N elements in the stack. */
+
+static void
+ppc64_emit_stack_adjust (int n)
+{
+ unsigned char buf[4];
+ int i = 0;
+
+ i += GEN_ADDI (buf, 30, 30, n << 3); /* addi r30, r30, (n << 3) */
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+ gdb_assert (i <= sizeof (buf));
+}
+
+/* Call function FN. */
+
+static void
+ppc64_emit_call (CORE_ADDR fn)
+{
+ unsigned char buf[8 * 4];
+ int i = 0;
+
+ i += gen_call (buf + i, fn);
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+ gdb_assert (i <= sizeof (buf));
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.
+ TOP = fn (arg1)
+ */
+
+static void
+ppc64_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[8 * 4];
+ int i = 0;
+
+ /* Setup argument. arg1 is a 16-bit value. */
+  i += GEN_LI (buf + i, 3, arg1);	/* li r3, arg1 */
+ i += gen_call (buf + i, fn);
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+ gdb_assert (i <= sizeof (buf));
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'.
+ fn (arg1, TOP)
+
+ TOP should be preserved/restored before/after the call. */
+
+static void
+ppc64_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[12 * 4];
+ int i = 0;
+
+ /* Save TOP */
+  i += GEN_STD (buf + i, 3, 31, bc_framesz + 24);
+
+ /* Setup argument. arg1 is a 16-bit value. */
+ i += GEN_MR (buf + i, 4, 3); /* mr r4, r3 */
+ i += GEN_LI (buf + i, 3, arg1); /* li r3, arg1 */
+ i += gen_call (buf + i, fn);
+
+ /* Restore TOP */
+  i += GEN_LD (buf + i, 3, 31, bc_framesz + 24);
+
+ write_inferior_memory (current_insn_ptr, buf, i);
+ current_insn_ptr += i;
+ gdb_assert (i <= sizeof (buf));
+}
+
+/* Note in the following goto ops:
+
+ When emitting goto, the target address is later relocated by
+ write_goto_address. OFFSET_P is the offset of the branch instruction
+ in the code sequence, and SIZE_P is how to relocate the instruction,
+ recognized by ppc_write_goto_address. In current implementation,
+   SIZE can be either 24 or 14 for branch or conditional-branch instruction.
+ */
+
+/* If TOP is true, goto somewhere. Otherwise, just fall-through. */
+
+static void
+ppc64_emit_if_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_if_goto,
+ "mr 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "cmpdi 7, 4, 0 \n"
+ "1:bne 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Unconditional goto. */
+
+static void
+ppc64_emit_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_goto,
+ "1:b 1b \n");
+
+ if (offset_p)
+ *offset_p = 0;
+ if (size_p)
+ *size_p = 24;
+}
+
+/* Goto if stack[--sp] == TOP */
+
+static void
+ppc64_emit_eq_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_eq_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:beq 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Goto if stack[--sp] != TOP */
+
+static void
+ppc64_emit_ne_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_ne_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:bne 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Goto if stack[--sp] < TOP */
+
+static void
+ppc64_emit_lt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_lt_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:blt 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Goto if stack[--sp] <= TOP */
+
+static void
+ppc64_emit_le_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_le_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:ble 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Goto if stack[--sp] > TOP */
+
+static void
+ppc64_emit_gt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_gt_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:bgt 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Goto if stack[--sp] >= TOP */
+
+static void
+ppc64_emit_ge_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (ppc64_ge_goto,
+ "ldu 4, 8(30) \n"
+ "cmpd 7, 4, 3 \n"
+ "ldu 3, 8(30) \n"
+ "1:bge 7, 1b \n");
+
+ if (offset_p)
+ *offset_p = 12;
+ if (size_p)
+ *size_p = 14;
+}
+
+/* Relocate previous emitted branch instruction. FROM is the address
+ of the branch instruction, TO is the goto target address, and SIZE
+   is the value we set by *SIZE_P before.  Currently, it is either
+   24 or 14 for branch and conditional-branch instruction.  */
+
+static void
+ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+ int rel = to - from;
+ uint32_t insn;
+ int opcd;
+ unsigned char buf[4];
+
+ read_inferior_memory (from, buf, 4);
+ insn = get_i32 (buf);
+ opcd = (insn >> 26) & 0x3f;
+
+ switch (size)
+ {
+ case 14:
+ if (opcd != 16)
+ emit_error = 1;
+ insn = (insn & ~0xfffc) | (rel & 0xfffc);
+ break;
+ case 24:
+ if (opcd != 18)
+ emit_error = 1;
+ insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
+ break;
+ default:
+ emit_error = 1;
+ }
+
+ put_i32 (buf, insn);
+ write_inferior_memory (from, buf, 4);
+}
+
+/* Vector of emit ops for PowerPC64. */
+
+static struct emit_ops ppc64_emit_ops_vector =
+{
+ ppc64_emit_prologue,
+ ppc64_emit_epilogue,
+ ppc64_emit_add,
+ ppc64_emit_sub,
+ ppc64_emit_mul,
+ ppc64_emit_lsh,
+ ppc64_emit_rsh_signed,
+ ppc64_emit_rsh_unsigned,
+ ppc64_emit_ext,
+ ppc64_emit_log_not,
+ ppc64_emit_bit_and,
+ ppc64_emit_bit_or,
+ ppc64_emit_bit_xor,
+ ppc64_emit_bit_not,
+ ppc64_emit_equal,
+ ppc64_emit_less_signed,
+ ppc64_emit_less_unsigned,
+ ppc64_emit_ref,
+ ppc64_emit_if_goto,
+ ppc64_emit_goto,
+ ppc_write_goto_address,
+ ppc64_emit_const,
+ ppc64_emit_call,
+ ppc64_emit_reg,
+ ppc64_emit_pop,
+ ppc64_emit_stack_flush,
+ ppc64_emit_zero_ext,
+ ppc64_emit_swap,
+ ppc64_emit_stack_adjust,
+ ppc64_emit_int_call_1,
+ ppc64_emit_void_call_2,
+ ppc64_emit_eq_goto,
+ ppc64_emit_ne_goto,
+ ppc64_emit_lt_goto,
+ ppc64_emit_le_goto,
+ ppc64_emit_gt_goto,
+ ppc64_emit_ge_goto
+};
+
+/* Implementation of emit_ops target ops. */
+
+__attribute__ ((unused))
+static struct emit_ops *
+ppc_emit_ops (void)
+{
+ return &ppc64_emit_ops_vector;
+}
+#endif
+
+/* Returns true for supporting range-stepping. */
+
+static int
+ppc_supports_range_stepping (void)
+{
+ return 1;
+}
+
/* Provide only a fill function for the general register set. ps_lgetregs
will use this for NPTL support. */
@@ -687,16 +1949,31 @@ struct linux_target_ops the_low_target = {
ppc_set_pc,
(const unsigned char *) &ppc_breakpoint,
ppc_breakpoint_len,
- NULL,
- 0,
+ NULL, /* breakpoint_reinsert_addr */
+ 0, /* decr_pc_after_break */
ppc_breakpoint_at,
- NULL, /* supports_z_point_type */
- NULL,
- NULL,
- NULL,
- NULL,
+ ppc_supports_z_point_type, /* supports_z_point_type */
+ ppc_insert_point,
+ ppc_remove_point,
+ NULL, /* stopped_by_watchpoint */
+ NULL, /* stopped_data_address */
ppc_collect_ptrace_register,
ppc_supply_ptrace_register,
+ NULL, /* siginfo_fixup */
+ NULL, /* linux_new_process */
+ NULL, /* linux_new_thread */
+ NULL, /* linux_prepare_to_resume */
+ NULL, /* linux_process_qsupported */
+ ppc_supports_tracepoints,
+ NULL, /* get_thread_area */
+ ppc_install_fast_tracepoint_jump_pad,
+#if __PPC64__
+ ppc_emit_ops,
+#else
+ NULL, /* Use interpreter for ppc32. */
+#endif
+ ppc_get_min_fast_tracepoint_insn_len,
+ ppc_supports_range_stepping,
};
void
@@ -83,6 +83,9 @@
#include "features/rs6000/powerpc-e500.c"
#include "features/rs6000/rs6000.c"
+#include "ax.h"
+#include "ax-gdb.h"
+
/* Determine if regnum is an SPE pseudo-register. */
#define IS_SPE_PSEUDOREG(tdep, regnum) ((tdep)->ppc_ev0_regnum >= 0 \
&& (regnum) >= (tdep)->ppc_ev0_regnum \
@@ -966,6 +969,21 @@ rs6000_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *bp_addr,
return little_breakpoint;
}
+/* Return true if ADDR is a valid address for a fast tracepoint.  Set
+ *ISIZE to the number of bytes the target should copy elsewhere for
+ the tracepoint.  */
+
+static int
+ppc_fast_tracepoint_valid_at (struct gdbarch *gdbarch,
+ CORE_ADDR addr, int *isize, char **msg)
+{
+ if (isize)
+ *isize = gdbarch_max_insn_length (gdbarch); /* PPC_INSN_SIZE.  */
+ if (msg)
+ *msg = NULL; /* No diagnostic needed: every address is accepted.  */
+ return 1;
+}
+
/* Instruction masks for displaced stepping. */
#define BRANCH_MASK 0xfc000000
#define BP_MASK 0xFC0007FE
@@ -3139,6 +3157,7 @@ struct rs6000_frame_cache
static struct rs6000_frame_cache *
rs6000_frame_cache (struct frame_info *this_frame, void **this_cache)
{
+ volatile struct gdb_exception ex;
struct rs6000_frame_cache *cache;
struct gdbarch *gdbarch = get_frame_arch (this_frame);
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
@@ -3153,7 +3172,14 @@ rs6000_frame_cache (struct frame_info *this_frame, void **this_cache)
(*this_cache) = cache;
cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
- func = get_frame_func (this_frame);
+ TRY_CATCH (ex, RETURN_MASK_ERROR)
+ {
+ func = get_frame_func (this_frame);
+ }
+ if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
+ throw_exception (ex); /* Re-throw real errors.  */
+ if (ex.reason < 0)
+ return (*this_cache); /* Unavailable: return the zeroed cache.  */
pc = get_frame_pc (this_frame);
skip_prologue (gdbarch, func, pc, &fdata);
@@ -3323,6 +3349,11 @@ rs6000_frame_this_id (struct frame_info *this_frame, void **this_cache,
{
struct rs6000_frame_cache *info = rs6000_frame_cache (this_frame,
this_cache);
+ if (info->base == 0 && info->initial_sp == 0)
+ {
+ (*this_id) = frame_id_build_unavailable_stack (0);
+ return;
+ }
/* This marks the outermost frame. */
if (info->base == 0)
return;
@@ -3679,6 +3710,8 @@ bfd_uses_spe_extensions (bfd *abfd)
#define PPC_LK(insn) PPC_BIT (insn, 31)
#define PPC_TX(insn) PPC_BIT (insn, 31)
#define PPC_LEV(insn) PPC_FIELD (insn, 20, 7)
+#define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
+#define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
#define PPC_XT(insn) ((PPC_TX (insn) << 5) | PPC_T (insn))
#define PPC_XER_NB(xer) (xer & 0x7f)
@@ -5332,6 +5365,146 @@ UNKNOWN_OP:
return 0;
}
+/* Copy the instruction at OLDLOC to *TO, advancing *TO past the bytes
+ written (gdbarch_relocate_instruction).  Adjust pc-relative branch
+ displacements so the copied insn branches to the original target.  */
+
+static void
+ppc_relocate_instruction (struct gdbarch *gdbarch,
+ CORE_ADDR *to, CORE_ADDR oldloc)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ uint32_t insn;
+ int op6, rel, newrel;
+
+ insn = read_memory_unsigned_integer (oldloc, 4, byte_order);
+ op6 = PPC_OP6 (insn);
+
+ if (op6 == 18 && (insn & 2) == 0)
+ {
+ /* Unconditional branch (b) with AA = 0, i.e. pc-relative.  */
+ rel = PPC_LI (insn);
+ newrel = (oldloc - *to) + rel;
+
+ /* Out of 26-bit range; cannot relocate, so write nothing.  */
+ if (newrel >= (1 << 25) || newrel < -(1 << 25))
+ return;
+
+ insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
+ }
+ else if (op6 == 16 && (insn & 2) == 0)
+ {
+ /* Conditional branch (bc) with AA = 0, i.e. pc-relative.  */
+
+ rel = PPC_BD (insn);
+ newrel = (oldloc - *to) + rel;
+
+ if (newrel >= (1 << 25) || newrel - 4 < -(1 << 25))
+ return;
+
+ /* Displacements are relative to the branch insn itself.  */
+ if (newrel >= (1 << 15) || newrel < -(1 << 15))
+ {
+ /* The offset is too big for a 16-bit conditional branch.
+ Try to invert the condition and jump with a 26-bit branch.
+ For example,
+
+ beq .Lgoto
+ INSN1
+
+ =>
+
+ bne 1f
+ b .Lgoto
+ 1:INSN1
+
+ */
+
+ /* Check whether BO is 001at or 011at (invertible condition).  */
+ if ((PPC_BO (insn) & 0x14) != 0x4)
+ return;
+
+ /* Invert condition.  */
+ insn ^= (1 << 24);
+ /* Jump over the unconditional branch.  */
+ insn = (insn & ~0xfffc) | 0x8;
+ write_memory_unsigned_integer (*to, 4, byte_order, insn);
+ *to += 4;
+
+ /* Copy the LK bit.  The b is emitted at *to + 4, so its
+ displacement is NEWREL - 4.  */
+ insn = (18 << 26) | (0x3fffffc & (newrel - 4)) | (insn & 0x3);
+ write_memory_unsigned_integer (*to, 4, byte_order, insn);
+ *to += 4;
+
+ return;
+ }
+ else
+ insn = (insn & ~0xfffc) | (newrel & 0xfffc);
+ }
+
+ write_memory_unsigned_integer (*to, 4, byte_order, insn);
+ *to += 4;
+}
+
+/* Implement gdbarch_gen_return_address. Generate a bytecode expression
+ to get the value of the saved PC. SCOPE is the address we want to
+ get the return address for; it may be in the middle of a function. */
+
+static void
+ppc_gen_return_address (struct gdbarch *gdbarch,
+ struct agent_expr *ax, struct axs_value *value,
+ CORE_ADDR scope)
+{
+ struct rs6000_framedata frame;
+ CORE_ADDR func_addr;
+
+ /* Try to find the start of the function and analyze the prologue. */
+ if (find_pc_partial_function (scope, NULL, &func_addr, NULL))
+ {
+ skip_prologue (gdbarch, func_addr, scope, &frame);
+
+ if (frame.lr_offset == 0) /* LR not saved: still live in the register. */
+ {
+ value->type = register_type (gdbarch, PPC_LR_REGNUM);
+ value->kind = axs_lvalue_register;
+ value->u.reg = PPC_LR_REGNUM;
+ return;
+ }
+ }
+ else
+ {
+ /* If we don't know where the function starts, we cannot analyze
+ it. Assume it is not a leaf function, not frameless, and that
+ LR is saved at back-chain + 16. */
+
+ frame.frameless = 0;
+ frame.lr_offset = 16;
+ }
+
+ /* if (frameless)
+ load 16(SP)
+ else
+ BC = 0(SP)
+ load 16(BC) */
+
+ ax_reg (ax, gdbarch_sp_regnum (gdbarch));
+
+ /* Load back-chain. */
+ if (!frame.frameless)
+ {
+ if (register_size (gdbarch, PPC_LR_REGNUM) == 8)
+ ax_simple (ax, aop_ref64);
+ else
+ ax_simple (ax, aop_ref32);
+ }
+
+ ax_const_l (ax, frame.lr_offset); /* Offset of the saved LR slot. */
+ ax_simple (ax, aop_add);
+ value->type = register_type (gdbarch, PPC_LR_REGNUM);
+ value->kind = axs_lvalue_memory;
+}
+
/* Initialize the current architecture based on INFO. If possible, re-use an
architecture from ARCHES, which is a list of architectures already created
during this debugging session.
@@ -5892,6 +6065,7 @@ rs6000_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
set_gdbarch_breakpoint_from_pc (gdbarch, rs6000_breakpoint_from_pc);
+ set_gdbarch_fast_tracepoint_valid_at (gdbarch, ppc_fast_tracepoint_valid_at);
/* The value of symbols of type N_SO and N_FUN maybe null when
it shouldn't be. */
@@ -5929,6 +6103,9 @@ rs6000_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
set_gdbarch_displaced_step_location (gdbarch,
displaced_step_at_entry_point);
+ set_gdbarch_relocate_instruction (gdbarch, ppc_relocate_instruction);
+ set_gdbarch_gen_return_address (gdbarch, ppc_gen_return_address);
+
set_gdbarch_max_insn_length (gdbarch, PPC_INSN_SIZE);
/* Hook in ABI-specific overrides, if they have been registered. */
@@ -146,6 +146,9 @@ if [is_amd64_regs_target] {
} elseif [is_x86_like_target] {
set fpreg "\$ebp"
set spreg "\$esp"
+} elseif [istarget "powerpc*-*-*"] {
+ set fpreg "\$r31"
+ set spreg "\$r1"
} else {
set fpreg "\$fp"
set spreg "\$sp"
@@ -36,6 +36,8 @@ func4 (void)
SYMBOL(set_tracepoint) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(func5) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
@@ -44,6 +44,10 @@ if [is_amd64_regs_target] {
set fpreg "ebp"
set spreg "esp"
set pcreg "eip"
+} elseif [istarget "powerpc*-*-*"] {
+ set fpreg "r31"
+ set spreg "r1"
+ set pcreg "pc"
} else {
set fpreg "fp"
set spreg "sp"
@@ -218,6 +218,8 @@ if [is_amd64_regs_target] {
set spreg "\$rsp"
} elseif [is_x86_like_target] {
set spreg "\$esp"
+} elseif [istarget "powerpc*-*-*"] {
+ set spreg "\$r1"
} else {
set spreg "\$sp"
}
@@ -42,6 +42,8 @@ marker (int anarg)
SYMBOL(set_point) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(func) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
@@ -53,6 +55,8 @@ marker (int anarg)
SYMBOL(four_byter) ":\n"
#if (defined __i386__)
" cmpl $0x1,0x8(%ebp) \n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
}
@@ -84,7 +84,8 @@ proc test_fast_tracepoints {} {
gdb_test "print gdb_agent_gdb_trampoline_buffer_error" ".*" ""
- if { [istarget "x86_64-*-*"] || [istarget "i\[34567\]86-*-*"] } {
+ if { [istarget "x86_64-*-*"] || [istarget "i\[34567\]86-*-*"] \
+ || [istarget "powerpc*-*-*"] } {
gdb_test "ftrace set_point" "Fast tracepoint .*" \
"fast tracepoint at a long insn"
@@ -56,6 +56,8 @@ if [is_amd64_regs_target] {
set pcreg "rip"
} elseif [is_x86_like_target] {
set pcreg "eip"
+} elseif [istarget "powerpc*-*-*"] {
+ set pcreg "pc"
} else {
# Other ports that support tracepoints should set the name of pc
# register here.
@@ -135,6 +135,8 @@ proc test_trace_unavailable { data_source } {
set pcnum 16
} elseif [is_x86_like_target] {
set pcnum 8
+ } elseif [istarget "powerpc*-*-*"] {
+ set pcnum 64
} else {
# Other ports support tracepoint should define the number
# of its own pc register.
@@ -441,6 +441,8 @@ proc pending_tracepoint_with_action_resolved { trace_type } \
set pcreg "rip"
} elseif [is_x86_like_target] {
set pcreg "eip"
+ } elseif [istarget "powerpc*-*-*"] {
+ set pcreg "pc"
}
gdb_trace_setactions "set action for pending tracepoint" "" \
@@ -38,6 +38,8 @@ pendfunc (int x)
SYMBOL(set_point1) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(pendfunc1) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
}
@@ -35,6 +35,8 @@ pendfunc2 (int x)
SYMBOL(set_point2) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(foo) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
}
@@ -26,6 +26,8 @@
tracepoint jump. */
#if (defined __x86_64__ || defined __i386__)
# define NOP " .byte 0xe9,0x00,0x00,0x00,0x00\n" /* jmp $+5 (5-byte nop) */
+#elif (defined __PPC64__ || defined __PPC__)
+# define NOP " nop\n"
#else
# define NOP "" /* port me */
#endif
@@ -158,6 +158,10 @@ if [is_amd64_regs_target] {
set fpreg "ebp"
set spreg "esp"
set pcreg "eip"
+} elseif [istarget "powerpc*-*-*"] {
+ set fpreg "r31"
+ set spreg "r1"
+ set pcreg "pc"
} else {
set fpreg "fp"
set spreg "sp"
@@ -41,6 +41,8 @@ marker (void)
SYMBOL(set_point) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(func) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
@@ -48,6 +50,8 @@ marker (void)
SYMBOL(after_set_point) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(func) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
}
@@ -49,6 +49,10 @@ if [is_amd64_regs_target] {
set fpreg "ebp"
set spreg "esp"
set pcreg "eip"
+} elseif [istarget "powerpc*-*-*"] {
+ set fpreg "r31"
+ set spreg "r1"
+ set pcreg "pc"
}
# Set breakpoint and tracepoint at the same address.
@@ -37,6 +37,8 @@ thread_function(void *arg)
SYMBOL(set_point1) ":\n"
#if (defined __x86_64__ || defined __i386__)
" call " SYMBOL(func) "\n"
+#elif (defined __PPC64__ || defined __PPC__)
+ " nop\n"
#endif
);
}
@@ -47,6 +47,8 @@ if [is_amd64_regs_target] {
set fpreg "\$rbp"
} elseif [is_x86_like_target] {
set fpreg "\$ebp"
+} elseif [istarget "powerpc*-*-*"] {
+ set fpreg "\$r31"
} else {
set fpreg "\$fp"
}