Diffstat (limited to 'gdb-linux_perf-bundle.patch')
-rw-r--r--  gdb-linux_perf-bundle.patch  226
1 file changed, 226 insertions, 0 deletions
diff --git a/gdb-linux_perf-bundle.patch b/gdb-linux_perf-bundle.patch
new file mode 100644
index 0000000..bb6fb6a
--- /dev/null
+++ b/gdb-linux_perf-bundle.patch
@@ -0,0 +1,226 @@
+From FEDORA_PATCHES Mon Sep 17 00:00:00 2001
+From: Fedora GDB patches <invalid@email.com>
+Date: Fri, 27 Oct 2017 21:07:50 +0200
+Subject: gdb-linux_perf-bundle.patch
+
+;; [dts+el7] [x86*] Bundle linux_perf.h for libipt (RH BZ 1256513).
+;;=fedora
+
+diff --git a/gdb/gdb.c b/gdb/gdb.c
+--- a/gdb/gdb.c
++++ b/gdb/gdb.c
+@@ -21,6 +21,10 @@
+ #include "interps.h"
+ #include "run-on-main-thread.h"
+
++#ifdef PERF_ATTR_SIZE_VER5_BUNDLE
++extern "C" void __libipt_init(void);
++#endif
++
+ int
+ main (int argc, char **argv)
+ {
+@@ -32,6 +36,10 @@ main (int argc, char **argv)
+
+ struct captured_main_args args;
+
++#ifdef PERF_ATTR_SIZE_VER5_BUNDLE
++ __libipt_init();
++#endif
++
+ memset (&args, 0, sizeof args);
+ args.argc = argc;
+ args.argv = argv;
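
The gdb.c hunks above only wire an early call to __libipt_init() into GDB's main() when the bundled-header macro is defined. A minimal standalone sketch of the same guarded-init pattern follows; the stub body for __libipt_init is hypothetical (the real initializer lives in the bundled libipt), it is here only so the sketch compiles on its own.

#ifdef PERF_ATTR_SIZE_VER5_BUNDLE
/* Normally provided by the statically linked, bundled libipt; stub body
   added only to keep this sketch self-contained.  */
extern "C" void __libipt_init (void) {}
#endif

int
main (int argc, char **argv)
{
  /* Run the bundled-library initializer before anything that may use
     branch tracing, mirroring the patched gdb/gdb.c above.  */
#ifdef PERF_ATTR_SIZE_VER5_BUNDLE
  __libipt_init ();
#endif
  (void) argc;
  (void) argv;
  return 0;
}
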
+diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
+--- a/gdb/nat/linux-btrace.h
++++ b/gdb/nat/linux-btrace.h
+@@ -28,6 +28,177 @@
+ # include <linux/perf_event.h>
+ #endif
+
++#ifdef PERF_ATTR_SIZE_VER5_BUNDLE
++#ifndef HAVE_LINUX_PERF_EVENT_H
++# error "PERF_ATTR_SIZE_VER5_BUNDLE && !HAVE_LINUX_PERF_EVENT_H"
++#endif
++#ifndef PERF_ATTR_SIZE_VER5
++#define PERF_ATTR_SIZE_VER5
++#define perf_event_mmap_page perf_event_mmap_page_bundle
++// kernel-headers-3.10.0-493.el7.x86_64/usr/include/linux/perf_event.h
++/*
++ * Structure of the page that can be mapped via mmap
++ */
++struct perf_event_mmap_page {
++ __u32 version; /* version number of this structure */
++ __u32 compat_version; /* lowest version this is compat with */
++
++ /*
++ * Bits needed to read the hw events in user-space.
++ *
++ * u32 seq, time_mult, time_shift, index, width;
++ * u64 count, enabled, running;
++ * u64 cyc, time_offset;
++ * s64 pmc = 0;
++ *
++ * do {
++ * seq = pc->lock;
++ * barrier()
++ *
++ * enabled = pc->time_enabled;
++ * running = pc->time_running;
++ *
++ * if (pc->cap_usr_time && enabled != running) {
++ * cyc = rdtsc();
++ * time_offset = pc->time_offset;
++ * time_mult = pc->time_mult;
++ * time_shift = pc->time_shift;
++ * }
++ *
++ * index = pc->index;
++ * count = pc->offset;
++ * if (pc->cap_user_rdpmc && index) {
++ * width = pc->pmc_width;
++ * pmc = rdpmc(index - 1);
++ * }
++ *
++ * barrier();
++ * } while (pc->lock != seq);
++ *
++ * NOTE: for obvious reason this only works on self-monitoring
++ * processes.
++ */
++ __u32 lock; /* seqlock for synchronization */
++ __u32 index; /* hardware event identifier */
++ __s64 offset; /* add to hardware event value */
++ __u64 time_enabled; /* time event active */
++ __u64 time_running; /* time event on cpu */
++ union {
++ __u64 capabilities;
++ struct {
++ __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
++ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
++
++ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
++ cap_user_time : 1, /* The time_* fields are used */
++ cap_user_time_zero : 1, /* The time_zero field is used */
++ cap_____res : 59;
++ };
++ };
++
++ /*
++ * If cap_user_rdpmc this field provides the bit-width of the value
++ * read using the rdpmc() or equivalent instruction. This can be used
++ * to sign extend the result like:
++ *
++ * pmc <<= 64 - width;
++ * pmc >>= 64 - width; // signed shift right
++ * count += pmc;
++ */
++ __u16 pmc_width;
++
++ /*
++ * If cap_usr_time the below fields can be used to compute the time
++ * delta since time_enabled (in ns) using rdtsc or similar.
++ *
++ * u64 quot, rem;
++ * u64 delta;
++ *
++ * quot = (cyc >> time_shift);
++ * rem = cyc & (((u64)1 << time_shift) - 1);
++ * delta = time_offset + quot * time_mult +
++ * ((rem * time_mult) >> time_shift);
++ *
++ * Where time_offset,time_mult,time_shift and cyc are read in the
++ * seqcount loop described above. This delta can then be added to
++ * enabled and possible running (if index), improving the scaling:
++ *
++ * enabled += delta;
++ * if (index)
++ * running += delta;
++ *
++ * quot = count / running;
++ * rem = count % running;
++ * count = quot * enabled + (rem * enabled) / running;
++ */
++ __u16 time_shift;
++ __u32 time_mult;
++ __u64 time_offset;
++ /*
++ * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
++ * from sample timestamps.
++ *
++ * time = timestamp - time_zero;
++ * quot = time / time_mult;
++ * rem = time % time_mult;
++ * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
++ *
++ * And vice versa:
++ *
++ * quot = cyc >> time_shift;
++ * rem = cyc & (((u64)1 << time_shift) - 1);
++ * timestamp = time_zero + quot * time_mult +
++ * ((rem * time_mult) >> time_shift);
++ */
++ __u64 time_zero;
++ __u32 size; /* Header size up to __reserved[] fields. */
++
++ /*
++ * Hole for extension of the self monitor capabilities
++ */
++
++ __u8 __reserved[118*8+4]; /* align to 1k. */
++
++ /*
++ * Control data for the mmap() data buffer.
++ *
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
++ *
++ * When the mapping is PROT_WRITE the @data_tail value should be
++ * written by userspace to reflect the last read data, after issueing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
++ *
++ * data_{offset,size} indicate the location and size of the perf record
++ * buffer within the mmapped area.
++ */
++ __u64 data_head; /* head in the data section */
++ __u64 data_tail; /* user-space written tail */
++ __u64 data_offset; /* where the buffer starts */
++ __u64 data_size; /* data buffer size */
++
++ /*
++ * AUX area is defined by aux_{offset,size} fields that should be set
++ * by the userspace, so that
++ *
++ * aux_offset >= data_offset + data_size
++ *
++ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
++ *
++ * Ring buffer pointers aux_{head,tail} have the same semantics as
++ * data_{head,tail} and same ordering rules apply.
++ */
++ __u64 aux_head;
++ __u64 aux_tail;
++ __u64 aux_offset;
++ __u64 aux_size;
++};
++#endif // PERF_ATTR_SIZE_VER5
++#endif // PERF_ATTR_SIZE_VER5_BUNDLE
++
+ struct target_ops;
+
+ #if HAVE_LINUX_PERF_EVENT_H
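
The block comments carried into the bundled struct above spell out the whole user-space read protocol: a seqlock-style retry loop around the counter/time fields, the time_zero/time_mult/time_shift conversion, and the smp_rmb()/smp_mb() ordering rules for data_head/data_tail. Below is a minimal sketch of that protocol for a self-monitoring process, assuming a system <linux/perf_event.h> that already has these fields and using GCC/Clang atomic fences in place of the kernel barrier macros; the helper names (tsc_to_perf_time, read_times, drain_ring) are illustrative only, not GDB or libipt API.

#include <linux/perf_event.h>
#include <stdint.h>

/* Convert a TSC value to a perf timestamp, following the time_zero
   comment in perf_event_mmap_page (valid only if cap_user_time_zero).  */
static uint64_t
tsc_to_perf_time (const perf_event_mmap_page *pc, uint64_t cyc)
{
  uint64_t quot = cyc >> pc->time_shift;
  uint64_t rem = cyc & (((uint64_t) 1 << pc->time_shift) - 1);
  return pc->time_zero + quot * pc->time_mult
	 + ((rem * pc->time_mult) >> pc->time_shift);
}

/* Read enabled/running times under the seqlock described in the header
   comment: retry while the kernel is updating the page.  Acquire fences
   stand in for the kernel's barrier().  */
static void
read_times (const perf_event_mmap_page *pc, uint64_t *enabled, uint64_t *running)
{
  uint32_t seq;
  do
    {
      seq = pc->lock;
      __atomic_thread_fence (__ATOMIC_ACQUIRE);   /* barrier() */
      *enabled = pc->time_enabled;
      *running = pc->time_running;
      __atomic_thread_fence (__ATOMIC_ACQUIRE);   /* barrier() */
    }
  while (pc->lock != seq);
}

/* Consume new records from the mmap()ed data buffer.  Per the comment
   above data_head: read data_head before the data (smp_rmb), and write
   data_tail only after the data has been read (smp_mb), so the kernel
   does not overwrite unread records.  Records that wrap past the end of
   the buffer would need to be copied out in a real reader.  */
static void
drain_ring (perf_event_mmap_page *pc, unsigned char *base)
{
  uint64_t head = pc->data_head;
  __atomic_thread_fence (__ATOMIC_ACQUIRE);       /* smp_rmb() */

  for (uint64_t tail = pc->data_tail; tail < head; )
    {
      const perf_event_header *ev
	= (const perf_event_header *) (base + pc->data_offset
				       + (tail % pc->data_size));
      if (ev->size == 0)
	break;                                    /* malformed record; stop */
      /* ... decode the PERF_RECORD_* payload in *ev here ...  */
      tail += ev->size;

      __atomic_thread_fence (__ATOMIC_SEQ_CST);   /* smp_mb() */
      pc->data_tail = tail;                       /* kernel may reuse the space */
    }
}
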
+diff --git a/gdbsupport/common.m4 b/gdbsupport/common.m4
+--- a/gdbsupport/common.m4
++++ b/gdbsupport/common.m4
+@@ -168,7 +168,7 @@ AC_DEFUN([GDB_AC_COMMON], [
+ AC_PREPROC_IFELSE([AC_LANG_SOURCE([[
+ #include <linux/perf_event.h>
+ #ifndef PERF_ATTR_SIZE_VER5
+- # error
++ // error // PERF_ATTR_SIZE_VER5_BUNDLE is not available here - Fedora+RHEL
+ #endif
+ ]])], [perf_event=yes], [perf_event=no])
+ if test "$perf_event" != yes; then
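
The common.m4 hunk above disables the probe's failure path: upstream, a missing PERF_ATTR_SIZE_VER5 makes the preprocessor hit "# error" and configure records perf_event=no, whereas with the Fedora/RHEL bundle the definition is supplied by the linux-btrace.h block added earlier, so the probe is made to always pass. What AC_PREPROC_IFELSE runs is roughly the following translation unit through the C preprocessor (a sketch of the generated conftest, not the literal file):

/* Patched form of the configure probe: the "# error" directive is
   commented out, so preprocessing succeeds and perf_event=yes even on
   kernel headers that predate PERF_ATTR_SIZE_VER5.  */
#include <linux/perf_event.h>
#ifndef PERF_ATTR_SIZE_VER5
  // error // PERF_ATTR_SIZE_VER5_BUNDLE is not available here - Fedora+RHEL
#endif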