This includes two fixes:

 - Bring back context level recursive protection in ring buffer. The
   simpler counter protection failed, because a path hit when tracing
   with trace_clock_global() is not reentrant and depended on the ring
   buffer's recursive protection to keep that from happening.

 - Prevent branch profiling when FORTIFY_SOURCE is enabled. It causes
   50 - 60 MB of warning messages. Branch profiling should never be
   run on production systems, so there's no reason for it to be
   enabled with FORTIFY_SOURCE.

-----BEGIN PGP SIGNATURE-----
iQHIBAABCgAyFiEEPm6V/WuN2kyArTUe1a05Y9njSUkFAlpdXGsUHHJvc3RlZHRA
Z29vZG1pcy5vcmcACgkQ1a05Y9njSUmTIwv+JZPXGYdzZs2nAdCNKfC0bR0sM+MQ
sqBN9En1UVOWn60LEt1lULTWh2AXzUKEDU3CkiDWwY44z9U3HZ4i/gbxIuxFLBUt
oNKxIGkAz9F1yqqH5oZN+YeAgwc5XijglkrxmTUuSodW9ezCVT8gMr3sQpmnR9nk
Jz2P/3XK2pbjLQQD8cmW4ZlWF2LMAnDGck8bWA1nFfe3TsZVCs56FK5vvAW+cUdF
+cJHbfa3xfj93xHbUcpPJwdNjK8e14qsmNA56ukFp2zlq62Jnh8kDz3MXpOYGkYO
fbXuPsopjtAZET3iQZEcwD5NygFF8E2cxB1qGbATXWTXYWbTO7GKQuUGSHrZGxnY
DyWkEFSvL1oS3X54sGbnTiyE7iaSwWS2QAMeC/JkmsAQjgQ4H8KqxKLsyMpdZSEK
IcoKGeLXqcSmu1mjrZ2PK+EKdKdONRm0bJXmjwkSTSQ12KXa5ubE3ahv3xpo7WD7
Lye2b1FupNPZc6sjq0iyx83t0yfaUl01AOWm
=qWqD
-----END PGP SIGNATURE-----

Merge tag 'trace-v4.15-rc4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Bring back context level recursive protection in ring buffer.

 - Prevent branch profiling when FORTIFY_SOURCE is enabled.

* tag 'trace-v4.15-rc4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Prevent PROFILE_ALL_BRANCHES when FORTIFY_SOURCE=y
  ring-buffer: Bring back context level recursive checks
commit 921d4f67bf
kernel/trace/Kconfig

@@ -355,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES
 	  on if you need to profile the system's use of these macros.
 
 config PROFILE_ALL_BRANCHES
-	bool "Profile all if conditionals"
+	bool "Profile all if conditionals" if !FORTIFY_SOURCE
 	select TRACE_BRANCH_PROFILING
 	help
 	  This tracer profiles all branch conditions. Every if ()
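For context on the guard added above: branch profiling works by rewriting if () so that every conditional bumps a per-call-site taken/not-taken counter. The following is a minimal userspace sketch of that idea, not the kernel's actual macros from include/linux/compiler.h; the names branch_record, PROFILE_IF and last_rec are invented for illustration, and it assumes GNU C statement expressions (which the kernel itself relies on):

#include <stdio.h>

struct branch_record {
	const char *file;
	int line;
	unsigned long hit[2];	/* [0] = not taken, [1] = taken */
};

static struct branch_record *last_rec;

/* Evaluate the condition once, bump this call site's counter, then
 * hand the boolean result to the real if (). */
#define PROFILE_IF(cond)						\
	if (({								\
		static struct branch_record __rec =			\
			{ __FILE__, __LINE__, { 0, 0 } };		\
		int __r = !!(cond);					\
		__rec.hit[__r]++;					\
		last_rec = &__rec;					\
		__r;							\
	}))

int main(void)
{
	for (int i = 0; i < 10; i++)
		PROFILE_IF(i % 3 == 0)
			;	/* branch body is irrelevant here */

	/* prints: taken=4 not-taken=6 for this call site */
	printf("%s:%d taken=%lu not-taken=%lu\n",
	       last_rec->file, last_rec->line,
	       last_rec->hit[1], last_rec->hit[0]);
	return 0;
}

Rewriting every if () in the tree this way is what collided with FORTIFY_SOURCE's compile-time checks and produced the 50 - 60 MB of warnings, hence gating the config prompt on !FORTIFY_SOURCE rather than trying to make the two coexist.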
kernel/trace/ring_buffer.c

@@ -2534,29 +2534,59 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. There are four
- * different contexts that we need to consider.
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing,
+ * we use a bitmask trick.
  *
- *  Normal context.
- *  SoftIRQ context
- *  IRQ context
- *  NMI context
+ *  bit 0 = NMI context
+ *  bit 1 = IRQ context
+ *  bit 2 = SoftIRQ context
+ *  bit 3 = normal context.
  *
- * If for some reason the ring buffer starts to recurse, we
- * only allow that to happen at most 4 times (one for each
- * context). If it happens 5 times, then we consider this a
- * recusive loop and do not let it go further.
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	if (cpu_buffer->current_context >= 4)
+	unsigned int val = cpu_buffer->current_context;
+	unsigned long pc = preempt_count();
+	int bit;
+
+	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+		bit = RB_CTX_NORMAL;
+	else
+		bit = pc & NMI_MASK ? RB_CTX_NMI :
+			pc & HARDIRQ_MASK ? RB_CTX_IRQ :
+			pc & SOFTIRQ_OFFSET ? 2 : RB_CTX_SOFTIRQ;
+
+	if (unlikely(val & (1 << bit)))
 		return 1;
 
-	cpu_buffer->current_context++;
-	/* Interrupts must see this update */
-	barrier();
+	val |= (1 << bit);
+	cpu_buffer->current_context = val;
 
 	return 0;
 }
@@ -2564,9 +2594,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	/* Don't let the dec leak out */
-	barrier();
-	cpu_buffer->current_context--;
+	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
 
 /**
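The restored protection is easy to exercise outside the kernel. Here is a self-contained sketch of the same bitmask scheme; it takes the context bit as a parameter instead of deriving it from preempt_count(), and the names recursive_lock/recursive_unlock are invented for illustration:

#include <assert.h>
#include <stdio.h>

/* Same ordering as the kernel's RB_CTX_* values: a lower bit is a
 * context that can preempt the ones above it. */
enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

static int recursive_lock(int bit)
{
	if (current_context & (1u << bit))
		return 1;	/* this context is already inside: recursion */
	current_context |= 1u << bit;
	return 0;
}

static void recursive_unlock(void)
{
	/* x &= x - 1 clears the lowest set bit, and because contexts
	 * nest in bit order, that is always the innermost context. */
	current_context &= current_context - 1;
}

int main(void)
{
	assert(recursive_lock(CTX_NORMAL) == 0);  /* task enters:  1000 */
	assert(recursive_lock(CTX_IRQ) == 0);     /* IRQ preempts: 1010 */
	assert(recursive_lock(CTX_IRQ) == 1);     /* IRQ re-enters: denied */

	recursive_unlock();	/* 1010 & 1001 = 1000: IRQ bit cleared */
	assert(current_context == 1u << CTX_NORMAL);
	recursive_unlock();	/* 1000 & 0111 = 0000: task bit cleared */
	assert(current_context == 0);

	printf("recursion checks behaved as expected\n");
	return 0;
}

Note that the unlock needs no record of which context it is leaving: an NMI can preempt an IRQ but never the reverse (and so on down the list), so the lowest set bit is by construction the context currently unwinding, which is exactly the property the "1010 & 1001 = 1000" example in the comment block relies on.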