Merge tag 'kgdb-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux

Pull kgdb updates from Daniel Thompson:
 "Changes for kgdb/kdb this cycle are dominated by a change from Sumit
  that removes a small (256K) private heap from kdb. This is a change
  I've hoped for ever since I discovered how few users of this heap
  remained in the kernel, so many thanks to Sumit for hunting these
  down.

  The other change is an incremental step towards SPDX headers"

* tag 'kgdb-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux:
  kernel: debug: Convert to SPDX identifier
  kdb: Rename members of struct kdbtab_t
  kdb: Simplify kdb_defcmd macro logic
  kdb: Get rid of redundant kdb_register_flags()
  kdb: Rename struct defcmd_set to struct kdb_macro
  kdb: Get rid of custom debug heap allocator
This commit is contained in:
Linus Torvalds
2021-09-07 12:08:04 -07:00
10 changed files with 387 additions and 728 deletions

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel Debug Core
*
@@ -22,10 +23,6 @@
*
* Original KGDB stub: David Grothe <dave@gcom.com>,
* Tigran Aivazian <tigran@sco.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#define pr_fmt(fmt) "KGDB: " fmt

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel Debug Core
*
@@ -22,10 +23,6 @@
*
* Original KGDB stub: David Grothe <dave@gcom.com>,
* Tigran Aivazian <tigran@sco.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>

View File

@@ -523,51 +523,51 @@ static int kdb_ss(int argc, const char **argv)
}
static kdbtab_t bptab[] = {
{ .cmd_name = "bp",
.cmd_func = kdb_bp,
.cmd_usage = "[<vaddr>]",
.cmd_help = "Set/Display breakpoints",
.cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
{ .name = "bp",
.func = kdb_bp,
.usage = "[<vaddr>]",
.help = "Set/Display breakpoints",
.flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
},
{ .cmd_name = "bl",
.cmd_func = kdb_bp,
.cmd_usage = "[<vaddr>]",
.cmd_help = "Display breakpoints",
.cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
{ .name = "bl",
.func = kdb_bp,
.usage = "[<vaddr>]",
.help = "Display breakpoints",
.flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
},
{ .cmd_name = "bc",
.cmd_func = kdb_bc,
.cmd_usage = "<bpnum>",
.cmd_help = "Clear Breakpoint",
.cmd_flags = KDB_ENABLE_FLOW_CTRL,
{ .name = "bc",
.func = kdb_bc,
.usage = "<bpnum>",
.help = "Clear Breakpoint",
.flags = KDB_ENABLE_FLOW_CTRL,
},
{ .cmd_name = "be",
.cmd_func = kdb_bc,
.cmd_usage = "<bpnum>",
.cmd_help = "Enable Breakpoint",
.cmd_flags = KDB_ENABLE_FLOW_CTRL,
{ .name = "be",
.func = kdb_bc,
.usage = "<bpnum>",
.help = "Enable Breakpoint",
.flags = KDB_ENABLE_FLOW_CTRL,
},
{ .cmd_name = "bd",
.cmd_func = kdb_bc,
.cmd_usage = "<bpnum>",
.cmd_help = "Disable Breakpoint",
.cmd_flags = KDB_ENABLE_FLOW_CTRL,
{ .name = "bd",
.func = kdb_bc,
.usage = "<bpnum>",
.help = "Disable Breakpoint",
.flags = KDB_ENABLE_FLOW_CTRL,
},
{ .cmd_name = "ss",
.cmd_func = kdb_ss,
.cmd_usage = "",
.cmd_help = "Single Step",
.cmd_minlen = 1,
.cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
{ .name = "ss",
.func = kdb_ss,
.usage = "",
.help = "Single Step",
.minlen = 1,
.flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
},
};
static kdbtab_t bphcmd = {
.cmd_name = "bph",
.cmd_func = kdb_bp,
.cmd_usage = "[<vaddr>]",
.cmd_help = "[datar [length]|dataw [length]] Set hw brk",
.cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
.name = "bph",
.func = kdb_bp,
.usage = "[<vaddr>]",
.help = "[datar [length]|dataw [length]] Set hw brk",
.flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS,
};
/* Initialize the breakpoint table and register breakpoint commands. */

View File

@@ -140,7 +140,6 @@ int kdb_stub(struct kgdb_state *ks)
*/
kdb_common_deinit_state();
KDB_STATE_CLEAR(PAGER);
kdbnearsym_cleanup();
if (error == KDB_CMD_KGDB) {
if (KDB_STATE(DOING_KGDB))
KDB_STATE_CLEAR(DOING_KGDB);

File diff suppressed because it is too large Load Diff

View File

@@ -109,7 +109,6 @@ extern int kdbgetaddrarg(int, const char **, int*, unsigned long *,
long *, char **);
extern int kdbgetsymval(const char *, kdb_symtab_t *);
extern int kdbnearsym(unsigned long, kdb_symtab_t *);
extern void kdbnearsym_cleanup(void);
extern char *kdb_strdup(const char *str, gfp_t type);
extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
@@ -165,19 +164,6 @@ typedef struct _kdb_bp {
#ifdef CONFIG_KGDB_KDB
extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
/* The KDB shell command table */
typedef struct _kdbtab {
char *cmd_name; /* Command name */
kdb_func_t cmd_func; /* Function to execute command */
char *cmd_usage; /* Usage String for this command */
char *cmd_help; /* Help message for this command */
short cmd_minlen; /* Minimum legal # command
* chars required */
kdb_cmdflags_t cmd_flags; /* Command behaviour flags */
struct list_head list_node; /* Command list */
bool is_dynamic; /* Command table allocation type */
} kdbtab_t;
extern void kdb_register_table(kdbtab_t *kp, size_t len);
extern int kdb_bt(int, const char **); /* KDB display back trace */
@@ -233,10 +219,6 @@ extern struct task_struct *kdb_curr_task(int);
#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
extern void *debug_kmalloc(size_t size, gfp_t flags);
extern void debug_kfree(void *);
extern void debug_kusage(void);
extern struct task_struct *kdb_current_task;
extern struct pt_regs *kdb_current_regs;

View File

@@ -51,48 +51,48 @@ int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
}
EXPORT_SYMBOL(kdbgetsymval);
static char *kdb_name_table[100]; /* arbitrary size */
/*
* kdbnearsym - Return the name of the symbol with the nearest address
* less than 'addr'.
/**
* kdbnearsym() - Return the name of the symbol with the nearest address
* less than @addr.
* @addr: Address to check for near symbol
* @symtab: Structure to receive results
*
* Parameters:
* addr Address to check for symbol near
* symtab Structure to receive results
* Returns:
* 0 No sections contain this address, symtab zero filled
* 1 Address mapped to module/symbol/section, data in symtab
* Remarks:
* 2.6 kallsyms has a "feature" where it unpacks the name into a
* string. If that string is reused before the caller expects it
* then the caller sees its string change without warning. To
* avoid cluttering up the main kdb code with lots of kdb_strdup,
* tests and kfree calls, kdbnearsym maintains an LRU list of the
* last few unique strings. The list is sized large enough to
* hold active strings, no kdb caller of kdbnearsym makes more
* than ~20 later calls before using a saved value.
* WARNING: This function may return a pointer to a single statically
* allocated buffer (namebuf). kdb's unusual calling context (single
* threaded, all other CPUs halted) provides us sufficient locking for
* this to be safe. The only constraint imposed by the static buffer is
* that the caller must consume any previous reply prior to another call
* to lookup a new symbol.
*
* Note that, strictly speaking, some architectures may re-enter the kdb
* trap if the system turns out to be very badly damaged and this breaks
* the single-threaded assumption above. In these circumstances successful
* continuation and exit from the inner trap is unlikely to work and any
* user attempting this receives a prominent warning before being allowed
* to progress. In these circumstances we remain memory safe because
* namebuf[KSYM_NAME_LEN-1] will never change from '\0' although we do
* tolerate the possibility of garbled symbol display from the outer kdb
* trap.
*
* Return:
* * 0 - No sections contain this address, symtab zero filled
* * 1 - Address mapped to module/symbol/section, data in symtab
*/
int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
{
int ret = 0;
unsigned long symbolsize = 0;
unsigned long offset = 0;
#define knt1_size 128 /* must be >= kallsyms table size */
char *knt1 = NULL;
static char namebuf[KSYM_NAME_LEN];
kdb_dbg_printf(AR, "addr=0x%lx, symtab=%px\n", addr, symtab);
memset(symtab, 0, sizeof(*symtab));
if (addr < 4096)
goto out;
knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
if (!knt1) {
kdb_func_printf("addr=0x%lx cannot kmalloc knt1\n", addr);
goto out;
}
symtab->sym_name = kallsyms_lookup(addr, &symbolsize , &offset,
(char **)(&symtab->mod_name), knt1);
(char **)(&symtab->mod_name), namebuf);
if (offset > 8*1024*1024) {
symtab->sym_name = NULL;
addr = offset = symbolsize = 0;
@@ -101,63 +101,14 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
symtab->sym_end = symtab->sym_start + symbolsize;
ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
if (ret) {
int i;
/* Another 2.6 kallsyms "feature". Sometimes the sym_name is
* set but the buffer passed into kallsyms_lookup is not used,
* so it contains garbage. The caller has to work out which
* buffer needs to be saved.
*
* What was Rusty smoking when he wrote that code?
*/
if (symtab->sym_name != knt1) {
strncpy(knt1, symtab->sym_name, knt1_size);
knt1[knt1_size-1] = '\0';
}
for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
if (kdb_name_table[i] &&
strcmp(kdb_name_table[i], knt1) == 0)
break;
}
if (i >= ARRAY_SIZE(kdb_name_table)) {
debug_kfree(kdb_name_table[0]);
memmove(kdb_name_table, kdb_name_table+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-1));
} else {
debug_kfree(knt1);
knt1 = kdb_name_table[i];
memmove(kdb_name_table+i, kdb_name_table+i+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-i-1));
}
i = ARRAY_SIZE(kdb_name_table) - 1;
kdb_name_table[i] = knt1;
symtab->sym_name = kdb_name_table[i];
knt1 = NULL;
}
if (symtab->mod_name == NULL)
symtab->mod_name = "kernel";
kdb_dbg_printf(AR, "returns %d symtab->sym_start=0x%lx, symtab->mod_name=%px, symtab->sym_name=%px (%s)\n",
ret, symtab->sym_start, symtab->mod_name, symtab->sym_name, symtab->sym_name);
out:
debug_kfree(knt1);
return ret;
}
void kdbnearsym_cleanup(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
if (kdb_name_table[i]) {
debug_kfree(kdb_name_table[i]);
kdb_name_table[i] = NULL;
}
}
}
static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
/*
@@ -655,230 +606,6 @@ unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
return (mask & kdb_task_state_string(state)) != 0;
}
/* Last ditch allocator for debugging, so we can still debug even when
* the GFP_ATOMIC pool has been exhausted. The algorithms are tuned
* for space usage, not for speed. One smallish memory pool, the free
* chain is always in ascending address order to allow coalescing,
* allocations are done in brute force best fit.
*/
struct debug_alloc_header {
u32 next; /* offset of next header from start of pool */
u32 size; /* usable bytes in this chunk, excluding the header */
void *caller; /* return address of the allocator's caller; NULL when free */
};
/* The memory returned by this allocator must be aligned, which means
* so must the header size. Do not assume that sizeof(struct
* debug_alloc_header) is a multiple of the alignment, explicitly
* calculate the overhead of this header, including the alignment.
* The rest of this code must not use sizeof() on any header or
* pointer to a header.
*/
#define dah_align 8
#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
static u64 debug_alloc_pool_aligned[256*1024/dah_align]; /* 256K pool */
static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
/* Locking is awkward. The debug code is called from all contexts,
* including non maskable interrupts. A normal spinlock is not safe
* in NMI context. Try to get the debug allocator lock, if it cannot
* be obtained after a second then give up. If the lock could not be
* previously obtained on this cpu then only try once.
*
* sparse has no annotation for "this function _sometimes_ acquires a
* lock", so fudge the acquire/release notation.
*/
static DEFINE_SPINLOCK(dap_lock);
/* Try to take dap_lock without risking deadlock in NMI context.
 * Returns 1 when the lock was acquired, 0 when we gave up.
 * See the locking comment above: callers must tolerate failure.
 */
static int get_dap_lock(void)
__acquires(dap_lock)
{
/* cpu that previously timed out holding-out on this lock, or -1 */
static int dap_locked = -1;
int count;
/* if this cpu already failed to get the lock, only try once */
if (dap_locked == smp_processor_id())
count = 1;
else
count = 1000;
while (1) {
if (spin_trylock(&dap_lock)) {
dap_locked = -1;
return 1;
}
if (!count--)
break;
udelay(1000); /* ~1ms per retry, up to ~1s total */
}
/* remember the failing cpu so the next attempt is a single try */
dap_locked = smp_processor_id();
__acquire(dap_lock); /* sparse fudge: annotate as acquired even on failure */
return 0;
}
/* Allocate @size bytes from the static debug pool using brute-force
 * best fit over the ascending free chain.  Returns NULL when the pool
 * cannot satisfy the request or when dap_lock cannot be taken.
 * Note: @flags is accepted for API symmetry but is not used here.
 */
void *debug_kmalloc(size_t size, gfp_t flags)
{
unsigned int rem, h_offset;
struct debug_alloc_header *best, *bestprev, *prev, *h;
void *p = NULL;
if (!get_dap_lock()) {
__release(dap_lock); /* we never actually got it */
return NULL;
}
h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
if (dah_first_call) {
/* lazy init: the whole pool starts as one free chunk */
h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
dah_first_call = 0;
}
size = ALIGN(size, dah_align);
prev = best = bestprev = NULL;
/* walk the free chain looking for the smallest chunk that fits */
while (1) {
if (h->size >= size && (!best || h->size < best->size)) {
best = h;
bestprev = prev;
if (h->size == size)
break; /* exact fit, stop searching */
}
if (!h->next)
break;
prev = h;
h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
}
if (!best)
goto out;
rem = best->size - size;
/* The pool must always contain at least one header */
if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
goto out;
if (rem >= dah_overhead) {
/* split: shrink best and create a new free header after it */
best->size = size;
h_offset = ((char *)best - debug_alloc_pool) +
dah_overhead + best->size;
h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
h->size = rem - dah_overhead;
h->next = best->next;
} else
h_offset = best->next; /* remainder too small to split off */
best->caller = __builtin_return_address(0);
dah_used += best->size;
dah_used_max = max(dah_used, dah_used_max);
/* unlink best from the free chain */
if (bestprev)
bestprev->next = h_offset;
else
dah_first = h_offset;
p = (char *)best + dah_overhead;
/* poison the payload so use-before-init is visible */
memset(p, POISON_INUSE, best->size - 1);
*((char *)p + best->size - 1) = POISON_END;
out:
spin_unlock(&dap_lock);
return p;
}
/* Free a pointer previously returned by debug_kmalloc().  Pointers that
 * fall outside the static pool are handed to kfree() instead.  The freed
 * chunk is reinserted into the address-ordered free chain and coalesced
 * with adjacent free neighbours where possible.
 */
void debug_kfree(void *p)
{
struct debug_alloc_header *h;
unsigned int h_offset;
if (!p)
return;
if ((char *)p < debug_alloc_pool ||
(char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
/* not ours: must have come from the normal allocator */
kfree(p);
return;
}
if (!get_dap_lock()) {
__release(dap_lock); /* we never actually got it */
return; /* memory leak, cannot be helped */
}
h = (struct debug_alloc_header *)((char *)p - dah_overhead);
/* poison the payload so use-after-free is visible */
memset(p, POISON_FREE, h->size - 1);
*((char *)p + h->size - 1) = POISON_END;
h->caller = NULL;
dah_used -= h->size;
h_offset = (char *)h - debug_alloc_pool;
if (h_offset < dah_first) {
/* new lowest free chunk: becomes the head of the chain */
h->next = dah_first;
dah_first = h_offset;
} else {
/* find the free chunk immediately below h in address order */
struct debug_alloc_header *prev;
unsigned int prev_offset;
prev = (struct debug_alloc_header *)(debug_alloc_pool +
dah_first);
while (1) {
if (!prev->next || prev->next > h_offset)
break;
prev = (struct debug_alloc_header *)
(debug_alloc_pool + prev->next);
}
prev_offset = (char *)prev - debug_alloc_pool;
if (prev_offset + dah_overhead + prev->size == h_offset) {
/* prev is adjacent below: merge h into prev */
prev->size += dah_overhead + h->size;
memset(h, POISON_FREE, dah_overhead - 1);
*((char *)h + dah_overhead - 1) = POISON_END;
h = prev;
h_offset = prev_offset;
} else {
/* not adjacent: link h between prev and prev->next */
h->next = prev->next;
prev->next = h_offset;
}
}
if (h_offset + dah_overhead + h->size == h->next) {
/* the next free chunk is adjacent above: merge it into h */
struct debug_alloc_header *next;
next = (struct debug_alloc_header *)
(debug_alloc_pool + h->next);
h->size += dah_overhead + next->size;
h->next = next->next;
memset(next, POISON_FREE, dah_overhead - 1);
*((char *)next + dah_overhead - 1) = POISON_END;
}
spin_unlock(&dap_lock);
}
/* Report chunks still allocated in the debug pool (i.e. leaks).  Walks
 * the gaps between free-chain entries, printing each in-use header with
 * its size and caller.  Reports at most once per boot (except on ia64,
 * where a known unwind leak would make the one-shot report useless).
 */
void debug_kusage(void)
{
struct debug_alloc_header *h_free, *h_used;
#ifdef CONFIG_IA64
/* FIXME: using dah for ia64 unwind always results in a memory leak.
 * Fix that memory leak first, then set debug_kusage_one_time = 1 for
 * all architectures.
 */
static int debug_kusage_one_time;
#else
static int debug_kusage_one_time = 1;
#endif
if (!get_dap_lock()) {
__release(dap_lock); /* we never actually got it */
return;
}
h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
/* entire pool is one free chunk (or never used): nothing leaked */
if (dah_first == 0 &&
(h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
dah_first_call))
goto out;
if (!debug_kusage_one_time)
goto out;
debug_kusage_one_time = 0;
kdb_func_printf("debug_kmalloc memory leak dah_first %d\n", dah_first);
if (dah_first) {
/* pool does not start with a free chunk, so it starts in-use */
h_used = (struct debug_alloc_header *)debug_alloc_pool;
kdb_func_printf("h_used %px size %d\n", h_used, h_used->size);
}
/* each in-use region sits directly after a free chunk */
do {
h_used = (struct debug_alloc_header *)
((char *)h_free + dah_overhead + h_free->size);
kdb_func_printf("h_used %px size %d caller %px\n",
h_used, h_used->size, h_used->caller);
h_free = (struct debug_alloc_header *)
(debug_alloc_pool + h_free->next);
} while (h_free->next);
h_used = (struct debug_alloc_header *)
((char *)h_free + dah_overhead + h_free->size);
/* anything between the last free chunk and the pool end is in use */
if ((char *)h_used - debug_alloc_pool !=
sizeof(debug_alloc_pool_aligned))
kdb_func_printf("h_used %px size %d caller %px\n",
h_used, h_used->size, h_used->caller);
out:
spin_unlock(&dap_lock);
}
/* Maintain a small stack of kdb_flags to allow recursion without disturbing
* the global kdb state.
*/