
Commit ae39e0b

Author: Ingo Molnar (authored and committed)

Merge branch 'locking/core' into locking/urgent, to pick up pending commits

Merge all pending locking commits into a single branch.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

2 parents: 1d7f856 + d00b83d

File tree

8 files changed: +201, -36 lines


Documentation/core-api/cleanup.rst

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Scope-based Cleanup Helpers
+===========================
+
+.. kernel-doc:: include/linux/cleanup.h
+   :doc: scope-based cleanup helpers

Documentation/core-api/index.rst

Lines changed: 1 addition & 0 deletions
@@ -35,6 +35,7 @@ Library functionality that is used throughout the kernel.

    kobject
    kref
+   cleanup
    assoc_array
    xarray
    maple_tree

arch/x86/include/asm/atomic64_32.h

Lines changed: 2 additions & 4 deletions
@@ -163,20 +163,18 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 }
 #define arch_atomic64_dec_return arch_atomic64_dec_return

-static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
         __alternative_atomic64(add, add_return,
                                ASM_OUTPUT2("+A" (i), "+c" (v)),
                                ASM_NO_INPUT_CLOBBER("memory"));
-        return i;
 }

-static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
         __alternative_atomic64(sub, sub_return,
                                ASM_OUTPUT2("+A" (i), "+c" (v)),
                                ASM_NO_INPUT_CLOBBER("memory"));
-        return i;
 }

 static __always_inline void arch_atomic64_inc(atomic64_t *v)
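
Note: the generic atomic64 API already declares the plain add/sub operations as void; only the _return/fetch variants report a value, which is what this hunk lines the 32-bit x86 helpers up with. A small hedged usage sketch (mine, not from this commit) of how callers see that split:

        #include <linux/atomic.h>

        /* illustrative only: the plain ops are fire-and-forget,
         * the _return variants report the updated counter value */
        static s64 example_usage(atomic64_t *counter)
        {
                atomic64_add(2, counter);               /* void: no value reported */
                return atomic64_sub_return(1, counter); /* new value after the sub */
        }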

arch/x86/lib/atomic64_cx8_32.S

Lines changed: 7 additions & 2 deletions
@@ -16,6 +16,11 @@
 	cmpxchg8b (\reg)
 .endm

+.macro read64_nonatomic reg
+	movl (\reg), %eax
+	movl 4(\reg), %edx
+.endm
+
 SYM_FUNC_START(atomic64_read_cx8)
 	read64 %ecx
 	RET
@@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
 	movl %edx, %edi
 	movl %ecx, %ebp

-	read64 %ecx
+	read64_nonatomic %ecx
 1:
 	movl %eax, %ebx
 	movl %edx, %ecx
@@ -79,7 +84,7 @@ addsub_return sub sub sbb
 SYM_FUNC_START(atomic64_\func\()_return_cx8)
 	pushl %ebx

-	read64 %esi
+	read64_nonatomic %esi
 1:
 	movl %eax, %ebx
 	movl %edx, %ecx
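
Note: the _return paths above immediately enter a cmpxchg8b retry loop, so the new read64_nonatomic seed does not need to be atomic; a torn read only costs an extra iteration because the compare-and-swap re-validates all 64 bits before committing. A hedged C rendering of that loop shape (mine, using a compiler builtin rather than the kernel's cmpxchg helpers):

        /* plain C sketch of the retry loop the assembly implements */
        static long long add_return_sketch(long long i, long long *counter)
        {
                long long old = *counter;       /* possibly torn on 32-bit: just a seed */
                long long new;

                do {
                        new = old + i;
                        /* the CAS checks all 64 bits atomically and, on failure,
                         * refreshes 'old' with the current value before retrying */
                } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

                return new;
        }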

include/linux/cleanup.h

Lines changed: 136 additions & 0 deletions
@@ -4,6 +4,142 @@

 #include <linux/compiler.h>

+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ *	...
+ *	struct pci_dev *dev __free(pci_dev_put) =
+ *		pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ *	return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ *	return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ *	...
+ *	guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ *	func(...)
+ *	{
+ *		if (...) {
+ *			...
+ *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ *			...
+ *		} // <- implied pci_dev_unlock() triggered here
+ *	}
+ *
+ * Observe the lock is held for the remainder of the "if ()" block, not
+ * the remainder of "func()".
+ *
+ * Now, when a function uses both __free() and guard(), or multiple
+ * instances of __free(), the LIFO order of variable definition matters.
+ * GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the function. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ *	LIST_HEAD(list);
+ *	DEFINE_MUTEX(lock);
+ *
+ *	struct object {
+ *		struct list_head node;
+ *	};
+ *
+ *	static struct object *alloc_add(void)
+ *	{
+ *		struct object *obj;
+ *
+ *		lockdep_assert_held(&lock);
+ *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ *		if (obj) {
+ *			INIT_LIST_HEAD(&obj->node);
+ *			list_add(&obj->node, &list);
+ *		}
+ *		return obj;
+ *	}
+ *
+ *	static void remove_free(struct object *obj)
+ *	{
+ *		lockdep_assert_held(&lock);
+ *		list_del(&obj->node);
+ *		kfree(obj);
+ *	}
+ *
+ *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ *	static int init(void)
+ *	{
+ *		struct object *obj __free(remove_free) = NULL;
+ *		int err;
+ *
+ *		guard(mutex)(&lock);
+ *		obj = alloc_add();
+ *
+ *		if (!obj)
+ *			return -ENOMEM;
+ *
+ *		err = other_init(obj);
+ *		if (err)
+ *			return err; // remove_free() called without the lock!!
+ *
+ *		no_free_ptr(obj);
+ *		return 0;
+ *	}
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ *	guard(mutex)(&lock);
+ *	struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem,
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
 /*
  * DEFINE_FREE(name, type, free):
  *	simple helper macro that defines the required wrapper for a __free()
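
Note: the __free()/guard() helpers documented above are built on the compiler feature quoted from the GCC manual. A minimal, self-contained user-space illustration of that underlying cleanup attribute (my sketch, not the kernel macros, whose exact definitions follow later in this header):

        #include <stdio.h>
        #include <stdlib.h>

        /* roughly what a DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) definition
         * arranges: a wrapper the compiler invokes when the annotated variable
         * goes out of scope, passed the address of that variable */
        static void cleanup_free(void *p)
        {
                void **slot = p;

                if (*slot) {
                        printf("auto-freeing %p\n", *slot);
                        free(*slot);
                }
        }

        int main(void)
        {
                /* user-space counterpart of: void *buf __free(kfree) = kmalloc(...); */
                void *buf __attribute__((cleanup(cleanup_free))) = malloc(32);

                if (!buf)
                        return 1;

                /* use buf; no explicit free() and no goto unwinding needed:
                 * cleanup_free() runs automatically when buf leaves scope, and
                 * several annotated variables unwind in reverse definition order */
                return 0;
        }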

kernel/locking/lockdep.c

Lines changed: 36 additions & 17 deletions
@@ -785,7 +785,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
 	else
 		printk("%d lock%s held by %s/%d:\n", depth,
-		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+		       str_plural(depth), p->comm, task_pid_nr(p));
 	/*
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
@@ -2067,6 +2067,9 @@ static noinline void print_bfs_bug(int ret)
 	/*
 	 * Breadth-first-search failed, graph got corrupted?
 	 */
+	if (ret == BFS_EQUEUEFULL)
+		pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
 	WARN(1, "lockdep bfs error:%d\n", ret);
 }

@@ -6196,25 +6199,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);

 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback. Must be called with
+ * the lockdep lock held; returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());

 	if (list_empty(&pf->zapped))
-		return;
+		return false;

 	if (delayed_free.scheduled)
-		return;
+		return false;

 	delayed_free.scheduled = true;

 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;

-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }

 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6240,6 +6245,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;

 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6251,14 +6257,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	lockdep_unlock();
+	raw_local_irq_restore(flags);

 	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	 * If there's pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);

-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 }

 /*
@@ -6298,17 +6308,19 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;

 	init_data_structures_once();

 	raw_local_irq_save(flags);
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6402,6 +6414,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;

 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6410,11 +6423,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)

 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);

 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }

 /*
@@ -6458,6 +6473,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;

 	might_sleep();

@@ -6478,11 +6494,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);

+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
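
Note: the call_rcu_zapped() conversions above all follow the same split: prepare_call_rcu_zapped() only decides, while still holding the lockdep lock, whether a callback needs to be queued, and the actual call_rcu() now happens after lockdep_unlock()/raw_local_irq_restore(). A condensed sketch of that calling convention (the wrapper function is illustrative only; the names inside it come from the diff):

        static void zap_and_schedule_sketch(struct pending_free *pf)
        {
                unsigned long flags;
                bool need_callback;

                raw_local_irq_save(flags);
                lockdep_lock();
                /* ... zap classes/keys into pf ... */
                need_callback = prepare_call_rcu_zapped(pf);    /* only flags the work */
                lockdep_unlock();
                raw_local_irq_restore(flags);

                /* call_rcu() is issued outside the lockdep lock, with IRQs enabled */
                if (need_callback)
                        call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
        }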

kernel/locking/lockdep_proc.c

Lines changed: 1 addition & 1 deletion
@@ -424,7 +424,7 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
 	for (i = 0; i < offset; i++)
 		seq_puts(m, " ");
 	for (i = 0; i < length; i++)
-		seq_printf(m, "%c", c);
+		seq_putc(m, c);
 	seq_puts(m, "\n");
 }
