/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}
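
/*
 * Typical usage is to embed a struct kref inside the refcounted object,
 * initialize it when the object is created, and free the object from a
 * release callback once the last reference is dropped. An illustrative
 * sketch (struct my_data and its helpers are hypothetical, not part of
 * this header):
 *
 *	struct my_data {
 *		struct kref refcount;
 *		int payload;
 *	};
 *
 *	static struct my_data *my_data_alloc(void)
 *	{
 *		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (data)
 *			kref_init(&data->refcount);
 *		return data;
 *	}
 *
 *	static void my_data_release(struct kref *kref)
 *	{
 *		struct my_data *data = container_of(kref, struct my_data,
 *						    refcount);
 *
 *		kfree(data);
 *	}
 */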

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	/*
	 * Taking a reference on an object whose refcount has already
	 * dropped to zero is a use-after-free bug, so warn loudly.
	 */
	WARN_ON(!atomic_read(&kref->refcount));
	atomic_inc(&kref->refcount);
}

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: if this
 * function returns 0, you still can not count on the kref remaining in
 * memory. Only use the return value to check whether the kref is now gone;
 * never use it to conclude the kref is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

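/*
 * kref_sub() is mainly useful for dropping several references in a single
 * atomic operation, for example after a batch of queued operations that
 * each held a reference has completed. A sketch, reusing the hypothetical
 * my_data example from above:
 *
 *	static void my_data_complete_batch(struct my_data *data,
 *					   unsigned int nr_completed)
 *	{
 *		kref_sub(&data->refcount, nr_completed, my_data_release);
 *	}
 */
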
/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Decrement the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: if this
 * function returns 0, you still can not count on the kref remaining in
 * memory. Only use the return value to check whether the kref is now gone;
 * never use it to conclude the kref is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}

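/*
 * An illustrative sketch of the get/put pattern, continuing the
 * hypothetical my_data example from above: every code path that hands the
 * object to another user takes a reference, and every user drops its
 * reference when done, so that my_data_release() runs exactly once:
 *
 *	static void my_data_use(struct my_data *data)
 *	{
 *		kref_get(&data->refcount);
 *		do_something_with(data);
 *		kref_put(&data->refcount, my_data_release);
 *	}
 *
 * Here do_something_with() stands in for whatever work the caller does
 * while holding its reference.
 */
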
/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.
 * @lock: lock to take in release case
 *
 * Behaves identically to kref_put() with one exception: if the reference
 * count drops to zero, the lock will be taken atomically with respect to
 * dropping the reference count. The release function has to call
 * spin_unlock() without _irqrestore; this function restores the interrupt
 * flags itself after release() returns.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
		void (*release)(struct kref *kref),
		spinlock_t *lock)
{
	unsigned long flags;

	WARN_ON(release == NULL);
	if (atomic_add_unless(&kref->refcount, -1, 1))
		return 0;
	spin_lock_irqsave(lock, flags);
	if (atomic_dec_and_test(&kref->refcount)) {
		release(kref);
		local_irq_restore(flags);
		return 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return 0;
}

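/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on the
 * final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required; as with kref_put(), passing kfree
 *	     is not acceptable.
 * @lock: mutex to take on the final put.
 *
 * Behaves like kref_put(), except that @lock is taken atomically with
 * respect to the final decrement, and release() is called with @lock
 * held. It is up to release() to drop the mutex.
 */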
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded. Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure. Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero() *with return value
 * check*, locking in the kref_put() path can be deferred to the actual
 * removal from the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
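
/*
 * An illustrative sketch of the lookup pattern described above, reusing
 * the hypothetical my_data example; the idr table and the surrounding
 * RCU usage are assumptions for the sake of the example. If
 * kref_get_unless_zero() fails, the lookup raced with the final
 * kref_put() and the object must be treated as gone:
 *
 *	static struct my_data *my_data_lookup(struct idr *table, int id)
 *	{
 *		struct my_data *data;
 *
 *		rcu_read_lock();
 *		data = idr_find(table, id);
 *		if (data && !kref_get_unless_zero(&data->refcount))
 *			data = NULL;
 *		rcu_read_unlock();
 *		return data;
 *	}
 *
 * For this to be safe the destructor would remove the object from the
 * idr and only free its memory after an RCU grace period.
 */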
#endif /* _KREF_H_ */