percpu-refcount: Introduce percpu_ref_resurrect()
This function will be used in a later patch to switch the struct request_queue q_usage_counter from killed back to live. In contrast to percpu_ref_reinit(), this new function does not require that the refcount is zero.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Branch: hifive-unleashed-5.1
parent
0d25bd072b
commit
18c9a6bbe0
|
@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
|
||||||
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
|
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
|
||||||
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
|
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
|
||||||
percpu_ref_func_t *confirm_kill);
|
percpu_ref_func_t *confirm_kill);
|
||||||
|
void percpu_ref_resurrect(struct percpu_ref *ref);
|
||||||
void percpu_ref_reinit(struct percpu_ref *ref);
|
void percpu_ref_reinit(struct percpu_ref *ref);
|
||||||
|
|
||||||
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it is back in the state it had right after a
 * successful percpu_ref_init().  @ref must have dropped to zero (be dead
 * with no references remaining) but must not yet have exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	/* Stricter than percpu_ref_resurrect(): the count must be zero. */
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
|
/**
|
||||||
|
* percpu_ref_resurrect - modify a percpu refcount from dead to live
|
||||||
|
* @ref: perpcu_ref to resurrect
|
||||||
|
*
|
||||||
|
* Modify @ref so that it's in the same state as before percpu_ref_kill() was
|
||||||
|
* called. @ref must be dead but must not yet have exited.
|
||||||
|
*
|
||||||
|
* If @ref->release() frees @ref then the caller is responsible for
|
||||||
|
* guaranteeing that @ref->release() does not get called while this
|
||||||
|
* function is in progress.
|
||||||
|
*
|
||||||
|
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
|
||||||
|
* this function is in progress.
|
||||||
|
*/
|
||||||
|
void percpu_ref_resurrect(struct percpu_ref *ref)
|
||||||
|
{
|
||||||
|
unsigned long __percpu *percpu_count;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
|
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
|
||||||
|
|
||||||
WARN_ON_ONCE(!percpu_ref_is_zero(ref));
|
WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
|
||||||
|
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
|
||||||
|
|
||||||
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
|
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
|
||||||
percpu_ref_get(ref);
|
percpu_ref_get(ref);
|
||||||
|
@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
|
||||||
|
|
||||||
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
|
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
|
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
|
||||||
|
|
Loading…
Reference in New Issue