author		Marcel Ziswiler <marcel.ziswiler@toradex.com>	2020-05-21 00:54:36 +0200
committer	Marcel Ziswiler <marcel.ziswiler@toradex.com>	2020-05-21 00:54:36 +0200
commit		135d39dcedcedd1f44ea0bba52f15ac5922c114f (patch)
tree		12379fc8ef7489eaca1c7245f8bd6af74c619a8f /fs/btrfs/async-thread.c
parent		187764bd111b27783b6d68ffb3b3dbb3a9bafd38 (diff)
parent		1279cd128bba968ebe0a2df7f7ae38bae90250ef (diff)

Merge remote-tracking branch 'remotes/fslc/4.9-2.3.x-imx' into toradex_4.9-2.3.x-imx-next

Conflicts:
	sound/soc/codecs/sgtl5000.c
	sound/soc/fsl/imx-sgtl5000.c
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	64
1 file changed, 52 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index ff0b0be92d61..5456937836b8 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -265,16 +265,17 @@ out:
 	}
 }
 
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+			     struct btrfs_work *self)
 {
 	struct list_head *list = &wq->ordered_list;
 	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
+	void *wtag;
+	bool free_self = false;
 
 	while (1) {
-		void *wtag;
-
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
@@ -300,16 +301,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		list_del(&work->ordered_list);
 		spin_unlock_irqrestore(lock, flags);
 
-		/*
-		 * We don't want to call the ordered free functions with the
-		 * lock held though. Save the work as tag for the trace event,
-		 * because the callback could free the structure.
-		 */
-		wtag = work;
-		work->ordered_free(work);
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
+		if (work == self) {
+			/*
+			 * This is the work item that the worker is currently
+			 * executing.
+			 *
+			 * The kernel workqueue code guarantees non-reentrancy
+			 * of work items. I.e., if a work item with the same
+			 * address and work function is queued twice, the second
+			 * execution is blocked until the first one finishes. A
+			 * work item may be freed and recycled with the same
+			 * work function; the workqueue code assumes that the
+			 * original work item cannot depend on the recycled work
+			 * item in that case (see find_worker_executing_work()).
+			 *
+			 * Note that the work of one Btrfs filesystem may depend
+			 * on the work of another Btrfs filesystem via, e.g., a
+			 * loop device. Therefore, we must not allow the current
+			 * work item to be recycled until we are really done,
+			 * otherwise we break the above assumption and can
+			 * deadlock.
+			 */
+			free_self = true;
+		} else {
+			/*
+			 * We don't want to call the ordered free functions with
+			 * the lock held though. Save the work as tag for the
+			 * trace event, because the callback could free the
+			 * structure.
+			 */
+			wtag = work;
+			work->ordered_free(work);
+			trace_btrfs_all_work_done(wq->fs_info, wtag);
+		}
 	}
 	spin_unlock_irqrestore(lock, flags);
+
+	if (free_self) {
+		wtag = self;
+		self->ordered_free(self);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
+	}
 }
 
 static void normal_work_helper(struct btrfs_work *work)
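
The comment added in the hunk above leans on the non-reentrancy guarantee of the generic kernel workqueue code: a work item that is queued again while it is already executing is not run concurrently; its next run is simply deferred until the current invocation returns. A minimal, self-contained sketch of that behaviour (illustrative only, not part of this patch; the module and its messages are made up) could look like this:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work running\n");
	msleep(100);	/* stay "executing" long enough to be re-queued */
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	queue_work(system_wq, &demo_work);
	msleep(10);	/* give the first run a chance to start */
	/*
	 * If demo_fn() is already running, its PENDING bit has been cleared,
	 * so this second queue_work() succeeds; the workqueue code notices
	 * that the same item is still executing (find_worker_executing_work())
	 * and runs it again only after the first invocation finishes, never
	 * in parallel on another CPU.
	 */
	queue_work(system_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);	/* wait out any pending or running run */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This is the property the patch has to preserve: calling ordered_free() on the currently executing item would let its memory be recycled into a "new" work item that the workqueue code treats as independent, which is why freeing self is deferred until run_ordered_work() is completely done with it.
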
@@ -337,7 +369,7 @@ static void normal_work_helper(struct btrfs_work *work)
 	work->func(work);
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
-		run_ordered_work(wq);
+		run_ordered_work(wq, work);
 	}
 	if (!need_order)
 		trace_btrfs_all_work_done(wq->fs_info, wtag);
@@ -415,3 +447,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work)
 {
 	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
 }
+
+void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
+{
+	if (wq->high)
+		flush_workqueue(wq->high->normal_wq);
+
+	flush_workqueue(wq->normal->normal_wq);
+}
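
The btrfs_flush_workqueue() helper added at the end of the diff is a thin wrapper around flush_workqueue(): it drains the optional high-priority queue first and then the normal queue, so a caller can wait for every btrfs_work item queued before the call with a single function. A hypothetical caller sketch follows (not part of this diff; the include lines and the fs_info->endio_workers member are purely illustrative assumptions):

/* Sketch only: assumes the declaration lands in fs/btrfs/async-thread.h. */
#include "ctree.h"		/* struct btrfs_fs_info (assumed for this sketch) */
#include "async-thread.h"	/* btrfs_flush_workqueue() */

static void example_drain_workers(struct btrfs_fs_info *fs_info)
{
	/*
	 * Wait for all work queued on this btrfs workqueue so far, e.g.
	 * before a sync or teardown step that must not race with in-flight
	 * workers. Work queued after this call returns is not waited for.
	 */
	btrfs_flush_workqueue(fs_info->endio_workers);
}
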