@@ -538,18 +538,22 @@ static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
+ bool recursive = gfs2_glock_is_locked_by_me(ip->i_gl);
struct gfs2_holder gh;
vm_fault_t ret;
int err;
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- err = gfs2_glock_nq(&gh);
- if (err) {
- ret = block_page_mkwrite_return(err);
- goto out_uninit;
+ if (likely(!recursive)) {
+ err = gfs2_glock_nq(&gh);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
+ goto out_uninit;
+ }
}
ret = filemap_fault(vmf);
- gfs2_glock_dq(&gh);
+ if (likely(!recursive))
+ gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
return ret;
Commit 20f829999c38 has moved the inode glock taking from gfs2_readpage
and gfs2_readahead into gfs2_file_read_iter and gfs2_fault.  In
gfs2_fault, we didn't take into account that page faults can occur while
holding the inode glock, for example:

  gfs2_file_read_iter [grabs inode glock]
    generic_file_read_iter
      filemap_read
        copy_page_to_iter
          gfs2_fault [tries to grab inode glock again]

  gfs2_file_write_iter
    iomap_file_buffered_write
      iomap_apply
        iomap_ops->iomap_begin [grabs inode glock]
        iomap_write_actor
          iov_iter_fault_in_readable
            gfs2_fault [tries to grab inode glock again]

Fix that by checking if we're holding the inode glock already.

Reported-by: Jan Kara <jack@suse.cz>
Fixes: 20f829999c38 ("gfs2: Rework read and page fault locking")
Cc: stable@vger.kernel.org # v5.8+
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/file.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)