@@ -262,7 +262,6 @@ prototypes::
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
int (*migrate_folio)(struct address_space *, struct folio *dst,
struct folio *src, enum migrate_mode);
- int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
int (*error_remove_folio)(struct address_space *, struct folio *);
int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span)
@@ -288,7 +287,6 @@ release_folio: yes
free_folio: yes
direct_IO:
migrate_folio: yes (both)
-launder_folio: yes
is_partially_uptodate: yes
error_remove_folio: yes
swap_activate: no
@@ -394,12 +392,6 @@ try_to_free_buffers().
->free_folio() is called when the kernel has dropped the folio
from the page cache.
-->launder_folio() may be called prior to releasing a folio if
-it is still found to be dirty. It returns zero if the folio was successfully
-cleaned, or an error value if not. Note that in order to prevent the folio
-getting mapped back in and redirtied, it needs to be kept locked
-across the entire operation.
-
->swap_activate() will be called to prepare the given file for swap. It
should perform any validation and preparation necessary to ensure that
writes can be performed with minimal memory allocation. It should call
@@ -818,7 +818,6 @@ cache in your filesystem. The following members are defined:
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
int (*migrate_folio)(struct mapping *, struct folio *dst,
struct folio *src, enum migrate_mode);
- int (*launder_folio) (struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
@@ -1012,11 +1011,6 @@ cache in your filesystem. The following members are defined:
folio to this function. migrate_folio should transfer any private
data across and update any references that it has to the folio.
-``launder_folio``
- Called before freeing a folio - it writes back the dirty folio.
- To prevent redirtying the folio, it is kept locked during the
- whole operation.
-
``is_partially_uptodate``
Called by the VM when reading a file through the pagecache when
the underlying blocksize is smaller than the size of the folio.
@@ -89,7 +89,6 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
rreq->origin == NETFS_WRITEBACK ||
rreq->origin == NETFS_WRITETHROUGH ||
- rreq->origin == NETFS_LAUNDER_WRITE ||
rreq->origin == NETFS_UNBUFFERED_WRITE ||
rreq->origin == NETFS_DIO_WRITE);
@@ -141,7 +140,6 @@ const struct address_space_operations v9fs_addr_operations = {
.dirty_folio = netfs_dirty_folio,
.release_folio = netfs_release_folio,
.invalidate_folio = netfs_invalidate_folio,
- .launder_folio = netfs_launder_folio,
.direct_IO = noop_direct_IO,
.writepages = netfs_writepages,
};
@@ -54,7 +54,6 @@ const struct address_space_operations afs_file_aops = {
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.dirty_folio = netfs_dirty_folio,
- .launder_folio = netfs_launder_folio,
.release_folio = netfs_release_folio,
.invalidate_folio = netfs_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
@@ -916,7 +916,6 @@ struct afs_operation {
loff_t pos;
loff_t size;
loff_t i_size;
- bool laundering; /* Laundering page, PG_writeback not set */
} store;
struct {
struct iattr *attr;
@@ -75,8 +75,7 @@ static void afs_store_data_success(struct afs_operation *op)
op->ctime = op->file[0].scb.status.mtime_client;
afs_vnode_commit_status(op, &op->file[0]);
if (!afs_op_error(op)) {
- if (!op->store.laundering)
- afs_pages_written_back(vnode, op->store.pos, op->store.size);
+ afs_pages_written_back(vnode, op->store.pos, op->store.size);
afs_stat_v(vnode, n_stores);
atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
}
@@ -91,8 +90,7 @@ static const struct afs_operation_ops afs_store_data_operation = {
/*
* write to a file
*/
-static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
- bool laundering)
+static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos)
{
struct afs_operation *op;
struct afs_wb_key *wbk = NULL;
@@ -123,7 +121,6 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
op->file[0].modification = true;
op->store.pos = pos;
op->store.size = size;
- op->store.laundering = laundering;
op->flags |= AFS_OPERATION_UNINTR;
op->ops = &afs_store_data_operation;
@@ -168,8 +165,7 @@ static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
- subreq->rreq->origin == NETFS_LAUNDER_WRITE);
+ ret = afs_store_data(vnode, &subreq->io_iter, subreq->start);
netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
false);
}
@@ -1450,11 +1450,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
ceph_fscache_invalidate(inode, true);
- ret2 = invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_SHIFT,
- (pos + count - 1) >> PAGE_SHIFT);
+ ret2 = kiocb_invalidate_pages(iocb, count);
if (ret2 < 0)
- doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+ doutc(cl, "kiocb_invalidate_pages returned %d\n",
ret2);
flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
@@ -2393,21 +2393,6 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
return copied;
}
-static int fuse_launder_folio(struct folio *folio)
-{
- int err = 0;
- if (folio_clear_dirty_for_io(folio)) {
- struct inode *inode = folio->mapping->host;
-
- /* Serialize with pending writeback for the same page */
- fuse_wait_on_page_writeback(inode, folio->index);
- err = fuse_writepage_locked(&folio->page);
- if (!err)
- fuse_wait_on_page_writeback(inode, folio->index);
- }
- return err;
-}
-
/*
* Write back dirty data/metadata now (there may not be any suitable
* open files later for data)
@@ -3227,7 +3212,6 @@ static const struct address_space_operations fuse_file_aops = {
.readahead = fuse_readahead,
.writepage = fuse_writepage,
.writepages = fuse_writepages,
- .launder_folio = fuse_launder_folio,
.dirty_folio = filemap_dirty_folio,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
@@ -1181,77 +1181,3 @@ int netfs_writepages(struct address_space *mapping,
return ret;
}
EXPORT_SYMBOL(netfs_writepages);
-
-/*
- * Deal with the disposition of a laundered folio.
- */
-static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
-{
- if (wreq->error) {
- pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
- mapping_set_error(wreq->mapping, wreq->error);
- }
-}
-
-/**
- * netfs_launder_folio - Clean up a dirty folio that's being invalidated
- * @folio: The folio to clean
- *
- * This is called to write back a folio that's being invalidated when an inode
- * is getting torn down. Ideally, writepages would be used instead.
- */
-int netfs_launder_folio(struct folio *folio)
-{
- struct netfs_io_request *wreq;
- struct address_space *mapping = folio->mapping;
- struct netfs_folio *finfo = netfs_folio_info(folio);
- struct netfs_group *group = netfs_folio_group(folio);
- struct bio_vec bvec;
- unsigned long long i_size = i_size_read(mapping->host);
- unsigned long long start = folio_pos(folio);
- size_t offset = 0, len;
- int ret = 0;
-
- if (finfo) {
- offset = finfo->dirty_offset;
- start += offset;
- len = finfo->dirty_len;
- } else {
- len = folio_size(folio);
- }
- len = min_t(unsigned long long, len, i_size - start);
-
- wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
- if (IS_ERR(wreq)) {
- ret = PTR_ERR(wreq);
- goto out;
- }
-
- if (!folio_clear_dirty_for_io(folio))
- goto out_put;
-
- trace_netfs_folio(folio, netfs_folio_trace_launder);
-
- _debug("launder %llx-%llx", start, start + len - 1);
-
- /* Speculatively write to the cache. We have to fix this up later if
- * the store fails.
- */
- wreq->cleanup = netfs_cleanup_launder_folio;
-
- bvec_set_folio(&bvec, folio, len, offset);
- iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
-
-out_put:
- folio_detach_private(folio);
- netfs_put_group(group);
- kfree(finfo);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
-out:
- folio_wait_fscache(folio);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(netfs_launder_folio);
@@ -33,7 +33,6 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
[NETFS_READ_FOR_WRITE] = "RW",
[NETFS_WRITEBACK] = "WB",
[NETFS_WRITETHROUGH] = "WT",
- [NETFS_LAUNDER_WRITE] = "LW",
[NETFS_UNBUFFERED_WRITE] = "UW",
[NETFS_DIO_READ] = "DR",
[NETFS_DIO_WRITE] = "DW",
@@ -484,28 +484,6 @@ static void nfs_check_dirty_writeback(struct folio *folio,
*dirty = true;
}
-/*
- * Attempt to clear the private state associated with a page when an error
- * occurs that requires the cached contents of an inode to be written back or
- * destroyed
- * - Called if either PG_private or fscache is set on the page
- * - Caller holds page lock
- * - Return 0 if successful, -error otherwise
- */
-static int nfs_launder_folio(struct folio *folio)
-{
- struct inode *inode = folio->mapping->host;
- int ret;
-
- dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
- inode->i_ino, folio_pos(folio));
-
- folio_wait_fscache(folio);
- ret = nfs_wb_folio(inode, folio);
- trace_nfs_launder_folio_done(inode, folio, ret);
- return ret;
-}
-
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
@@ -564,7 +542,6 @@ const struct address_space_operations nfs_file_aops = {
.invalidate_folio = nfs_invalidate_folio,
.release_folio = nfs_release_folio,
.migrate_folio = nfs_migrate_folio,
- .launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
.error_remove_folio = generic_error_remove_folio,
.swap_activate = nfs_swap_activate,
@@ -1162,8 +1162,10 @@ void nfs_file_clear_open_context(struct file *filp)
* We fatal error on write before. Try to writeback
* every page again.
*/
- if (ctx->error < 0)
+ if (ctx->error < 0) {
+ filemap_fdatawrite(inode->i_mapping);
invalidate_inode_pages2(inode->i_mapping);
+ }
filp->private_data = NULL;
put_nfs_open_context_sync(ctx);
}
@@ -1039,7 +1039,6 @@ DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
DEFINE_NFS_FOLIO_EVENT(nfs_invalidate_folio);
-DEFINE_NFS_FOLIO_EVENT_DONE(nfs_launder_folio_done);
TRACE_EVENT(nfs_aop_readahead,
TP_PROTO(
@@ -626,7 +626,6 @@ static const struct address_space_operations orangefs_address_operations = {
.invalidate_folio = orangefs_invalidate_folio,
.release_folio = orangefs_release_folio,
.free_folio = orangefs_free_folio,
- .launder_folio = orangefs_launder_folio,
.direct_IO = orangefs_direct_IO,
};
@@ -2561,64 +2561,6 @@ struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
return wdata;
}
-static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
-{
- struct address_space *mapping = page->mapping;
- loff_t offset = (loff_t)page->index << PAGE_SHIFT;
- char *write_data;
- int rc = -EFAULT;
- int bytes_written = 0;
- struct inode *inode;
- struct cifsFileInfo *open_file;
-
- if (!mapping || !mapping->host)
- return -EFAULT;
-
- inode = page->mapping->host;
-
- offset += (loff_t)from;
- write_data = kmap(page);
- write_data += from;
-
- if ((to > PAGE_SIZE) || (from > to)) {
- kunmap(page);
- return -EIO;
- }
-
- /* racing with truncate? */
- if (offset > mapping->host->i_size) {
- kunmap(page);
- return 0; /* don't care */
- }
-
- /* check to make sure that we are not extending the file */
- if (mapping->host->i_size - offset < (loff_t)to)
- to = (unsigned)(mapping->host->i_size - offset);
-
- rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
- &open_file);
- if (!rc) {
- bytes_written = cifs_write(open_file, open_file->pid,
- write_data, to - from, &offset);
- cifsFileInfo_put(open_file);
- /* Does mm or vfs already set times? */
- simple_inode_init_ts(inode);
- if ((bytes_written > 0) && (offset))
- rc = 0;
- else if (bytes_written < 0)
- rc = bytes_written;
- else
- rc = -EFAULT;
- } else {
- cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
- if (!is_retryable_error(rc))
- rc = -EIO;
- }
-
- kunmap(page);
- return rc;
-}
-
/*
* Extend the region to be written back to include subsequent contiguously
* dirty pages if possible, but don't sleep while doing so.
@@ -3001,47 +2943,6 @@ static int cifs_writepages(struct address_space *mapping,
return ret;
}
-static int
-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
-{
- int rc;
- unsigned int xid;
-
- xid = get_xid();
-/* BB add check for wbc flags */
- get_page(page);
- if (!PageUptodate(page))
- cifs_dbg(FYI, "ppw - page not up to date\n");
-
- /*
- * Set the "writeback" flag, and clear "dirty" in the radix tree.
- *
- * A writepage() implementation always needs to do either this,
- * or re-dirty the page with "redirty_page_for_writepage()" in
- * the case of a failure.
- *
- * Just unlocking the page will cause the radix tree tag-bits
- * to fail to update with the state of the page correctly.
- */
- set_page_writeback(page);
-retry_write:
- rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (is_retryable_error(rc)) {
- if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
- goto retry_write;
- redirty_page_for_writepage(wbc, page);
- } else if (rc != 0) {
- SetPageError(page);
- mapping_set_error(page->mapping, rc);
- } else {
- SetPageUptodate(page);
- }
- end_page_writeback(page);
- put_page(page);
- free_xid(xid);
- return rc;
-}
-
static int cifs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
@@ -4858,27 +4759,6 @@ static void cifs_invalidate_folio(struct folio *folio, size_t offset,
folio_wait_fscache(folio);
}
-static int cifs_launder_folio(struct folio *folio)
-{
- int rc = 0;
- loff_t range_start = folio_pos(folio);
- loff_t range_end = range_start + folio_size(folio);
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0,
- .range_start = range_start,
- .range_end = range_end,
- };
-
- cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
-
- if (folio_clear_dirty_for_io(folio))
- rc = cifs_writepage_locked(&folio->page, &wbc);
-
- folio_wait_fscache(folio);
- return rc;
-}
-
void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -5057,7 +4937,6 @@ const struct address_space_operations cifs_addr_ops = {
.release_folio = cifs_release_folio,
.direct_IO = cifs_direct_io,
.invalidate_folio = cifs_invalidate_folio,
- .launder_folio = cifs_launder_folio,
.migrate_folio = filemap_migrate_folio,
/*
* TODO: investigate and if useful we could add an is_dirty_writeback
@@ -5080,6 +4959,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.dirty_folio = netfs_dirty_folio,
.release_folio = cifs_release_folio,
.invalidate_folio = cifs_invalidate_folio,
- .launder_folio = cifs_launder_folio,
.migrate_folio = filemap_migrate_folio,
};
@@ -432,7 +432,6 @@ struct address_space_operations {
*/
int (*migrate_folio)(struct address_space *, struct folio *dst,
struct folio *src, enum migrate_mode);
- int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
@@ -232,7 +232,6 @@ enum netfs_io_origin {
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
- NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
@@ -410,7 +409,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
-int netfs_launder_folio(struct folio *folio);
/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
@@ -25,7 +25,6 @@
#define netfs_write_traces \
EM(netfs_write_trace_dio_write, "DIO-WRITE") \
- EM(netfs_write_trace_launder, "LAUNDER ") \
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
EM(netfs_write_trace_writeback, "WRITEBACK") \
E_(netfs_write_trace_writethrough, "WRITETHRU")
@@ -36,7 +35,6 @@
EM(NETFS_READ_FOR_WRITE, "RW") \
EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITETHROUGH, "WT") \
- EM(NETFS_LAUNDER_WRITE, "LW") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
EM(NETFS_DIO_READ, "DR") \
E_(NETFS_DIO_WRITE, "DW")
@@ -131,7 +129,6 @@
EM(netfs_folio_trace_end_copy, "end-copy") \
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
EM(netfs_folio_trace_kill, "kill") \
- EM(netfs_folio_trace_launder, "launder") \
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
EM(netfs_folio_trace_read_gaps, "read-gaps") \
@@ -561,10 +561,10 @@ static int invalidate_complete_folio2(struct address_space *mapping,
struct folio *folio)
{
if (folio->mapping != mapping)
- return 0;
+ return -EBUSY;
if (!filemap_release_folio(folio, GFP_KERNEL))
- return 0;
+ return -EBUSY;
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
@@ -579,20 +579,11 @@ static int invalidate_complete_folio2(struct address_space *mapping,
spin_unlock(&mapping->host->i_lock);
filemap_free_folio(mapping, folio);
- return 1;
+ return 0;
failed:
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&mapping->host->i_lock);
- return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
- if (!folio_test_dirty(folio))
- return 0;
- if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
- return 0;
- return mapping->a_ops->launder_folio(folio);
+ return -EBUSY;
}
/**
@@ -657,12 +648,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
- ret2 = folio_launder(mapping, folio);
- if (ret2 == 0) {
- if (!invalidate_complete_folio2(mapping, folio))
- ret2 = -EBUSY;
- }
- if (ret2 < 0)
+ ret2 = invalidate_complete_folio2(mapping, folio);
+ if (ret2)
ret = ret2;
folio_unlock(folio);
}
invalidate_inode_pages2_range() and its wrappers are frequently used to
invalidate overlapping folios prior to and after doing direct I/O.  This
calls ->launder_folio() to flush dirty folios out to the backing store,
keeping the folio lock held across the I/O - presumably so that the folio
cannot be redirtied before it is removed from the mapping.

However...

If we're doing this prior to doing DIO on a file, there may be nothing
preventing an mmapped write from recreating and redirtying the folio the
moment it is removed from the mapping - and we cannot simply block such
writes, lest the kernel deadlock when doing DIO to/from a buffer that is
itself mmapped from the target file.

Further, invalidate_inode_pages2_range() is permitted to fail - and half
its callers don't even check whether it *did* fail, probably not
unreasonably.  In which case, there's no point doing the laundering
there; it is better to call something like filemap_fdatawrite()
beforehand.  If mmap is going to interfere, we can't stop it.

There are some other cases in which this is used:

 (1) In fuse_do_setattr(), when the size of a file is changed.  Calling
     invalidate_inode_pages2() here is probably the wrong thing to do, as
     the preceding truncate_pagecache() should do the appropriate page
     trimming and this would just seem to reduce performance for no good
     reason.

 (2) In some network filesystems, when the server informs the client of a
     third-party modification to a file, the local pagecache is zapped
     with invalidate_inode_pages2() rather than invalidate_remote_inode().
     The former writes back the dirty data whereas the latter retains it,
     plus the surrounding obsolete data in the same folio.  Maybe this
     should be done by filemap_fdatawrite() followed by
     invalidate_inode_pages2().  Possibly, ->page_mkwrite() could be used
     to hold off mmap writes until the writeback induced by the remote
     invalidation has completed.

 (3) In NFS, this is used to attempt to save the data when some sort of
     fatal error occurs.  It may be sufficient to do a
     filemap_fdatawrite() before calling invalidate_inode_pages2();
     nfs_writepages() can observe the error state and do the laundering
     itself.  Again, ->page_mkwrite() could perhaps be used to hold off
     mmap writes until the pagecache has been invalidated.

Note that this only affects 9p, afs, cifs, fuse, nfs and orangefs.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: Christoph Hellwig <hch@lst.de>
cc: Andrew Morton <akpm@linux-foundation.org>
cc: Alexander Viro <viro@zeniv.linux.org.uk>
cc: Christian Brauner <brauner@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-mm@kvack.org
cc: linux-fsdevel@vger.kernel.org
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: linux-nfs@vger.kernel.org
cc: devel@lists.orangefs.org
---
 Documentation/filesystems/locking.rst |   8 --
 Documentation/filesystems/vfs.rst     |   6 -
 fs/9p/vfs_addr.c                      |   2
 fs/afs/file.c                         |   1
 fs/afs/internal.h                     |   1
 fs/afs/write.c                        |  10 --
 fs/ceph/file.c                        |   6 -
 fs/fuse/file.c                        |  16 ----
 fs/netfs/buffered_write.c             |  74 --------------------
 fs/netfs/main.c                       |   1
 fs/nfs/file.c                         |  23 ------
 fs/nfs/inode.c                        |   4 -
 fs/nfs/nfstrace.h                     |   1
 fs/orangefs/inode.c                   |   1
 fs/smb/client/file.c                  | 122 ----------------------------------
 include/linux/fs.h                    |   1
 include/linux/netfs.h                 |   2
 include/trace/events/netfs.h          |   3
 mm/truncate.c                         |  25 +-----
 19 files changed, 14 insertions(+), 293 deletions(-)
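
As a postscript for reviewers, a minimal sketch of the flush-then-invalidate
sequence suggested for cases (2) and (3) might look roughly like the code
below.  This is illustrative only and not part of the patch: the helper name
is hypothetical, error handling is reduced to propagating the first failure,
and only filemap_fdatawrite() and invalidate_inode_pages2() are real kernel
APIs.

	/*
	 * Illustrative sketch only: write dirty data back before zapping
	 * the pagecache, rather than laundering folios from inside the
	 * invalidation loop.  The helper name is made up for this example.
	 */
	#include <linux/fs.h>
	#include <linux/pagemap.h>

	static int example_flush_and_zap_pagecache(struct inode *inode)
	{
		struct address_space *mapping = inode->i_mapping;
		int ret;

		/* Write any dirty folios back to the server first... */
		ret = filemap_fdatawrite(mapping);
		if (ret < 0)
			return ret;

		/*
		 * ...then discard the now-clean local copies.  Concurrent
		 * mmap writes may still recreate and redirty folios, so a
		 * failure here is not necessarily fatal for the caller.
		 */
		return invalidate_inode_pages2(mapping);
	}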