--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -515,8 +515,8 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
return;
/* We may need to shorten the dirty region */
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
if (t <= offset || f >= end)
return; /* Doesn't overlap */
@@ -534,17 +534,17 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
if (f == t)
goto undirty;
- priv = afs_page_dirty(f, t);
+ priv = afs_page_dirty(page, f, t);
set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page);
return;
undirty:
- trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page);
clear_page_dirty_for_io(page);
full_invalidate:
- priv = (unsigned long)detach_page_private(page);
- trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
+ detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("inval"), page);
}
/*
@@ -572,7 +572,6 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- unsigned long priv;
_enter("{{%llx:%llu}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
@@ -581,9 +580,8 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
if (PagePrivate(page)) {
- priv = (unsigned long)detach_page_private(page);
- trace_afs_page_dirty(vnode, tracepoint_string("rel"),
- page->index, priv);
+ detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("rel"), page);
}
/* indicate that the page can be released */
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -875,31 +875,31 @@ struct afs_vnode_cache_aux {
#define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
#endif
-static inline unsigned int afs_page_dirty_resolution(void)
+static inline unsigned int afs_page_dirty_resolution(struct page *page)
{
- int shift = PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
+ int shift = thp_order(page) + PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
return (shift > 0) ? shift : 0;
}
-static inline size_t afs_page_dirty_from(unsigned long priv)
+static inline size_t afs_page_dirty_from(struct page *page, unsigned long priv)
{
unsigned long x = priv & __AFS_PAGE_PRIV_MASK;
/* The lower bound is inclusive */
- return x << afs_page_dirty_resolution();
+ return x << afs_page_dirty_resolution(page);
}
-static inline size_t afs_page_dirty_to(unsigned long priv)
+static inline size_t afs_page_dirty_to(struct page *page, unsigned long priv)
{
unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
/* The upper bound is immediately beyond the region */
- return (x + 1) << afs_page_dirty_resolution();
+ return (x + 1) << afs_page_dirty_resolution(page);
}
-static inline unsigned long afs_page_dirty(size_t from, size_t to)
+static inline unsigned long afs_page_dirty(struct page *page, size_t from, size_t to)
{
- unsigned int res = afs_page_dirty_resolution();
+ unsigned int res = afs_page_dirty_resolution(page);
from >>= res;
to = (to - 1) >> res;
return (to << __AFS_PAGE_PRIV_SHIFT) | from;
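
[Not part of the patch: a small userspace sketch of the packing scheme above, showing why the
helpers now need the page. It assumes the 32-bit constant values that pair with the 0x8000
MMAPPED bit visible in the context (__AFS_PAGE_PRIV_SHIFT == 16, __AFS_PAGE_PRIV_MASK == 0x7fff),
a 4KiB base page, and stands a plain order parameter in for thp_order(page).]

#include <stdio.h>

/* Assumed 32-bit values matching the 0x8000 MMAPPED bit above. */
#define PRIV_SHIFT	16
#define PRIV_MASK	0x7fffUL
#define PAGE_SHIFT	12	/* 4KiB base pages */

/* Mirror of afs_page_dirty_resolution(): bytes per encoding unit is 1 << shift. */
static unsigned int resolution(unsigned int order)
{
	int shift = order + PAGE_SHIFT - (PRIV_SHIFT - 1);
	return (shift > 0) ? shift : 0;
}

/* Mirror of afs_page_dirty(): pack the range [from, to) into one word. */
static unsigned long pack(unsigned int order, size_t from, size_t to)
{
	unsigned int res = resolution(order);
	return (((to - 1) >> res) << PRIV_SHIFT) | (from >> res);
}

/* Mirrors of afs_page_dirty_from/to(): unpack, rounding outwards. */
static size_t unpack_from(unsigned int order, unsigned long priv)
{
	return (priv & PRIV_MASK) << resolution(order);
}

static size_t unpack_to(unsigned int order, unsigned long priv)
{
	return (((priv >> PRIV_SHIFT) & PRIV_MASK) + 1) << resolution(order);
}

int main(void)
{
	unsigned long p;

	/* Order-0 page: 0 + 12 - 15 clamps to 0, so the bounds stay byte-accurate. */
	p = pack(0, 100, 200);
	printf("4KiB page: priv=%#lx -> %zu..%zu\n", p, unpack_from(0, p), unpack_to(0, p));

	/* Order-9 (2MiB) THP: 9 + 12 - 15 = 6, so bounds widen to 64-byte granules. */
	p = pack(9, 100, 200);
	printf("2MiB THP:  priv=%#lx -> %zu..%zu\n", p, unpack_from(9, p), unpack_to(9, p));
	return 0;
}

[On the huge page the recorded region only ever widens (64..256 for a 100..200 write), which is
why the helpers must know the page size to pick the right shift.]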
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -112,15 +112,14 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
t = f = 0;
if (PagePrivate(page)) {
priv = page_private(page);
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
ASSERTCMP(f, <=, t);
}
if (f != t) {
if (PageWriteback(page)) {
- trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
- page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
goto flush_conflicting_write;
}
/* If the file is being filled locally, allow inter-write
@@ -204,21 +203,19 @@ int afs_write_end(struct file *file, struct address_space *mapping,
if (PagePrivate(page)) {
priv = page_private(page);
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
if (from < f)
f = from;
if (to > t)
t = to;
- priv = afs_page_dirty(f, t);
+ priv = afs_page_dirty(page, f, t);
set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
- page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
} else {
- priv = afs_page_dirty(from, to);
+ priv = afs_page_dirty(page, from, to);
attach_page_private(page, (void *)priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
- page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
}
set_page_dirty(page);
@@ -321,7 +318,6 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
pgoff_t first, pgoff_t last)
{
struct pagevec pv;
- unsigned long priv;
unsigned count, loop;
_enter("{%llx:%llu},{%lx-%lx}",
@@ -340,9 +336,9 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
- priv = (unsigned long)detach_page_private(pv.pages[loop]);
+ detach_page_private(pv.pages[loop]);
trace_afs_page_dirty(vnode, tracepoint_string("clear"),
- pv.pages[loop]->index, priv);
+ pv.pages[loop]);
end_page_writeback(pv.pages[loop]);
}
first += count;
@@ -516,15 +512,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
*/
start = primary_page->index;
priv = page_private(primary_page);
- offset = afs_page_dirty_from(priv);
- to = afs_page_dirty_to(priv);
- trace_afs_page_dirty(vnode, tracepoint_string("store"),
- primary_page->index, priv);
+ offset = afs_page_dirty_from(primary_page, priv);
+ to = afs_page_dirty_to(primary_page, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
WARN_ON(offset == to);
if (offset == to)
- trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
- primary_page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
if (start >= final_page ||
(to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
@@ -562,8 +556,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
}
priv = page_private(page);
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
if (f != 0 &&
!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
unlock_page(page);
@@ -571,8 +565,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
}
to = t;
- trace_afs_page_dirty(vnode, tracepoint_string("store+"),
- page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);
if (!clear_page_dirty_for_io(page))
BUG();
@@ -861,14 +854,13 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
*/
wait_on_page_writeback(vmf->page);
- priv = afs_page_dirty(0, PAGE_SIZE);
+ priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE);
priv = afs_page_dirty_mmapped(priv);
- trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
- vmf->page->index, priv);
if (PagePrivate(vmf->page))
set_page_private(vmf->page, priv);
else
attach_page_private(vmf->page, (void *)priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page);
file_update_time(file);
sb_end_pagefault(inode->i_sb);
@@ -921,17 +913,15 @@ int afs_launder_page(struct page *page)
f = 0;
t = PAGE_SIZE;
if (PagePrivate(page)) {
- f = afs_page_dirty_from(priv);
- t = afs_page_dirty_to(priv);
+ f = afs_page_dirty_from(page, priv);
+ t = afs_page_dirty_to(page, priv);
}
- trace_afs_page_dirty(vnode, tracepoint_string("launder"),
- page->index, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
ret = afs_store_data(mapping, page->index, page->index, t, f, true);
}
- priv = (unsigned long)detach_page_private(page);
- trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
- page->index, priv);
+ detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
return ret;
}
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -969,30 +969,33 @@ TRACE_EVENT(afs_dir_check_failed,
);
TRACE_EVENT(afs_page_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where,
- pgoff_t page, unsigned long priv),
+ TP_PROTO(struct afs_vnode *vnode, const char *where, struct page *page),
- TP_ARGS(vnode, where, page, priv),
+ TP_ARGS(vnode, where, page),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode )
__field(const char *, where )
__field(pgoff_t, page )
- __field(unsigned long, priv )
+ __field(unsigned long, from )
+ __field(unsigned long, to )
),
TP_fast_assign(
__entry->vnode = vnode;
__entry->where = where;
- __entry->page = page;
- __entry->priv = priv;
+ __entry->page = page->index;
+ __entry->from = afs_page_dirty_from(page, page->private);
+ __entry->to = afs_page_dirty_to(page, page->private);
+ __entry->to |= (afs_is_page_dirty_mmapped(page->private) ?
+ (1UL << (BITS_PER_LONG - 1)) : 0);
),
- TP_printk("vn=%p %lx %s %zx-%zx%s",
+ TP_printk("vn=%p %lx %s %lx-%lx%s",
__entry->vnode, __entry->page, __entry->where,
- afs_page_dirty_from(__entry->priv),
- afs_page_dirty_to(__entry->priv),
- afs_is_page_dirty_mmapped(__entry->priv) ? " M" : "")
+ __entry->from,
+ __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
+ __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
);
TRACE_EVENT(afs_call_state,

Pass a pointer to the page being accessed into the dirty region helpers so
that the size of the page can be determined in case it's a transparent huge
page.

This also required the page to be passed into the afs_page_dirty trace point -
so there's no need to specifically pass in the index or private data as these
can be retrieved directly from the page struct.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
---
 fs/afs/file.c              | 20 +++++++--------
 fs/afs/internal.h          | 16 ++++++------
 fs/afs/write.c             | 60 ++++++++++++++++++--------------------
 include/trace/events/afs.h | 23 ++++++++++-------
 4 files changed, 55 insertions(+), 64 deletions(-)
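
[Not part of the patch: a sketch of the resulting call shape, modelled on the afs_write_end()
hunk above. The function name afs_note_dirty_range() is hypothetical; the body assumes the usual
kernel context (struct page, min()/max(), the page-private helpers) and is illustrative only,
not a buildable standalone unit.]

static void afs_note_dirty_range(struct afs_vnode *vnode, struct page *page,
				 size_t from, size_t to)
{
	unsigned long priv;

	if (PagePrivate(page)) {
		/* Widen any existing region to cover the new write. */
		priv = page_private(page);
		from = min(from, afs_page_dirty_from(page, priv));
		to = max(to, afs_page_dirty_to(page, priv));
		set_page_private(page, afs_page_dirty(page, from, to));
	} else {
		attach_page_private(page, (void *)afs_page_dirty(page, from, to));
	}

	/* The trace point now pulls index, bounds and the mmapped flag from @page. */
	trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	set_page_dirty(page);
}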
Pass a pointer to the page being accessed into the dirty region helpers so that the size of the page can be determined in case it's a transparent huge page. This also required the page to be passed into the afs_page_dirty trace point - so there's no need to specifically pass in the index or private data as these can be retrieved directly from the page struct. Signed-off-by: David Howells <dhowells@redhat.com> cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org --- fs/afs/file.c | 20 +++++++-------- fs/afs/internal.h | 16 ++++++------ fs/afs/write.c | 60 ++++++++++++++++++-------------------------- include/trace/events/afs.h | 23 ++++++++++------- 4 files changed, 55 insertions(+), 64 deletions(-)