diff mbox series

[v6,2/3] tracing/hist: Support POLLPRI event for poll on histogram

Message ID 172907577331.470540.11394819971376123947.stgit@mhiramat.roam.corp.google.com
State New
Headers show
Series tracing: Support poll on event hist file | expand

Commit Message

Masami Hiramatsu (Google) Oct. 16, 2024, 10:49 a.m. UTC
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Since POLLIN will not be flushed until the hist file is read, the user
needs to repeatedly read() and poll() on the hist file for monitoring
the event continuously. But the read() is somewhat redundant when the
user is only monitoring for event updates.

Add POLLPRI poll event on the hist file so the event returns when a
histogram is updated after open(), poll() or read(). Thus it is possible
to wait for the next event without having to issue a read().

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Reviewed-by: Tom Zanussi <zanussi@kernel.org>
---
 kernel/trace/trace_events_hist.c |   29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

Comments

Steven Rostedt Dec. 19, 2024, 4:25 p.m. UTC | #1
On Wed, 16 Oct 2024 19:49:33 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Since POLLIN will not be flashed until read the hist file, user needs
> to repeat read() and poll() on hist for monitoring the event
> continuously. But the read() is somewhat redundant only for monitoring
> events.
> 
> This add POLLPRI poll event on hist, this event returns when a histogram
> is updated after open(), poll() or read(). Thus it is possible to wait
> next event without read().

I would reword the above to:

    Since POLLIN will not be flushed until the hist file is read, the user
    needs to repeatedly read() and poll() on the hist file for monitoring the
    event continuously. But the read() is somewhat redundant when the user is
    only monitoring for event updates.
    
    Add POLLPRI poll event on the hist file so the event returns when a
    histogram is updated after open(), poll() or read(). Thus it is possible
    to wait for the next event without having to issue a read().

> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> Reviewed-by: Tom Zanussi <zanussi@kernel.org>
> ---
>  kernel/trace/trace_events_hist.c |   29 +++++++++++++++++++++++++++--
>  1 file changed, 27 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
> index 107eaa0f40f1..8819a8cc4d53 100644
> --- a/kernel/trace/trace_events_hist.c
> +++ b/kernel/trace/trace_events_hist.c
> @@ -5598,6 +5598,7 @@ static void hist_trigger_show(struct seq_file *m,
>  struct hist_file_data {
>  	struct file *file;
>  	u64 last_read;
> +	u64 last_act;
>  };
>  
>  static u64 get_hist_hit_count(struct trace_event_file *event_file)
> @@ -5635,6 +5636,11 @@ static int hist_show(struct seq_file *m, void *v)
>  			hist_trigger_show(m, data, n++);
>  	}
>  	hist_file->last_read = get_hist_hit_count(event_file);
> +	/*
> +	 * Update last_act too so that poll()/POLLPRI can wait for the next
> +	 * event after any syscall on hist file.
> +	 */
> +	hist_file->last_act = hist_file->last_read;
>  
>   out_unlock:
>  	mutex_unlock(&event_mutex);
> @@ -5648,6 +5654,7 @@ static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wai
>  	struct seq_file *m = file->private_data;
>  	struct hist_file_data *hist_file = m->private;
>  	__poll_t ret = 0;
> +	u64 cnt;
>  
>  	mutex_lock(&event_mutex);
>  
> @@ -5659,8 +5666,13 @@ static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wai
>  
>  	hist_poll_wait(file, wait);
>  
> -	if (hist_file->last_read != get_hist_hit_count(event_file))
> -		ret = EPOLLIN | EPOLLRDNORM;
> +	cnt = get_hist_hit_count(event_file);
> +	if (hist_file->last_read != cnt)
> +		ret |= EPOLLIN | EPOLLRDNORM;
> +	if (hist_file->last_act != cnt) {
> +		hist_file->last_act = cnt;
> +		ret |= EPOLLPRI;
> +	}
>  
>  out_unlock:
>  	mutex_unlock(&event_mutex);
> @@ -5679,6 +5691,7 @@ static int event_hist_release(struct inode *inode, struct file *file)
>  
>  static int event_hist_open(struct inode *inode, struct file *file)
>  {
> +	struct trace_event_file *event_file;
>  	struct hist_file_data *hist_file;
>  	int ret;
>  
> @@ -5689,13 +5702,25 @@ static int event_hist_open(struct inode *inode, struct file *file)
>  	hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
>  	if (!hist_file)
>  		return -ENOMEM;
> +
> +	mutex_lock(&event_mutex);

And switch this over to guard() as well.

Thanks,

-- Steve

> +	event_file = event_file_data(file);
> +	if (!event_file) {
> +		ret = -ENODEV;
> +		goto out_unlock;
> +	}
> +
>  	hist_file->file = file;
> +	hist_file->last_act = get_hist_hit_count(event_file);
>  
>  	/* Clear private_data to avoid warning in single_open() */
>  	file->private_data = NULL;
>  	ret = single_open(file, hist_show, hist_file);
> +
> +out_unlock:
>  	if (ret)
>  		kfree(hist_file);
> +	mutex_unlock(&event_mutex);
>  	return ret;
>  }
>
diff mbox series

Patch

diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 107eaa0f40f1..8819a8cc4d53 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -5598,6 +5598,7 @@  static void hist_trigger_show(struct seq_file *m,
 struct hist_file_data {
 	struct file *file;
 	u64 last_read;
+	u64 last_act;
 };
 
 static u64 get_hist_hit_count(struct trace_event_file *event_file)
@@ -5635,6 +5636,11 @@  static int hist_show(struct seq_file *m, void *v)
 			hist_trigger_show(m, data, n++);
 	}
 	hist_file->last_read = get_hist_hit_count(event_file);
+	/*
+	 * Update last_act too so that poll()/POLLPRI can wait for the next
+	 * event after any syscall on hist file.
+	 */
+	hist_file->last_act = hist_file->last_read;
 
  out_unlock:
 	mutex_unlock(&event_mutex);
@@ -5648,6 +5654,7 @@  static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wai
 	struct seq_file *m = file->private_data;
 	struct hist_file_data *hist_file = m->private;
 	__poll_t ret = 0;
+	u64 cnt;
 
 	mutex_lock(&event_mutex);
 
@@ -5659,8 +5666,13 @@  static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wai
 
 	hist_poll_wait(file, wait);
 
-	if (hist_file->last_read != get_hist_hit_count(event_file))
-		ret = EPOLLIN | EPOLLRDNORM;
+	cnt = get_hist_hit_count(event_file);
+	if (hist_file->last_read != cnt)
+		ret |= EPOLLIN | EPOLLRDNORM;
+	if (hist_file->last_act != cnt) {
+		hist_file->last_act = cnt;
+		ret |= EPOLLPRI;
+	}
 
 out_unlock:
 	mutex_unlock(&event_mutex);
@@ -5679,6 +5691,7 @@  static int event_hist_release(struct inode *inode, struct file *file)
 
 static int event_hist_open(struct inode *inode, struct file *file)
 {
+	struct trace_event_file *event_file;
 	struct hist_file_data *hist_file;
 	int ret;
 
@@ -5689,13 +5702,25 @@  static int event_hist_open(struct inode *inode, struct file *file)
 	hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
 	if (!hist_file)
 		return -ENOMEM;
+
+	mutex_lock(&event_mutex);
+	event_file = event_file_data(file);
+	if (!event_file) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
 	hist_file->file = file;
+	hist_file->last_act = get_hist_hit_count(event_file);
 
 	/* Clear private_data to avoid warning in single_open() */
 	file->private_data = NULL;
 	ret = single_open(file, hist_show, hist_file);
+
+out_unlock:
 	if (ret)
 		kfree(hist_file);
+	mutex_unlock(&event_mutex);
 	return ret;
 }