From patchwork Sun Dec 3 02:00:44 2017
X-Patchwork-Submitter: Wang Nan
X-Patchwork-Id: 120440
From: Wang Nan
To: , , , ,
CC: Wang Nan
Subject: [PATCH v2 8/8] perf tools: Replace 'backward' to 'overwrite' in evlist, mmap and record
Date: Sun, 3 Dec 2017 02:00:44 +0000
Message-ID: <20171203020044.81680-9-wangnan0@huawei.com>
X-Mailer: git-send-email 2.10.1
In-Reply-To: <20171203020044.81680-1-wangnan0@huawei.com>
References: <20171203020044.81680-1-wangnan0@huawei.com>
X-Mailing-List: linux-kernel@vger.kernel.org

Remove the backward/forward concept to make the code uniform with the
user interface (the '--overwrite' option).

Signed-off-by: Wang Nan
---
 tools/perf/builtin-record.c             | 14 +++++++-------
 tools/perf/tests/backward-ring-buffer.c |  4 ++--
 tools/perf/util/evlist.c                | 30 +++++++++++++++---------------
 tools/perf/util/evlist.h                |  2 +-
 tools/perf/util/mmap.c                  | 22 +++++++++++-----------
 5 files changed, 36 insertions(+), 36 deletions(-)

-- 
2.10.1

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 26b8571..0a5749e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -479,7 +479,7 @@ static struct perf_event_header finished_round_event = {
 };
 
 static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
-				    bool backward)
+				    bool overwrite)
 {
 	u64 bytes_written = rec->bytes_written;
 	int i;
@@ -489,18 +489,18 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
 	if (!evlist)
 		return 0;
 
-	maps = backward ? evlist->backward_mmap : evlist->mmap;
+	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
 	if (!maps)
 		return 0;
 
-	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
+	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
 		return 0;
 
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
 
 		if (maps[i].base) {
-			if (perf_mmap__push(&maps[i], backward, rec, record__pushfn) != 0) {
+			if (perf_mmap__push(&maps[i], overwrite, rec, record__pushfn) != 0) {
 				rc = -1;
 				goto out;
 			}
@@ -520,7 +520,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
 	if (bytes_written != rec->bytes_written)
 		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 
-	if (backward)
+	if (overwrite)
 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 out:
 	return rc;
@@ -692,8 +692,8 @@ perf_evlist__pick_pc(struct perf_evlist *evlist)
 	if (evlist) {
 		if (evlist->mmap && evlist->mmap[0].base)
 			return evlist->mmap[0].base;
-		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
-			return evlist->backward_mmap[0].base;
+		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
+			return evlist->overwrite_mmap[0].base;
 	}
 	return NULL;
 }
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index cf37e43..4035d43 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -33,8 +33,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 
-		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
-		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
+		perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
+		while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
 			const u32 type = event->header.type;
 
 			switch (type) {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index bb70aef..2774528a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -125,7 +125,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
-	zfree(&evlist->backward_mmap);
+	zfree(&evlist->overwrite_mmap);
 	fdarray__exit(&evlist->pollfd);
 }
 
@@ -675,11 +675,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
 	int i;
 
-	if (!evlist->backward_mmap)
+	if (!evlist->overwrite_mmap)
 		return 0;
 
 	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->backward_mmap[i].fd;
+		int fd = evlist->overwrite_mmap[i].fd;
 		int err;
 
 		if (fd < 0)
@@ -749,16 +749,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 		for (i = 0; i < evlist->nr_mmaps; i++)
 			perf_mmap__munmap(&evlist->mmap[i]);
 
-	if (evlist->backward_mmap)
+	if (evlist->overwrite_mmap)
 		for (i = 0; i < evlist->nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->backward_mmap[i]);
+			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
 }
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
 	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
-	zfree(&evlist->backward_mmap);
+	zfree(&evlist->overwrite_mmap);
 }
 
 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -800,7 +800,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 				       struct mmap_params *mp, int cpu_idx,
-				       int thread, int *_output, int *_output_backward)
+				       int thread, int *_output, int *_output_overwrite)
 {
 	struct perf_evsel *evsel;
 	int revent;
@@ -813,14 +813,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		int cpu;
 
 		if (evsel->attr.write_backward) {
-			output = _output_backward;
-			maps = evlist->backward_mmap;
+			output = _output_overwrite;
+			maps = evlist->overwrite_mmap;
 
 			if (!maps) {
 				maps = perf_evlist__alloc_mmap(evlist);
 				if (!maps)
 					return -1;
-				evlist->backward_mmap = maps;
+				evlist->overwrite_mmap = maps;
 				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
 					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 			}
@@ -885,14 +885,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
-		int output_backward = -1;
+		int output_overwrite = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 					      true);
 
 		for (thread = 0; thread < nr_threads; thread++) {
 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output, &output_backward))
+							thread, &output, &output_overwrite))
 				goto out_unmap;
 		}
 	}
@@ -913,13 +913,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
-		int output_backward = -1;
+		int output_overwrite = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 					      false);
 
 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output, &output_backward))
+						&output, &output_overwrite))
 			goto out_unmap;
 	}
 
@@ -1749,7 +1749,7 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
 		RESUME,
 	} action = NONE;
 
-	if (!evlist->backward_mmap)
+	if (!evlist->overwrite_mmap)
 		return;
 
 	switch (old_state) {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index eec3377..7516066 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -44,7 +44,7 @@ struct perf_evlist {
 	} workload;
 	struct fdarray pollfd;
 	struct perf_mmap *mmap;
-	struct perf_mmap *backward_mmap;
+	struct perf_mmap *overwrite_mmap;
 	struct thread_map *threads;
 	struct cpu_map *cpus;
 	struct perf_evsel *selected;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 5f8cb15..05076e6 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -234,18 +234,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 	return 0;
 }
 
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
 {
 	struct perf_event_header *pheader;
 	u64 evt_head = head;
 	int size = mask + 1;
 
-	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
 	pheader = (struct perf_event_header *)(buf + (head & mask));
 	*start = head;
 	while (true) {
 		if (evt_head - head >= (unsigned int)size) {
-			pr_debug("Finished reading backward ring buffer: rewind\n");
+			pr_debug("Finished reading overwrite ring buffer: rewind\n");
 			if (evt_head - head > (unsigned int)size)
 				evt_head -= pheader->size;
 			*end = evt_head;
@@ -255,7 +255,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
 
 		if (pheader->size == 0) {
-			pr_debug("Finished reading backward ring buffer: get start\n");
+			pr_debug("Finished reading overwrite ring buffer: get start\n");
 			*end = evt_head;
 			return 0;
 		}
@@ -267,7 +267,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 	return -1;
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 		    void *to, int push(void *to, void *buf, size_t size))
 {
 	u64 head = perf_mmap__read_head(md);
@@ -278,19 +278,19 @@ int perf_mmap__push(struct perf_mmap *md, bool backward,
 	void *buf;
 	int rc = 0;
 
-	start = backward ? head : old;
-	end = backward ? old : head;
+	start = overwrite ? head : old;
+	end = overwrite ? old : head;
 
 	if (start == end)
 		return 0;
 
 	size = end - start;
 	if (size > (unsigned long)(md->mask) + 1) {
-		if (!backward) {
+		if (!overwrite) {
 			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
 			md->prev = head;
-			perf_mmap__consume(md, backward);
+			perf_mmap__consume(md, overwrite);
 			return 0;
 		}
 
@@ -298,7 +298,7 @@ int perf_mmap__push(struct perf_mmap *md, bool backward,
 		 * Backward ring buffer is full. We still have a chance to read
 		 * most of data from it.
 		 */
-		if (backward_rb_find_range(data, md->mask, head, &start, &end))
+		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
			return -1;
 	}
 
@@ -323,7 +323,7 @@ int perf_mmap__push(struct perf_mmap *md, bool backward,
 	}
 
 	md->prev = head;
-	perf_mmap__consume(md, backward);
+	perf_mmap__consume(md, overwrite);
 out:
 	return rc;
 }
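
[Not part of the patch] For reviewers, a minimal sketch of what a consumer of the
renamed 'overwrite_mmap' array looks like after this change, modeled on
count_samples() in tools/perf/tests/backward-ring-buffer.c above. The helper name
drain_overwrite_mmaps() is hypothetical and only illustrates the calling pattern;
it assumes the perf-internal headers as used inside the tools/perf tree.

#include "util/evlist.h"	/* struct perf_evlist, evlist->overwrite_mmap */
#include "util/mmap.h"		/* perf_mmap__read_catchup(), perf_mmap__read_backward() */

/* Sketch only: drain every overwrite (backward-written) ring buffer. */
static int drain_overwrite_mmaps(struct perf_evlist *evlist)
{
	int i;

	if (!evlist->overwrite_mmap)	/* no evsel requested write_backward */
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		/* Catch up with the kernel's write head, then walk events backward. */
		perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
		while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
			/* handle event->header.type here */
		}
	}
	return 0;
}

The catchup-then-read order follows the test above: the ring buffer is paused or
stable while reading, and only the field/parameter names change from 'backward'
to 'overwrite'; the reading direction itself stays backward.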