@@ -737,6 +737,9 @@ endif # KBUILD_EXTMOD
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
+CFLAGS_LLVM_COV := -fprofile-instr-generate -fcoverage-mapping
+export CFLAGS_LLVM_COV
+
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage
ifdef CONFIG_CC_IS_GCC
CFLAGS_GCOV += -fno-tree-loop-im
@@ -1601,6 +1601,7 @@ config ARCH_HAS_KERNEL_FPU_SUPPORT
the kernel, as described in Documentation/core-api/floating-point.rst.
source "kernel/gcov/Kconfig"
+source "kernel/llvm-cov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
@@ -85,6 +85,7 @@ config X86
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_KERNEL_FPU_SUPPORT
+ select ARCH_HAS_LLVM_COV if X86_64
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_MEMBARRIER_SYNC_CORE
@@ -191,6 +191,8 @@ SECTIONS
BUG_TABLE
+ LLVM_COV_DATA
+
ORC_UNWIND_TABLE
. = ALIGN(PAGE_SIZE);
@@ -334,6 +334,44 @@
#define THERMAL_TABLE(name)
#endif
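+/*
+ * Sections emitted by Clang's -fprofile-instr-generate and
+ * -fcoverage-mapping instrumentation. kernel/llvm-cov/fs.c serializes the
+ * __llvm_prf_* payload sections below into a raw profile readable from
+ * debugfs.
+ */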
+#ifdef CONFIG_LLVM_COV_KERNEL
+#define LLVM_COV_DATA \
+ __llvm_prf_data : AT(ADDR(__llvm_prf_data) - LOAD_OFFSET) { \
+ __llvm_prf_start = .; \
+ __llvm_prf_data_start = .; \
+ *(__llvm_prf_data) \
+ __llvm_prf_data_end = .; \
+ } \
+ __llvm_prf_cnts : AT(ADDR(__llvm_prf_cnts) - LOAD_OFFSET) { \
+ __llvm_prf_cnts_start = .; \
+ *(__llvm_prf_cnts) \
+ __llvm_prf_cnts_end = .; \
+ } \
+ __llvm_prf_names : AT(ADDR(__llvm_prf_names) - LOAD_OFFSET) { \
+ __llvm_prf_names_start = .; \
+ *(__llvm_prf_names) \
+ __llvm_prf_names_end = .; \
+ } \
+ __llvm_prf_bits : AT(ADDR(__llvm_prf_bits) - LOAD_OFFSET) { \
+ __llvm_prf_bits_start = .; \
+ *(__llvm_prf_bits) \
+ __llvm_prf_bits_end = .; \
+ } \
+ __llvm_covfun : AT(ADDR(__llvm_covfun) - LOAD_OFFSET) { \
+ __llvm_covfun_start = .; \
+ *(__llvm_covfun) \
+ __llvm_covfun_end = .; \
+ } \
+ __llvm_covmap : AT(ADDR(__llvm_covmap) - LOAD_OFFSET) { \
+ __llvm_covmap_start = .; \
+ *(__llvm_covmap) \
+ __llvm_covmap_end = .; \
+ __llvm_prf_end = .; \
+ }
+#else
+#define LLVM_COV_DATA
+#endif
+
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
__dtb_start = .; \
@@ -117,6 +117,7 @@ obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
obj-$(CONFIG_CFI_CLANG) += cfi.o
obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_LLVM_COV_KERNEL) += llvm-cov/
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/llvm-cov/Kconfig b/kernel/llvm-cov/Kconfig
new file mode 100644
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Clang's source-based kernel coverage measurement (EXPERIMENTAL)"
+
+config ARCH_HAS_LLVM_COV
+ bool
+
+config LLVM_COV_KERNEL
+ bool "Enable Clang's source-based kernel coverage measurement"
+ depends on DEBUG_FS
+ depends on ARCH_HAS_LLVM_COV
+ depends on CC_IS_CLANG && CLANG_VERSION >= 180000
+ help
+ This option enables Clang's Source-based Code Coverage.
+
+ On a kernel compiled with this option, run your test suites and
+ read the raw profile from /sys/kernel/debug/llvm-cov/profraw. The
+ raw profile can then be converted into the indexed format with
+ llvm-profdata and used to generate coverage reports with llvm-cov.
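+
+ A typical session (the file names here are illustrative) might
+ look like:
+
+   cp /sys/kernel/debug/llvm-cov/profraw vmlinux.profraw
+   llvm-profdata merge -o vmlinux.profdata vmlinux.profraw
+   llvm-cov report vmlinux -instr-profile=vmlinux.profdata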
+
+ Note that a kernel compiled with coverage flags will be
+ significantly larger and run slower, and that debugfs has to be
+ mounted to access the raw profile.
+
+ If unsure, say N.
+
+endmenu
diff --git a/kernel/llvm-cov/Makefile b/kernel/llvm-cov/Makefile
new file mode 100644
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+GCOV_PROFILE := n
+LLVM_COV_PROFILE := n
+
+obj-y += fs.o
diff --git a/kernel/llvm-cov/fs.c b/kernel/llvm-cov/fs.c
new file mode 100644
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Sami Tolvanen <samitolvanen@google.com>, Google, Inc.
+ * Copyright (C) 2024 Jinghao Jia <jinghao7@illinois.edu>, UIUC
+ * Copyright (C) 2024 Wentao Zhang <wentaoz5@illinois.edu>, UIUC
+ */
+
+#define pr_fmt(fmt) "llvm-cov: " fmt
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include "llvm-cov.h"
+
+/*
+ * This lock guards both counter/bitmap reset and serialization of the
+ * raw profile data. Keeping these two activities mutually exclusive
+ * ensures that we don't serialize data that is concurrently being
+ * reset.
+ */
+DEFINE_SPINLOCK(llvm_cov_lock);
+
+static struct dentry *directory;
+
+struct llvm_cov_private_data {
+ char *buffer;
+ unsigned long size;
+};
+
+/*
+ * Raw profile data format:
+ * https://llvm.org/docs/InstrProfileFormat.html#raw-profile-format. We will
+ * only populate information that's relevant to basic Source-based Code Coverage
+ * before serialization. Other features like binary IDs, continuous mode,
+ * single-byte mode, value profiling, type profiling etc. are not implemented.
+ */
+
+static void llvm_cov_fill_raw_profile_header(void **buffer)
+{
+ struct __llvm_profile_header *header = *buffer;
+
+ header->magic = INSTR_PROF_RAW_MAGIC_64;
+ header->version = INSTR_PROF_RAW_VERSION;
+ header->binary_ids_size = 0;
+ header->num_data = __llvm_prf_data_count();
+ header->padding_bytes_before_counters = 0;
+ header->num_counters = __llvm_prf_cnts_count();
+ header->padding_bytes_after_counters =
+ __llvm_prf_get_padding(__llvm_prf_cnts_size());
+ header->num_bitmap_bytes = __llvm_prf_bits_size();
+ header->padding_bytes_after_bitmap_bytes =
+ __llvm_prf_get_padding(__llvm_prf_bits_size());
+ header->names_size = __llvm_prf_names_size();
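+ /*
+ * llvm-profdata uses these link-time deltas to relocate the counter
+ * and bitmap pointers recorded in each data record; see the raw
+ * profile format documentation linked above.
+ */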
+ header->counters_delta = (u64)__llvm_prf_cnts_start -
+ (u64)__llvm_prf_data_start;
+ header->bitmap_delta = (u64)__llvm_prf_bits_start -
+ (u64)__llvm_prf_data_start;
+ header->names_delta = (u64)__llvm_prf_names_start;
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION >= 190000
+ header->num_v_tables = 0;
+ header->v_names_size = 0;
+#endif
+ header->value_kind_last = IPVK_LAST;
+
+ *buffer += sizeof(*header);
+}
+
+/*
+ * Copy the source into the buffer, incrementing the pointer into buffer in the
+ * process.
+ */
+static void llvm_cov_copy_section_to_buffer(void **buffer, void *src,
+ unsigned long size)
+{
+ memcpy(*buffer, src, size);
+ *buffer += size;
+}
+
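+/*
+ * Layout of the serialized raw profile, matching the order in which
+ * llvm_cov_serialize_raw_profile() emits the sections (each region
+ * padded to an 8-byte boundary):
+ *
+ *   header | data | cnts | bits | names
+ */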
+static unsigned long llvm_cov_get_raw_profile_size(void)
+{
+ return sizeof(struct __llvm_profile_header) +
+ __llvm_prf_data_size() +
+ __llvm_prf_cnts_size() +
+ __llvm_prf_get_padding(__llvm_prf_cnts_size()) +
+ __llvm_prf_bits_size() +
+ __llvm_prf_get_padding(__llvm_prf_bits_size()) +
+ __llvm_prf_names_size() +
+ __llvm_prf_get_padding(__llvm_prf_names_size());
+}
+
+/*
+ * Serialize in-memory data into a format LLVM tools can understand
+ * (https://llvm.org/docs/InstrProfileFormat.html#raw-profile-format).
+ * The caller must hold llvm_cov_lock and have allocated p->buffer; the
+ * section sizes are link-time constants, so the size computed by
+ * llvm_cov_get_raw_profile_size() cannot change under us.
+ */
+static void llvm_cov_serialize_raw_profile(struct llvm_cov_private_data *p)
+{
+ void *buffer = p->buffer;
+
+ llvm_cov_fill_raw_profile_header(&buffer);
+ llvm_cov_copy_section_to_buffer(&buffer, __llvm_prf_data_start,
+ __llvm_prf_data_size());
+ llvm_cov_copy_section_to_buffer(&buffer, __llvm_prf_cnts_start,
+ __llvm_prf_cnts_size());
+ buffer += __llvm_prf_get_padding(__llvm_prf_cnts_size());
+ llvm_cov_copy_section_to_buffer(&buffer, __llvm_prf_bits_start,
+ __llvm_prf_bits_size());
+ buffer += __llvm_prf_get_padding(__llvm_prf_bits_size());
+ llvm_cov_copy_section_to_buffer(&buffer, __llvm_prf_names_start,
+ __llvm_prf_names_size());
+}
+
+/* open() implementation for llvm-cov data file. */
+static int llvm_cov_open(struct inode *inode, struct file *file)
+{
+ struct llvm_cov_private_data *data;
+ unsigned long flags;
+ int err = 0;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate outside the spinlock: vzalloc() may sleep. */
+ data->size = llvm_cov_get_raw_profile_size();
+ data->buffer = vzalloc(data->size);
+ if (!data->buffer) {
+ kfree(data);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ flags = llvm_cov_claim_lock();
+ llvm_cov_serialize_raw_profile(data);
+ llvm_cov_release_lock(flags);
+
+ file->private_data = data;
+
+out:
+ return err;
+}
+
+/* read() implementation for llvm-cov data file. */
+static ssize_t llvm_cov_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct llvm_cov_private_data *data = file->private_data;
+
+ if (!data)
+ return -EBADF;
+
+ return simple_read_from_buffer(buf, count, ppos, data->buffer,
+ data->size);
+}
+
+/* release() implementation for llvm-cov data file. */
+static int llvm_cov_release(struct inode *inode, struct file *file)
+{
+ struct llvm_cov_private_data *data = file->private_data;
+
+ if (data) {
+ vfree(data->buffer);
+ kfree(data);
+ }
+
+ return 0;
+}
+
+static const struct file_operations llvm_cov_data_fops = {
+ .owner = THIS_MODULE,
+ .open = llvm_cov_open,
+ .read = llvm_cov_read,
+ .llseek = default_llseek,
+ .release = llvm_cov_release,
+};
+
+/*
+ * write() implementation for llvm-cov reset file. Writing anything to the
+ * file (e.g. "echo 1 > /sys/kernel/debug/llvm-cov/reset") zeroes the
+ * counters and bitmaps so a fresh measurement can begin.
+ */
+static ssize_t reset_write(struct file *file, const char __user *addr,
+ size_t len, loff_t *pos)
+{
+ unsigned long flags;
+
+ flags = llvm_cov_claim_lock();
+ memset(__llvm_prf_cnts_start, 0, __llvm_prf_cnts_size());
+ memset(__llvm_prf_bits_start, 0, __llvm_prf_bits_size());
+ llvm_cov_release_lock(flags);
+
+ return len;
+}
+
+static const struct file_operations llvm_cov_reset_fops = {
+ .owner = THIS_MODULE,
+ .write = reset_write,
+ .llseek = noop_llseek,
+};
+
+/* Create debugfs entries. */
+static int __init llvm_cov_init(void)
+{
+ struct dentry *entry;
+
+ /* debugfs_create_dir()/_file() return ERR_PTR(), never NULL. */
+ directory = debugfs_create_dir("llvm-cov", NULL);
+ if (IS_ERR(directory))
+ goto err_remove;
+
+ entry = debugfs_create_file("profraw", 0400, directory, NULL,
+ &llvm_cov_data_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ entry = debugfs_create_file("reset", 0200, directory, NULL,
+ &llvm_cov_reset_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ return 0;
+
+err_remove:
+ debugfs_remove_recursive(directory);
+ pr_err("initialization failed\n");
+ return -EIO;
+}
+
+/* Remove debugfs entries. */
+static void __exit llvm_cov_exit(void)
+{
+ debugfs_remove_recursive(directory);
+}
+
+module_init(llvm_cov_init);
+module_exit(llvm_cov_exit);
diff --git a/kernel/llvm-cov/llvm-cov.h b/kernel/llvm-cov/llvm-cov.h
new file mode 100644
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Sami Tolvanen <samitolvanen@google.com>, Google, Inc.
+ * Copyright (C) 2024 Jinghao Jia <jinghao7@illinois.edu>, UIUC
+ * Copyright (C) 2024 Wentao Zhang <wentaoz5@illinois.edu>, UIUC
+ */
+
+#ifndef _LLVM_COV_H
+#define _LLVM_COV_H
+
+extern spinlock_t llvm_cov_lock;
+
+static __always_inline unsigned long llvm_cov_claim_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&llvm_cov_lock, flags);
+
+ return flags;
+}
+
+static __always_inline void llvm_cov_release_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&llvm_cov_lock, flags);
+}
+
+/*
+ * Note: These internal LLVM definitions must match the compiler version.
+ * See llvm/include/llvm/ProfileData/InstrProfData.inc in LLVM's source code.
+ */
+
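+/* Equals 0xff6c70726f667281: the bytes "\xff" "lprofr" "\x81". */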
+#define INSTR_PROF_RAW_MAGIC_64 \
+ ((u64)255 << 56 | \
+ (u64)'l' << 48 | \
+ (u64)'p' << 40 | \
+ (u64)'r' << 32 | \
+ (u64)'o' << 24 | \
+ (u64)'f' << 16 | \
+ (u64)'r' << 8 | \
+ (u64)129)
+
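+/*
+ * Raw profile format v10 (Clang 19+) adds the type-profiling header
+ * fields below; Clang 18 emits v9.
+ */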
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION >= 190000
+#define INSTR_PROF_RAW_VERSION 10
+#define INSTR_PROF_DATA_ALIGNMENT 8
+#define IPVK_LAST 2
+#elif defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION >= 180000
+#define INSTR_PROF_RAW_VERSION 9
+#define INSTR_PROF_DATA_ALIGNMENT 8
+#define IPVK_LAST 1
+#endif
+
+/**
+ * struct __llvm_profile_header - represents the raw profile header data
+ * structure. Description of each member can be found here:
+ * https://llvm.org/docs/InstrProfileFormat.html#header.
+ */
+struct __llvm_profile_header {
+ u64 magic;
+ u64 version;
+ u64 binary_ids_size;
+ u64 num_data;
+ u64 padding_bytes_before_counters;
+ u64 num_counters;
+ u64 padding_bytes_after_counters;
+ u64 num_bitmap_bytes;
+ u64 padding_bytes_after_bitmap_bytes;
+ u64 names_size;
+ u64 counters_delta;
+ u64 bitmap_delta;
+ u64 names_delta;
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION >= 190000
+ u64 num_v_tables;
+ u64 v_names_size;
+#endif
+ u64 value_kind_last;
+};
+
+/**
+ * struct __llvm_profile_data - represents the per-function control structure.
+ * Description of each member can be found here:
+ * https://llvm.org/docs/InstrProfileFormat.html#profile-metadata. To measure
+ * Source-based Code Coverage, the internals of this struct don't matter at run
+ * time. The only purpose of the definition below is to run sizeof() against it
+ * so that we can calculate the "num_data" field in header.
+ */
+struct __llvm_profile_data {
+ const u64 name_ref;
+ const u64 func_hash;
+ const void *counter_ptr;
+ const void *bitmap_ptr;
+ const void *function_pointer;
+ void *values;
+ const u32 num_counters;
+ const u16 num_value_sites[IPVK_LAST + 1];
+ const u32 num_bitmap_bytes;
+} __aligned(INSTR_PROF_DATA_ALIGNMENT);
+
+/* Payload sections, delimited by the LLVM_COV_DATA linker script macro */
+
+extern struct __llvm_profile_data __llvm_prf_data_start[];
+extern struct __llvm_profile_data __llvm_prf_data_end[];
+
+extern u64 __llvm_prf_cnts_start[];
+extern u64 __llvm_prf_cnts_end[];
+
+extern char __llvm_prf_names_start[];
+extern char __llvm_prf_names_end[];
+
+extern char __llvm_prf_bits_start[];
+extern char __llvm_prf_bits_end[];
+
+#define __DEFINE_SECTION_SIZE(s) \
+ static inline unsigned long __llvm_prf_ ## s ## _size(void) \
+ { \
+ unsigned long start = \
+ (unsigned long)__llvm_prf_ ## s ## _start; \
+ unsigned long end = \
+ (unsigned long)__llvm_prf_ ## s ## _end; \
+ return end - start; \
+ }
+#define __DEFINE_SECTION_COUNT(s) \
+ static inline unsigned long __llvm_prf_ ## s ## _count(void) \
+ { \
+ return __llvm_prf_ ## s ## _size() / \
+ sizeof(__llvm_prf_ ## s ## _start[0]); \
+ }
+
+__DEFINE_SECTION_SIZE(data)
+__DEFINE_SECTION_SIZE(cnts)
+__DEFINE_SECTION_SIZE(names)
+__DEFINE_SECTION_SIZE(bits)
+
+__DEFINE_SECTION_COUNT(data)
+__DEFINE_SECTION_COUNT(cnts)
+__DEFINE_SECTION_COUNT(names)
+__DEFINE_SECTION_COUNT(bits)
+
+#undef __DEFINE_SECTION_SIZE
+#undef __DEFINE_SECTION_COUNT
+
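+/*
+ * Number of padding bytes needed to round @size up to the next 8-byte
+ * boundary, e.g. a 13-byte names section gets 3 bytes of padding.
+ */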
+static inline unsigned long __llvm_prf_get_padding(unsigned long size)
+{
+ return 7 & (sizeof(u64) - size % sizeof(u64));
+}
+
+#endif /* _LLVM_COV_H */
@@ -158,6 +158,16 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_GCOV))
endif
+#
+# Enable Clang's Source-based Code Coverage flags for a file or directory
+# depending on variables LLVM_COV_PROFILE_obj.o and LLVM_COV_PROFILE.
+# Instrumentation is on by default; set either variable to n to opt out.
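+#
+# For example, kernel/llvm-cov/Makefile sets "LLVM_COV_PROFILE := n" so the
+# profiling runtime itself is never instrumented; a hypothetical
+# "LLVM_COV_PROFILE_foo.o := n" would exclude just foo.o.
+#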
+ifeq ($(CONFIG_LLVM_COV_KERNEL),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(LLVM_COV_PROFILE_$(basetarget).o)$(LLVM_COV_PROFILE)y), \
+ $(CFLAGS_LLVM_COV))
+endif
+
#
# Enable address sanitizer flags for kernel except some files or directories
# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
@@ -743,6 +743,8 @@ static const char *const section_white_list[] =
".gnu.lto*",
".discard.*",
".llvm.call-graph-profile", /* call graph */
+ "__llvm_covfun",
+ "__llvm_covmap",
NULL
};