@@ -1327,6 +1327,11 @@ defines += -D_LIBC_REENTRANT
libio-mtsafe = -D_IO_MTSAFE_IO
endif
+# Enable memory tagging, if supported by the architecture
+ifeq (yes,$(memory-tagging))
+defines += -D_LIBC_MTAG
+endif
+
# The name to give to a test in test results summaries.
test-name = $(strip $(patsubst %.out, %, $(patsubst $(common-objpfx)%, %, $@)))
@@ -84,6 +84,8 @@ mach-interface-list = @mach_interface_list@
experimental-malloc = @experimental_malloc@
+memory-tagging = @memory_tagging@
+
nss-crypt = @libc_cv_nss_crypt@
static-nss-crypt = @libc_cv_static_nss_crypt@
@@ -678,6 +678,7 @@ link_obsolete_rpc
libc_cv_static_nss_crypt
libc_cv_nss_crypt
build_crypt
+memory_tagging
experimental_malloc
enable_werror
all_warnings
@@ -783,6 +784,7 @@ enable_all_warnings
enable_werror
enable_multi_arch
enable_experimental_malloc
+enable_memory_tagging
enable_crypt
enable_nss_crypt
enable_obsolete_rpc
@@ -1454,6 +1456,8 @@ Optional Features:
architectures
--disable-experimental-malloc
disable experimental malloc features
+ --enable-memory-tagging enable memory tagging if supported by the
+ architecture
--disable-crypt do not build nor install the passphrase hashing
library, libcrypt
--enable-nss-crypt enable libcrypt to use nss
@@ -3527,6 +3531,15 @@ fi
+# Check whether --enable-memory-tagging was given.
+if test "${enable_memory_tagging+set}" = set; then :
+ enableval=$enable_memory_tagging; memory_tagging=$enableval
+else
+ memory_tagging=no
+fi
+
+
+
# Check whether --enable-crypt was given.
if test "${enable_crypt+set}" = set; then :
enableval=$enable_crypt; build_crypt=$enableval
@@ -310,6 +310,13 @@ AC_ARG_ENABLE([experimental-malloc],
[experimental_malloc=yes])
AC_SUBST(experimental_malloc)
+AC_ARG_ENABLE([memory-tagging],
+ AC_HELP_STRING([--enable-memory-tagging],
+ [enable memory tagging if supported by the architecture]),
+ [memory_tagging=$enableval],
+ [memory_tagging=no])
+AC_SUBST(memory_tagging)
+
AC_ARG_ENABLE([crypt],
AC_HELP_STRING([--disable-crypt],
[do not build nor install the passphrase hashing library, libcrypt]),
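
For builders, the net effect of the configure machinery above is one new
switch, off by default. A minimal invocation (all other options elided)
would be:

    ./configure --enable-memory-tagging
    make

When the switch is given, the Makeconfig fragment at the top of this patch
adds -D_LIBC_MTAG to the compile-time defines, which gates everything that
follows.
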
@@ -242,6 +242,9 @@
/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
#include <libc-diag.h>
+/* For memory tagging. */
+#include <libc-mtag.h>
+
#include <malloc/malloc-internal.h>
/* For SINGLE_THREAD_P. */
@@ -279,6 +282,13 @@
#define MALLOC_DEBUG 0
#endif
+/* When using tagged memory, we cannot share the end of the user block
+ with the header for the next chunk, so ensure that we allocate
+   blocks that are rounded up to the granule size. */
+#define ROUND_UP_ALLOCATION_SIZE(BYTES) \
+ ((size_t) (((INTERNAL_SIZE_T)(BYTES) + __MTAG_GRANULE_SIZE - 1) \
+ & ~(__MTAG_GRANULE_SIZE - 1)))
+
#ifndef NDEBUG
# define __assert_fail(assertion, file, line, function) \
__malloc_assert(assertion, file, line, function)
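
To make the rounding concrete: with a 16-byte granule, every request is
padded up to the next granule boundary, so a user block can never end
inside the granule holding the next chunk's header. A small standalone
sketch of the same arithmetic (GRANULE and ROUND_UP are stand-in names for
__MTAG_GRANULE_SIZE and ROUND_UP_ALLOCATION_SIZE):

    #include <stddef.h>
    #include <stdio.h>

    #define GRANULE 16
    #define ROUND_UP(n) \
      (((size_t) (n) + GRANULE - 1) & ~(size_t) (GRANULE - 1))

    int
    main (void)
    {
      /* 1 -> 16, 16 -> 16, 17 -> 32.  */
      printf ("%zu %zu %zu\n", ROUND_UP (1), ROUND_UP (16), ROUND_UP (17));
      return 0;
    }
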
@@ -1173,7 +1183,8 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+#define mem2chunk(mem) \
+ ((mchunkptr)__libc_mtag_address_get_tag (((char*)(mem) - 2*SIZE_SZ)))
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
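
The extra indirection in mem2chunk is needed because, once a block has been
handed out, the user pointer carries the random tag of the user granules,
while the granule containing the chunk header keeps the heap's own tag. A
hedged sketch of what the helper does, using the ACLE intrinsics from
arm_acle.h instead of the patch's assembly routine (mem_to_chunk is a
hypothetical name; assumes LP64, where 2*SIZE_SZ == 16):

    #include <arm_acle.h>  /* requires -march=armv8.5-a+memtag */

    /* LDG loads the allocation tag stored for the header granule into
       the pointer, so the header can be read without a tag-check
       fault.  */
    static void *
    mem_to_chunk (void *mem)
    {
      return __arm_mte_get_tag ((char *) mem - 16);
    }
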
@@ -3031,6 +3042,7 @@ __libc_malloc (size_t bytes)
= atomic_forced_read (__malloc_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(bytes, RETURN_ADDRESS (0));
+ bytes = ROUND_UP_ALLOCATION_SIZE (bytes);
#if USE_TCACHE
/* int_free also calls request2size, be careful to not pad twice. */
size_t tbytes;
@@ -3048,7 +3060,9 @@ __libc_malloc (size_t bytes)
&& tcache
&& tcache->counts[tc_idx] > 0)
{
- return tcache_get (tc_idx);
+ victim = tcache_get (tc_idx);
+ return __libc_mtag_tag_region (__libc_mtag_new_tag (victim),
+ __malloc_usable_size (victim));
}
DIAG_POP_NEEDS_COMMENT;
#endif
@@ -3056,6 +3070,9 @@ __libc_malloc (size_t bytes)
if (SINGLE_THREAD_P)
{
victim = _int_malloc (&main_arena, bytes);
+ if (victim)
+ victim = __libc_mtag_tag_region (__libc_mtag_new_tag (victim),
+ __malloc_usable_size (victim));
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
&main_arena == arena_for_chunk (mem2chunk (victim)));
return victim;
@@ -3076,6 +3093,10 @@ __libc_malloc (size_t bytes)
if (ar_ptr != NULL)
__libc_lock_unlock (ar_ptr->mutex);
+ if (victim)
+ victim = __libc_mtag_tag_region (__libc_mtag_new_tag (victim),
+ __malloc_usable_size (victim));
+
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return victim;
@@ -3099,8 +3120,17 @@ __libc_free (void *mem)
if (mem == 0) /* free(0) has no effect */
return;
+#ifdef _LIBC_MTAG
+  /* Quickly check that the freed pointer's tag matches the tag stored
+     for the memory. This gives a cheap double-free and use-after-free
+     check. */
+  *(volatile char *) mem;
+#endif
+
p = mem2chunk (mem);
+ /* Mark the chunk as belonging to the library again. */
+  (void) __libc_mtag_tag_region (chunk2mem (p), __malloc_usable_size (mem));
+
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
/* See if the dynamic brk/mmap threshold needs adjusting.
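
The bare volatile load above is doing real work: with synchronous tag
checking enabled, it faults whenever the pointer's tag no longer matches
the tag stored for the memory. A deliberately buggy program illustrates
the class of error this catches (illustrative only; assumes an MTE-enabled
process):

    #include <stdlib.h>

    int
    main (void)
    {
      char *p = malloc (16);
      free (p);  /* retags the granules with the heap's tag */
      free (p);  /* p still carries the stale tag, so the volatile load
                    in __libc_free raises a tag-check fault instead of
                    silently corrupting the free lists */
      return 0;
    }
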
@@ -3150,6 +3180,13 @@ __libc_realloc (void *oldmem, size_t bytes)
if (oldmem == 0)
return __libc_malloc (bytes);
+ bytes = ROUND_UP_ALLOCATION_SIZE (bytes);
+#ifdef _LIBC_MTAG
+ /* Perform a quick check to ensure that the pointer's tag matches the
+ memory's tag. */
+  *(volatile char *) oldmem;
+#endif
+
/* chunk corresponding to oldmem */
const mchunkptr oldp = mem2chunk (oldmem);
/* its size */
@@ -3205,7 +3242,17 @@ __libc_realloc (void *oldmem, size_t bytes)
#if HAVE_MREMAP
newp = mremap_chunk (oldp, nb);
if (newp)
- return chunk2mem (newp);
+ {
+ void *newmem = chunk2mem (newp);
+ /* Give the new block a different tag. This helps to ensure
+ that stale handles to the previous mapping are not
+ reused. There's a performance hit for both us and the
+ caller for doing this, so we might want to
+ reconsider. */
+ newmem = __libc_mtag_tag_region (__libc_mtag_new_tag (newmem),
+ __malloc_usable_size (newmem));
+ return newmem;
+ }
#endif
/* Note the extra SIZE_SZ overhead. */
if (oldsize - SIZE_SZ >= nb)
@@ -3288,7 +3335,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
return 0;
}
-
+ bytes = ROUND_UP_ALLOCATION_SIZE (bytes);
/* Make sure alignment is power of 2. */
if (!powerof2 (alignment))
{
@@ -3303,8 +3350,8 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
p = _int_memalign (&main_arena, alignment, bytes);
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
&main_arena == arena_for_chunk (mem2chunk (p)));
-
- return p;
+ return p ? __libc_mtag_tag_region (__libc_mtag_new_tag (p),
+ __malloc_usable_size (p)) : NULL;
}
arena_get (ar_ptr, bytes + alignment + MINSIZE);
@@ -3322,6 +3369,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
+ return p ? __libc_mtag_tag_region (__libc_mtag_new_tag (p),
+ __malloc_usable_size (p)) : NULL;
-  return p;
}
/* For ISO C11. */
@@ -3331,20 +3380,28 @@ libc_hidden_def (__libc_memalign)
void *
__libc_valloc (size_t bytes)
{
+ void *p;
+
if (__malloc_initialized < 0)
ptmalloc_init ();
+ bytes = ROUND_UP_ALLOCATION_SIZE (bytes);
void *address = RETURN_ADDRESS (0);
size_t pagesize = GLRO (dl_pagesize);
- return _mid_memalign (pagesize, bytes, address);
+ p = _mid_memalign (pagesize, bytes, address);
+ return p ? __libc_mtag_tag_region (__libc_mtag_new_tag (p),
+ __malloc_usable_size (p)) : NULL;
}
void *
__libc_pvalloc (size_t bytes)
{
+ void *p;
+
if (__malloc_initialized < 0)
ptmalloc_init ();
+ bytes = ROUND_UP_ALLOCATION_SIZE (bytes);
void *address = RETURN_ADDRESS (0);
size_t pagesize = GLRO (dl_pagesize);
size_t rounded_bytes;
@@ -3358,7 +3415,9 @@ __libc_pvalloc (size_t bytes)
}
rounded_bytes = rounded_bytes & -(pagesize - 1);
- return _mid_memalign (pagesize, rounded_bytes, address);
+ p = _mid_memalign (pagesize, rounded_bytes, address);
+ return p ? __libc_mtag_tag_region (__libc_mtag_new_tag (p),
+ __malloc_usable_size (p)) : NULL;
}
void *
@@ -3368,9 +3427,11 @@ __libc_calloc (size_t n, size_t elem_size)
mchunkptr oldtop, p;
INTERNAL_SIZE_T sz, csz, oldtopsize;
void *mem;
+#ifndef _LIBC_MTAG
unsigned long clearsize;
unsigned long nclears;
INTERNAL_SIZE_T *d;
+#endif
ptrdiff_t bytes;
if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
@@ -3378,6 +3439,7 @@ __libc_calloc (size_t n, size_t elem_size)
__set_errno (ENOMEM);
return NULL;
}
+
sz = bytes;
void *(*hook) (size_t, const void *) =
@@ -3393,6 +3455,7 @@ __libc_calloc (size_t n, size_t elem_size)
MAYBE_INIT_TCACHE ();
+ sz = ROUND_UP_ALLOCATION_SIZE (sz);
if (SINGLE_THREAD_P)
av = &main_arena;
else
@@ -3447,7 +3510,15 @@ __libc_calloc (size_t n, size_t elem_size)
if (mem == 0)
return 0;
+ /* If we are using memory tagging, then we need to set the tags
+ regardless of MORECORE_CLEARS, so we zero the whole block while
+ doing so. */
+#ifdef _LIBC_MTAG
+ return __libc_mtag_memset_with_tag (__libc_mtag_new_tag (mem), 0,
+ __malloc_usable_size (mem));
+#else
p = mem2chunk (mem);
+ csz = chunksize (p);
/* Two optional cases in which clearing not necessary */
if (chunk_is_mmapped (p))
@@ -3458,8 +3529,6 @@ __libc_calloc (size_t n, size_t elem_size)
return mem;
}
- csz = chunksize (p);
-
#if MORECORE_CLEARS
if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
{
@@ -3502,6 +3571,7 @@ __libc_calloc (size_t n, size_t elem_size)
}
return mem;
+#endif
}
/*
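
The calloc path leans on __libc_mtag_memset_with_tag because the tags must
be written for every granule anyway, and the STGP store used by the
assembly version writes tags and data together, so the zeroing comes
essentially for free. A hypothetical C model of the helper (ACLE
intrinsics assumed; N must be a non-zero multiple of the granule size):

    #include <arm_acle.h>
    #include <string.h>

    static void *
    memset_with_tag (void *ptr, int c, size_t n)
    {
      /* Tag each 16-byte granule with PTR's tag, then fill.  The real
         routine does both in a single STGP per granule.  */
      for (char *q = ptr; q < (char *) ptr + n; q += 16)
        __arm_mte_set_tag (q);
      return memset (ptr, c, n);
    }
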
@@ -4581,7 +4651,9 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
av->top = chunk_at_offset (oldp, nb);
set_head (av->top, (newsize - nb) | PREV_INUSE);
check_inuse_chunk (av, oldp);
- return chunk2mem (oldp);
+ newmem = chunk2mem (oldp);
+ return __libc_mtag_tag_region (__libc_mtag_new_tag (newmem),
+ __malloc_usable_size (newmem));
}
/* Try to expand forward into next chunk; split off remainder below */
@@ -4614,7 +4686,10 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
}
else
{
+ newmem = __libc_mtag_tag_region (__libc_mtag_new_tag (newmem),
+ __malloc_usable_size (newmem));
memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
+ (void) __libc_mtag_tag_region (chunk2mem (oldp), oldsize);
_int_free (av, oldp, 1);
check_inuse_chunk (av, newp);
return chunk2mem (newp);
@@ -4636,6 +4711,8 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
else /* split remainder */
{
remainder = chunk_at_offset (newp, nb);
+ /* Clear any user-space tags before writing the header. */
+ remainder = __libc_mtag_tag_region (remainder, remainder_size);
set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4645,7 +4722,9 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
}
check_inuse_chunk (av, newp);
- return chunk2mem (newp);
+ newmem = chunk2mem (newp);
+ return __libc_mtag_tag_region (__libc_mtag_new_tag (newmem),
+ __malloc_usable_size (newmem));
}
/*
@@ -4865,7 +4944,7 @@ __malloc_usable_size (void *m)
size_t result;
result = musable (m);
- return result;
+ return (size_t) (((INTERNAL_SIZE_T)result) & ~(__MTAG_GRANULE_SIZE - 1));
}
/*
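
Masking the reported size down to a granule multiple keeps the reported
value in line with what the allocation paths actually retag: only whole
granules carry the caller's tag. A worked example of the arithmetic:

    #include <assert.h>

    int
    main (void)
    {
      /* On LP64, a 32-byte chunk has musable () == 32 - 8 == 24, which
         is not a granule multiple; the mask reduces it to the 16 bytes
         that were actually retagged for the caller.  */
      assert ((24u & ~15u) == 16u);
      return 0;
    }
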
@@ -16,3 +16,11 @@ endif
ifeq ($(subdir),math)
CPPFLAGS += -I../soft-fp
endif
+
+ifeq ($(subdir),string)
+sysdep_routines += __mtag_memset_tag
+endif
+
+ifeq ($(subdir),misc)
+sysdep_routines += __mtag_tag_region __mtag_new_tag __mtag_address_get_tag
+endif
new file mode 100644
@@ -0,0 +1,31 @@
+/* Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#define ptr x0
+
+ .arch armv8.5-a
+ .arch_extension memtag
+
+ENTRY (__libc_mtag_address_get_tag)
+
+ ldg ptr, [ptr]
+ ret
+END (__libc_mtag_address_get_tag)
+libc_hidden_builtin_def (__libc_mtag_address_get_tag)
new file mode 100644
@@ -0,0 +1,46 @@
+/* Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+/* Use the same register names and assignments as memset. */
+#include "memset-reg.h"
+
+ .arch armv8.5-a
+ .arch_extension memtag
+
+/* NB, only supported on variants with 64-bit pointers. */
+
+/* FIXME: This is a minimal implementation. We could do much better than
+ this for large values of COUNT. */
+
+ENTRY_ALIGN(__libc_mtag_memset_with_tag, 6)
+
+ and valw, valw, 255
+ orr valw, valw, valw, lsl 8
+ orr valw, valw, valw, lsl 16
+ orr val, val, val, lsl 32
+ mov dst, dstin
+
+L(loop):
+ stgp val, val, [dst], #16
+ subs count, count, 16
+ bne L(loop)
+ ldg dstin, [dstin] // Recover the tag created (might be untagged).
+ ret
+END (__libc_mtag_memset_with_tag)
+libc_hidden_builtin_def (__libc_mtag_memset_with_tag)
new file mode 100644
@@ -0,0 +1,38 @@
+/* Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+ .arch armv8.5-a
+ .arch_extension memtag
+
+/* NB, only supported on variants with 64-bit pointers. */
+
+/* FIXME: This is a minimal implementation. We could do better than
+ this for larger values of COUNT. */
+
+#define ptr x0
+#define xset x1
+
+ENTRY(__libc_mtag_new_tag)
+ // Guarantee that the new tag differs from the pointer's current tag.
+ gmi xset, ptr, xzr
+ irg ptr, ptr, xset
+ ret
+END (__libc_mtag_new_tag)
+libc_hidden_builtin_def (__libc_mtag_new_tag)
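
The GMI/IRG pair is what makes the guarantee hold: GMI builds an exclusion
mask containing the pointer's current tag, and IRG inserts a random tag
drawn from outside that mask, so the result always differs from the old
tag. A hedged C rendering via the ACLE intrinsics (new_tag is a
hypothetical name):

    #include <arm_acle.h>

    static void *
    new_tag (void *p)
    {
      /* Exclude P's current tag, then insert a random tag from the
         remaining choices.  */
      return __arm_mte_create_random_tag (p, __arm_mte_exclude_tag (p, 0));
    }
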
new file mode 100644
@@ -0,0 +1,44 @@
+/* Copyright (C) 2019 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+/* Register assignments for the arguments (pointer in x0, size in x1). */
+
+ .arch armv8.5-a
+ .arch_extension memtag
+
+/* NB, only supported on variants with 64-bit pointers. */
+
+/* FIXME: This is a minimal implementation. We could do better than
+ this for larger values of COUNT. */
+
+#define dstin x0
+#define count x1
+#define dst x2
+
+ENTRY_ALIGN(__libc_mtag_tag_region, 6)
+
+ mov dst, dstin
+L(loop):
+ stg dst, [dst], #16
+ subs count, count, 16
+ bne L(loop)
+ ldg dstin, [dstin] // Recover the tag created (might be untagged).
+ ret
+END (__libc_mtag_tag_region)
+libc_hidden_builtin_def (__libc_mtag_tag_region)
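
As in the memset variant, the loop stores the tag carried by the
destination pointer, one 16-byte granule per STG. A hypothetical C model,
including the closing LDG that re-reads whatever tag the memory actually
holds (the region might be untagged):

    #include <arm_acle.h>
    #include <stddef.h>

    /* N must be a non-zero multiple of the granule size.  */
    static void *
    tag_region (void *p, size_t n)
    {
      for (char *q = p; q < (char *) p + n; q += 16)
        __arm_mte_set_tag (q);       /* STG */
      return __arm_mte_get_tag (p);  /* LDG */
    }
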
new file mode 100644
@@ -0,0 +1,54 @@
+/* libc-internal interface for tagged (colored) memory support.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _AARCH64_LIBC_MTAG_H
+#define _AARCH64_LIBC_MTAG_H 1
+
+#ifndef _LIBC_MTAG
+/* Generic bindings for systems that do not support memory tagging. */
+#include_next "libc-mtag.h"
+#else
+
+/* Used to ensure additional alignment when objects need to have distinct
+ tags. */
+#define __MTAG_GRANULE_SIZE 16
+
+/* Non-zero if memory obtained via morecore (sbrk) is not tagged. */
+#define __MTAG_SBRK_UNTAGGED 1
+
+/* Set the tags for a region of memory, which must have size and alignment
+   that are multiples of __MTAG_GRANULE_SIZE. Size cannot be zero.
+   void *__libc_mtag_tag_region (void *, size_t) */
+void *__libc_mtag_tag_region (void *, size_t);
+
+/* Optimized equivalent to __libc_mtag_tag_region followed by memset. */
+void *__libc_mtag_memset_with_tag (void *, int, size_t);
+
+/* Convert address P to a pointer that is tagged correctly for that
+   location.
+   void *__libc_mtag_address_get_tag (void *) */
+void *__libc_mtag_address_get_tag (void *);
+
+/* Assign a new (random) tag to a pointer P (does not adjust the tag on
+   the memory addressed).
+   void *__libc_mtag_new_tag (void *) */
+void *__libc_mtag_new_tag (void *);
+
+#endif /* _LIBC_MTAG */
+
+#endif /* _AARCH64_LIBC_MTAG_H */
@@ -64,6 +64,25 @@
*/
ENTRY (MEMCHR)
+#ifdef _LIBC_MTAG
+ /* Quick-and-dirty implementation for MTE. Needs a rewrite, since
+    granules are only 16 bytes in size. */
+ /* Do not dereference srcin if no bytes to compare. */
+ cbz cntin, L(zero_length)
+ and chrin, chrin, #255
+L(next_byte):
+ ldrb wtmp2, [srcin], #1
+ cmp wtmp2, chrin
+ b.eq L(found)
+ subs cntin, cntin, #1
+ b.ne L(next_byte)
+L(zero_length):
+ mov result, #0
+ ret
+L(found):
+ sub result, srcin, #1
+ ret
+#else
/* Do not dereference srcin if no bytes to compare. */
cbz cntin, L(zero_length)
/*
@@ -152,10 +171,10 @@ L(tail):
/* Select result or NULL */
csel result, xzr, result, eq
ret
-
L(zero_length):
mov result, #0
ret
+#endif /* _LIBC_MTAG */
END (MEMCHR)
weak_alias (MEMCHR, memchr)
libc_hidden_builtin_def (memchr)
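
Under MTE every 16-byte granule is a potential tag boundary, so the vector
code's trick of reading slightly ahead within a 4K page is no longer safe;
hence the byte-at-a-time fallback. Its C equivalent is simply (helper name
hypothetical):

    #include <stddef.h>

    /* Never reads past the match or past N bytes, so no load can stray
       into a granule with a different tag.  */
    static void *
    memchr_bytewise (const void *s, int c, size_t n)
    {
      const unsigned char *p = s;
      for (; n != 0; n--, p++)
        if (*p == (unsigned char) c)
          return (void *) p;
      return NULL;  /* the L(zero_length) path: result = 0 */
    }
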
@@ -48,7 +48,7 @@
#define dataq2 q3
#define datav2 v3
-#ifdef TEST_PAGE_CROSS
+#if defined _LIBC_MTAG || defined TEST_PAGE_CROSS
# define MIN_PAGE_SIZE 16
#else
# define MIN_PAGE_SIZE 4096
@@ -63,6 +63,20 @@
ENTRY (strchr)
DELOUSE (0)
+#ifdef _LIBC_MTAG
+ /* Quick and dirty implementation for MTE */
+ and chrin, chrin, #255
+L(next_byte):
+ ldrb wtmp2, [srcin], #1
+ cbz wtmp2, L(end)
+ cmp wtmp2, chrin
+ b.ne L(next_byte)
+ sub result, srcin, #1
+ ret
+L(end):
+ mov result, #0
+ ret
+#else
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin
@@ -134,6 +148,7 @@ L(tail):
add result, src, tmp1, lsr #1
csel result, result, xzr, eq
ret
+#endif
END (strchr)
libc_hidden_builtin_def (strchr)
weak_alias (strchr, index)
@@ -61,6 +61,18 @@
ENTRY (__strchrnul)
DELOUSE (0)
+#ifdef _LIBC_MTAG
+ /* Quick and dirty implementation for MTE */
+ and chrin, chrin, #255
+L(next_byte):
+ ldrb wtmp2, [srcin], #1
+ cmp wtmp2, #0
+ ccmp wtmp2, chrin, #4, ne /* NZCV = 0b0100: force "eq" if the byte is NUL */
+ b.ne L(next_byte)
+
+ sub result, srcin, #1
+ ret
+#else
/* Magic constant 0x40100401 to allow us to identify which lane
matches the termination condition. */
mov wtmp2, #0x0401
@@ -126,6 +138,6 @@ L(tail):
/* tmp1 is twice the offset into the fragment. */
add result, src, tmp1, lsr #1
ret
-
+#endif /* _LIBC_MTAG */
END(__strchrnul)
weak_alias (__strchrnul, strchrnul)
@@ -46,6 +46,12 @@
#define zeroones x10
#define pos x11
+#if defined _LIBC_MTAG || defined TEST_PAGE_CROSS
+# define MIN_PAGE_SIZE 16
+#else
+# define MIN_PAGE_SIZE 4096
+#endif
+
/* Start of performance-critical section -- one 64B cache line. */
ENTRY_ALIGN(strcmp, 6)
@@ -161,10 +167,10 @@ L(do_misaligned):
b.ne L(do_misaligned)
L(loop_misaligned):
- /* Test if we are within the last dword of the end of a 4K page. If
+ /* Test if we are within the last dword of the end of a page. If
yes then jump back to the misaligned loop to copy a byte at a time. */
- and tmp1, src2, #0xff8
- eor tmp1, tmp1, #0xff8
+ and tmp1, src2, #(MIN_PAGE_SIZE - 8)
+ eor tmp1, tmp1, #(MIN_PAGE_SIZE - 8)
cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
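
Parameterizing the 0xff8 masks on MIN_PAGE_SIZE makes the same end-of-page
test fire at every granule when _LIBC_MTAG is defined. In C terms, the
AND/EOR/CBZ sequence computes (hypothetical rendering):

    #include <stdint.h>

    /* Zero exactly when ADDR lies in the last 8 bytes before a
       MIN_PAGE_SIZE boundary, i.e. when an 8-byte load may cross into
       the next page -- or, with MIN_PAGE_SIZE == 16, the next MTE
       granule.  */
    static uintptr_t
    dword_load_check (uintptr_t addr, uintptr_t min_page_size)
    {
      return (addr & (min_page_size - 8)) ^ (min_page_size - 8);
    }
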
@@ -81,7 +81,7 @@
misaligned, crosses a page boundary - after that we move to aligned
fetches for the remainder of the string. */
-#ifdef STRCPY_TEST_PAGE_CROSS
+#if defined _LIBC_MTAG || defined STRCPY_TEST_PAGE_CROSS
/* Make everything that isn't Qword aligned look like a page cross. */
#define MIN_PAGE_P2 4
#else
@@ -57,7 +57,7 @@
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
-#ifdef TEST_PAGE_CROSS
+#if defined _LIBC_MTAG || defined TEST_PAGE_CROSS
# define MIN_PAGE_SIZE 16
#else
# define MIN_PAGE_SIZE 4096
@@ -51,6 +51,12 @@
#define endloop x15
#define count mask
+#if defined _LIBC_MTAG || defined TEST_PAGE_CROSS
+# define MIN_PAGE_SIZE 16
+#else
+# define MIN_PAGE_SIZE 4096
+#endif
+
ENTRY_ALIGN_AND_PAD (strncmp, 6, 7)
DELOUSE (0)
DELOUSE (1)
@@ -233,8 +239,8 @@ L(do_misaligned):
subs limit_wd, limit_wd, #1
b.lo L(done_loop)
L(loop_misaligned):
- and tmp2, src2, #0xff8
- eor tmp2, tmp2, #0xff8
+ and tmp2, src2, #(MIN_PAGE_SIZE - 8)
+ eor tmp2, tmp2, #(MIN_PAGE_SIZE - 8)
cbz tmp2, L(page_end_loop)
ldr data1, [src1], #8
@@ -70,6 +70,19 @@
ENTRY(strrchr)
DELOUSE (0)
cbz x1, L(null_search)
+#ifdef _LIBC_MTAG
+ /* Quick and dirty version for MTE. */
+ and chrin, chrin, #255
+ mov src_match, #0
+L(next_byte):
+ ldrb wtmp2, [srcin]
+ cmp wtmp2, chrin
+ csel src_match, src_match, srcin, ne
+ add srcin, srcin, #1
+ cbnz wtmp2, L(next_byte)
+ mov result, src_match
+ ret
+#else
/* Magic constant 0x40100401 to allow us to identify which lane
matches the requested byte. Magic constant 0x80200802 used
similarly for NUL termination. */
@@ -158,9 +171,9 @@ L(tail):
csel result, result, xzr, ne
ret
+#endif
L(null_search):
b __strchrnul
-
END(strrchr)
weak_alias (strrchr, rindex)
libc_hidden_builtin_def (strrchr)
new file mode 100644
@@ -0,0 +1,49 @@
+/* libc-internal interface for tagged (colored) memory support.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GENERIC_LIBC_MTAG_H
+#define _GENERIC_LIBC_MTAG_H 1
+
+/* Generic bindings for systems that do not support memory tagging. */
+
+/* Used to ensure additional alignment when objects need to have distinct
+ tags. */
+#define __MTAG_GRANULE_SIZE 1
+
+/* Non-zero if memory obtained via morecore (sbrk) is not tagged. */
+#define __MTAG_SBRK_UNTAGGED 0
+
+/* Set the tags for a region of memory, which must have size and alignment
+   that are multiples of __MTAG_GRANULE_SIZE. Size cannot be zero.
+   void *__libc_mtag_tag_region (void *, size_t) */
+#define __libc_mtag_tag_region(p, s) (p)
+
+/* Optimized equivalent to __libc_mtag_tag_region followed by memset. */
+#define __libc_mtag_memset_with_tag memset
+
+/* Convert address P to a pointer that is tagged correctly for that
+   location.
+   void *__libc_mtag_address_get_tag (void *) */
+#define __libc_mtag_address_get_tag(p) (p)
+
+/* Assign a new (random) tag to a pointer P (does not adjust the tag on
+   the memory addressed).
+   void *__libc_mtag_new_tag (void *) */
+#define __libc_mtag_new_tag(p) (p)
+
+#endif /* _GENERIC_LIBC_MTAG_H */