From patchwork Mon Mar 30 19:44:43 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Eugeniy Paltsev
X-Patchwork-Id: 244570
List-Id: U-Boot discussion
From: Eugeniy.Paltsev at synopsys.com (Eugeniy Paltsev)
Date: Mon, 30 Mar 2020 22:44:43 +0300
Subject: [PATCH v2 1/3] ARC: IO: add volatile to accessors
In-Reply-To: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
References: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
Message-ID: <20200330194445.5923-2-Eugeniy.Paltsev@synopsys.com>

We must use 'volatile' in the C-version read/write IO accessor
implementations to avoid merging several reads (writes) into one read
(write), or having the compiler optimize them out entirely.

Fixes: 07906b3dad15 ("ARC: Switch to generic accessors")
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
---
 arch/arc/include/asm/io.h | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 70d050590de..1f1ae889f3a 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -54,15 +54,21 @@ static inline void sync(void)
 	/* Not yet implemented */
 }
 
-#define __arch_getb(a)		(*(unsigned char *)(a))
-#define __arch_getw(a)		(*(unsigned short *)(a))
-#define __arch_getl(a)		(*(unsigned int *)(a))
-#define __arch_getq(a)		(*(unsigned long long *)(a))
-
-#define __arch_putb(v, a)	(*(unsigned char *)(a) = (v))
-#define __arch_putw(v, a)	(*(unsigned short *)(a) = (v))
-#define __arch_putl(v, a)	(*(unsigned int *)(a) = (v))
-#define __arch_putq(v, a)	(*(unsigned long long *)(a) = (v))
+/*
+ * We must use 'volatile' in the C-version read/write IO accessor
+ * implementations to avoid merging several reads (writes) into one read
+ * (write), or having the compiler optimize them out entirely.
+ */
+#define __arch_getb(a)		(*(volatile u8 *)(a))
+#define __arch_getw(a)		(*(volatile u16 *)(a))
+#define __arch_getl(a)		(*(volatile u32 *)(a))
+#define __arch_getq(a)		(*(volatile u64 *)(a))
+
+#define __arch_putb(v, a)	(*(volatile u8 *)(a) = (v))
+#define __arch_putw(v, a)	(*(volatile u16 *)(a) = (v))
+#define __arch_putl(v, a)	(*(volatile u32 *)(a) = (v))
+#define __arch_putq(v, a)	(*(volatile u64 *)(a) = (v))
+
 
 #define __raw_writeb(v, a)	__arch_putb(v, a)
 #define __raw_writew(v, a)	__arch_putw(v, a)
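
To see why the 'volatile' matters, consider a driver polling a device
status register. The sketch below is not code from the patch; the register
address and ready bit are invented for illustration. Without 'volatile',
the compiler may hoist the MMIO load out of the loop and spin forever on a
stale value:

	#include <linux/types.h>

	/* Hypothetical MMIO status register and ready bit. */
	#define STATUS_REG	0xf0001000UL
	#define STATUS_READY	0x1u

	/* BROKEN: the load may be performed once, before the loop. */
	static void wait_ready_broken(void)
	{
		while (!(*(u32 *)STATUS_REG & STATUS_READY))
			;
	}

	/* OK: 'volatile' forces a fresh device load on every iteration,
	 * which is what the patched __arch_getl() now guarantees. */
	static void wait_ready(void)
	{
		while (!(*(volatile u32 *)STATUS_REG & STATUS_READY))
			;
	}

The same reasoning covers stores: two consecutive non-volatile writes to
the same address may be collapsed into one, which is fatal for
write-to-clear or FIFO registers.
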
From patchwork Mon Mar 30 19:44:44 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Eugeniy Paltsev
X-Patchwork-Id: 244571
List-Id: U-Boot discussion
From: Eugeniy.Paltsev at synopsys.com (Eugeniy Paltsev)
Date: Mon, 30 Mar 2020 22:44:44 +0300
Subject: [PATCH v2 2/3] ARC: IO: add compiler barriers to IO accessors
In-Reply-To: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
References: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
Message-ID: <20200330194445.5923-3-Eugeniy.Paltsev@synopsys.com>

We must use compiler barriers in the C-version read/write IO accessors
before and after the operation (read or write) so that it cannot be
reordered by the compiler.

Fixes: 07906b3dad15 ("ARC: Switch to generic accessors")
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
---
 arch/arc/include/asm/io.h | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 1f1ae889f3a..50c88456002 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -9,6 +9,13 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
+
+/*
+ * Compiler barrier. It prevents the compiler from reordering instructions
+ * around it. It does not prevent the HW (CPU) from reordering, though.
+ */
+#define __comp_b()		asm volatile("" : : : "memory")
+
 #ifdef __ARCHS__
 
 /*
@@ -45,8 +52,8 @@
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
 #else
-#define __iormb()		asm volatile("" : : : "memory")
-#define __iowmb()		asm volatile("" : : : "memory")
+#define __iormb()		__comp_b()
+#define __iowmb()		__comp_b()
 #endif
 
 static inline void sync(void)
@@ -58,16 +65,18 @@ static inline void sync(void)
  * We must use 'volatile' in the C-version read/write IO accessor
  * implementations to avoid merging several reads (writes) into one read
  * (write), or having the compiler optimize them out entirely.
+ * We must use compiler barriers before and after the operation (read or
+ * write) so that it cannot be reordered by the compiler.
  */
-#define __arch_getb(a)		(*(volatile u8 *)(a))
-#define __arch_getw(a)		(*(volatile u16 *)(a))
-#define __arch_getl(a)		(*(volatile u32 *)(a))
-#define __arch_getq(a)		(*(volatile u64 *)(a))
-
-#define __arch_putb(v, a)	(*(volatile u8 *)(a) = (v))
-#define __arch_putw(v, a)	(*(volatile u16 *)(a) = (v))
-#define __arch_putl(v, a)	(*(volatile u32 *)(a) = (v))
-#define __arch_putq(v, a)	(*(volatile u64 *)(a) = (v))
+#define __arch_getb(a)		({ u8 __v; __comp_b(); __v = *(volatile u8 *)(a); __comp_b(); __v; })
+#define __arch_getw(a)		({ u16 __v; __comp_b(); __v = *(volatile u16 *)(a); __comp_b(); __v; })
+#define __arch_getl(a)		({ u32 __v; __comp_b(); __v = *(volatile u32 *)(a); __comp_b(); __v; })
+#define __arch_getq(a)		({ u64 __v; __comp_b(); __v = *(volatile u64 *)(a); __comp_b(); __v; })
+
+#define __arch_putb(v, a)	({ __comp_b(); *(volatile u8 *)(a) = (v); __comp_b(); })
+#define __arch_putw(v, a)	({ __comp_b(); *(volatile u16 *)(a) = (v); __comp_b(); })
+#define __arch_putl(v, a)	({ __comp_b(); *(volatile u32 *)(a) = (v); __comp_b(); })
+#define __arch_putq(v, a)	({ __comp_b(); *(volatile u64 *)(a) = (v); __comp_b(); })
 
 #define __raw_writeb(v, a)	__arch_putb(v, a)
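
The __comp_b() barrier closes a hole that 'volatile' alone leaves open:
volatile accesses are ordered only against other volatile accesses, so the
compiler may still move ordinary memory operations across them. A minimal
sketch of the failure mode, with invented addresses (this is not code from
the patch):

	#include <linux/types.h>

	#define __comp_b()	asm volatile("" : : : "memory")

	/* Hypothetical DMA descriptor in normal memory plus an MMIO
	 * doorbell register that tells the device to start. */
	static u32 dma_desc[4];
	#define DMA_KICK	((volatile u32 *)0xf0002000UL)

	static void dma_start(u32 len)
	{
		dma_desc[0] = len;	/* plain store: the compiler may */
					/* legally sink it past the kick */
		__comp_b();		/* ...but not past this barrier  */
		*DMA_KICK = 1;		/* volatile MMIO store: go!      */
	}

Note that __comp_b() constrains only the compiler. Ordering as seen by the
CPU and the interconnect still needs rmb()/wmb(), which is what
__iormb()/__iowmb() provide on ARC HS and what the next patch wires into
the __raw_* accessors.
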
From patchwork Mon Mar 30 19:44:45 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Eugeniy Paltsev
X-Patchwork-Id: 244569
List-Id: U-Boot discussion
From: Eugeniy.Paltsev at synopsys.com (Eugeniy Paltsev)
Date: Mon, 30 Mar 2020 22:44:45 +0300
Subject: [PATCH v2 3/3] ARC: IO: add MB for __raw_* memory accessors
In-Reply-To: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
References: <20200330194445.5923-1-Eugeniy.Paltsev@synopsys.com>
Message-ID: <20200330194445.5923-4-Eugeniy.Paltsev@synopsys.com>

Add memory barriers to the __raw_readX / __raw_writeX accessors, the
same way as is done for the readX and writeX accessors, since many
U-Boot drivers use __raw_readX / __raw_writeX instead of the proper
accessors with barriers.

This will save us a lot of debugging in the future, and the cost is
acceptable: U-Boot is not as performance-oriented as run-time software
such as an OS or a bare-metal application, so we can afford not being
super fast, as this code is executed only once.

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
---
 arch/arc/include/asm/io.h | 58 +++++++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 50c88456002..e770a1fe4b8 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -79,21 +79,29 @@ static inline void sync(void)
 #define __arch_putq(v, a)	({ __comp_b(); *(volatile u64 *)(a) = (v); __comp_b(); })
 
-#define __raw_writeb(v, a)	__arch_putb(v, a)
-#define __raw_writew(v, a)	__arch_putw(v, a)
-#define __raw_writel(v, a)	__arch_putl(v, a)
-#define __raw_writeq(v, a)	__arch_putq(v, a)
+/*
+ * We add memory barriers to the __raw_readX / __raw_writeX accessors, the
+ * same way as is done for the readX and writeX accessors, since many U-Boot
+ * drivers use __raw_readX / __raw_writeX instead of the proper accessors
+ * with barriers.
+ */
+#define __raw_writeb(v, c)	({ __iowmb(); __arch_putb(v, c); })
+#define __raw_writew(v, c)	({ __iowmb(); __arch_putw(v, c); })
+#define __raw_writel(v, c)	({ __iowmb(); __arch_putl(v, c); })
+#define __raw_writeq(v, c)	({ __iowmb(); __arch_putq(v, c); })
+
+#define __raw_readb(c)		({ u8 __v = __arch_getb(c); __iormb(); __v; })
+#define __raw_readw(c)		({ u16 __v = __arch_getw(c); __iormb(); __v; })
+#define __raw_readl(c)		({ u32 __v = __arch_getl(c); __iormb(); __v; })
+#define __raw_readq(c)		({ u64 __v = __arch_getq(c); __iormb(); __v; })
 
-#define __raw_readb(a)		__arch_getb(a)
-#define __raw_readw(a)		__arch_getw(a)
-#define __raw_readl(a)		__arch_getl(a)
-#define __raw_readq(a)		__arch_getq(a)
 
 static inline void __raw_writesb(unsigned long addr, const void *data,
 				 int bytelen)
 {
 	u8 *buf = (uint8_t *)data;
 
+	__iowmb();
+
 	while (bytelen--)
 		__arch_putb(*buf++, addr);
 }
@@ -103,6 +111,8 @@ static inline void __raw_writesw(unsigned long addr, const void *data,
 {
 	u16 *buf = (uint16_t *)data;
 
+	__iowmb();
+
 	while (wordlen--)
 		__arch_putw(*buf++, addr);
 }
@@ -112,6 +122,8 @@ static inline void __raw_writesl(unsigned long addr, const void *data,
 {
 	u32 *buf = (uint32_t *)data;
 
+	__iowmb();
+
 	while (longlen--)
 		__arch_putl(*buf++, addr);
 }
@@ -122,6 +134,8 @@ static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
 
 	while (bytelen--)
 		*buf++ = __arch_getb(addr);
+
+	__iormb();
 }
 
 static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
@@ -130,6 +144,8 @@ static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
 
 	while (wordlen--)
 		*buf++ = __arch_getw(addr);
+
+	__iormb();
 }
 
 static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
@@ -138,6 +154,8 @@ static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
 
 	while (longlen--)
 		*buf++ = __arch_getl(addr);
+
+	__iormb();
 }
 
 /*
  * Relaxed I/O memory access primitives. These follow the Device memory
@@ -145,21 +163,15 @@ static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses.
  */
-#define readb_relaxed(c)	({ u8 __r = __raw_readb(c); __r; })
-#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
-					__raw_readw(c)); __r; })
-#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
-					__raw_readl(c)); __r; })
-#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64) \
-					__raw_readq(c)); __r; })
-
-#define writeb_relaxed(v, c)	((void)__raw_writeb((v), (c)))
-#define writew_relaxed(v, c)	((void)__raw_writew((__force u16) \
-					cpu_to_le16(v), (c)))
-#define writel_relaxed(v, c)	((void)__raw_writel((__force u32) \
-					cpu_to_le32(v), (c)))
-#define writeq_relaxed(v, c)	((void)__raw_writeq((__force u64) \
-					cpu_to_le64(v), (c)))
+#define readb_relaxed(c)	({ u8 __r = __arch_getb(c); __r; })
+#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__arch_getw(c)); __r; })
+#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__arch_getl(c)); __r; })
+#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__arch_getq(c)); __r; })
+
+#define writeb_relaxed(v, c)	((void)__arch_putb((v), (c)))
+#define writew_relaxed(v, c)	((void)__arch_putw((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed(v, c)	((void)__arch_putl((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed(v, c)	((void)__arch_putq((__force u64)cpu_to_le64(v), (c)))
 
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
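
Taken together, the series leaves ARC with three accessor flavours that
differ only in barriers and byte swapping. The sketch below is a usage
summary, not code from the patch: the device layout is invented, and it
assumes the readX()/writeX() definitions that follow the truncated comment
above:

	#include <asm/io.h>	/* the header patched by this series */

	/* Invented device registers, for illustration only. */
	#define CTRL_REG	0x00
	#define CTRL_START	0x1u

	static void start_device(void __iomem *regs)
	{
		/* writel(): cpu_to_le32() conversion plus __iowmb(), i.e.
		 * a real wmb() on ARC HS, a compiler barrier elsewhere. */
		writel(CTRL_START, regs + CTRL_REG);

		/* After this series, __raw_writel() carries the same
		 * barrier and only skips the endianness conversion, so
		 * legacy drivers that use it stay correct. */
		__raw_writel(CTRL_START, regs + CTRL_REG);

		/* writel_relaxed() keeps the __comp_b() compiler barriers
		 * but adds no HW barrier; pair it with an explicit wmb()
		 * when ordering against DMA or other masters matters. */
		writel_relaxed(CTRL_START, regs + CTRL_REG);
	}
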