author		Bart Van Assche <bvanassche@acm.org>	2020-03-13 13:30:59 -0700
committer	Martin K. Petersen <martin.petersen@oracle.com>	2020-03-16 22:08:32 -0400
commit		7251c0a41053631ac66795e7f7ddf4d4cba1f467 (patch)
tree		94a9d4fdff650862979febb3629cdbfa843c2853 /arch
parent		19f747f7370fcf4ced4988ed795ccd4a28f2b530 (diff)
scsi: c6x: Include <linux/unaligned/generic.h> instead of duplicating it
Use the generic __{get,put}_unaligned_[bl]e() definitions instead of duplicating them. Since a later patch will add more definitions to <linux/unaligned/generic.h>, this patch ensures that those definitions only have to be added in one place. See also commit a7f626c1948a ("C6X: headers") and commit 6510d41954dc ("kernel: Move arches to use common unaligned access").

Link: https://lore.kernel.org/r/20200313203102.16613-3-bvanassche@acm.org
Cc: Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mark Salter <msalter@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
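For context, a minimal caller-side sketch of the accessors this patch switches c6x over to. It is not part of the commit: the function names and wire format below are made up, and kernel code normally reaches these helpers through <asm/unaligned.h>, which on c6x now pulls in <linux/unaligned/generic.h>.

/*
 * Illustrative sketch only, not part of this commit.  Drivers include
 * <asm/unaligned.h>; on c6x that header now gets the generic
 * __{get,put}_unaligned_[bl]e() machinery from <linux/unaligned/generic.h>.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical wire header: a little-endian 32-bit length at byte offset 1,
 * i.e. a field that is never naturally aligned.
 */
static u32 sketch_read_len(const u8 *hdr)
{
	return get_unaligned_le32(hdr + 1);
}

static void sketch_write_len(u8 *hdr, u32 len)
{
	put_unaligned_le32(len, hdr + 1);
}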
Diffstat (limited to 'arch')
-rw-r--r--	arch/c6x/include/asm/unaligned.h	65
1 file changed, 1 insertion(+), 64 deletions(-)
diff --git a/arch/c6x/include/asm/unaligned.h b/arch/c6x/include/asm/unaligned.h
index b56ba7110f5a..d628cc170564 100644
--- a/arch/c6x/include/asm/unaligned.h
+++ b/arch/c6x/include/asm/unaligned.h
@@ -10,6 +10,7 @@
#define _ASM_C6X_UNALIGNED_H
#include <linux/swab.h>
+#include <linux/unaligned/generic.h>
/*
* The C64x+ can do unaligned word and dword accesses in hardware
@@ -100,68 +101,4 @@ static inline void put_unaligned64(u64 val, const void *p)
#endif
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern int __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) (typeof(*(ptr)))({ \
- sizeof(*(ptr)) == 1 ? *(ptr) : \
- (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) : \
- (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) : \
- (sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) : \
- __bad_unaligned_access_size()))); \
- })
-
-#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({ \
- sizeof(*(ptr)) == 1 ? *(ptr) : \
- (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) : \
- (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) : \
- (sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) : \
- __bad_unaligned_access_size()))); \
- })
-
-#define __put_unaligned_le(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_le16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_le32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_le64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#define __put_unaligned_be(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_be16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_be32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_be64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
#endif /* _ASM_C6X_UNALIGNED_H */
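The block removed above open-coded the same sizeof()-based dispatch that <linux/unaligned/generic.h> already provides, which is why dropping it does not change behaviour for callers. Below is a stand-alone, user-space sketch of that dispatch idea; the names are made up for illustration, and the kernel versions additionally cover 1- and 8-byte widths, the big-endian variants, and the put side.

#include <stdint.h>
#include <stdio.h>

/* Byte-shift little-endian reads: correct at any alignment. */
static uint16_t le16_read(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t le32_read(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/*
 * Size dispatch in the spirit of the removed __get_unaligned_le(): the width
 * of *(ptr) selects the accessor at compile time (trimmed to 2/4 bytes here).
 * ptr is only used for sizeof() and as a byte address; it is never
 * dereferenced through its declared type.
 */
#define sketch_get_unaligned_le(ptr)				\
	(sizeof(*(ptr)) == 2 ? le16_read(ptr) : le32_read(ptr))

int main(void)
{
	/* 0x12345678 stored little-endian at an odd (unaligned) offset. */
	const uint8_t buf[5] = { 0x00, 0x78, 0x56, 0x34, 0x12 };
	const uint32_t *val = (const uint32_t *)(buf + 1);

	/* Prints 0x12345678. */
	printf("0x%08x\n", (unsigned int)sketch_get_unaligned_le(val));
	return 0;
}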