/*
 *  linux/fs/ufs/swab.h
 *
 * Copyright (C) 1997, 1998 Francois-Rene Rideau <fare@tunes.org>
 * Copyright (C) 1998 Jakub Jelinek <jj@ultra.linux.cz>
 * Copyright (C) 2001 Christoph Hellwig <hch@infradead.org>
 */

#ifndef _UFS_SWAB_H
#define _UFS_SWAB_H

/*
 * Notes:
 *    Here we assume that a UFS filesystem is either big or little endian.
 *    If there are UFS implementations with stranger byte orders, you will
 *    need to modify the code here, as well as in ufs_super.c and ufs_fs.h,
 *    to support them.
 */

enum {
	BYTESEX_LE,
	BYTESEX_BE
};

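/*
 * UFS_SB(sb)->s_bytesex is set when the filesystem is mounted (the
 * superblock probing code tries both byte orders while looking for a
 * valid magic number); the helpers below only read it.
 */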
static inline u64
fs64_to_cpu(struct super_block *sbp, __fs64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le64_to_cpu((__force __le64)n);
	else
		return be64_to_cpu((__force __be64)n);
}

static inline __fs64
cpu_to_fs64(struct super_block *sbp, u64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs64)cpu_to_le64(n);
	else
		return (__force __fs64)cpu_to_be64(n);
}

static inline void
fs64_add(struct super_block *sbp, __fs64 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)+d);
	else
		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)+d);
}

static inline void
fs64_sub(struct super_block *sbp, __fs64 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)-d);
	else
		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)-d);
}

static inline u32
fs32_to_cpu(struct super_block *sbp, __fs32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le32_to_cpu((__force __le32)n);
	else
		return be32_to_cpu((__force __be32)n);
}

static inline __fs32
cpu_to_fs32(struct super_block *sbp, u32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs32)cpu_to_le32(n);
	else
		return (__force __fs32)cpu_to_be32(n);
}

static inline void
fs32_add(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
	else
		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
}

static inline void
fs32_sub(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
	else
		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
}

static inline u16
fs16_to_cpu(struct super_block *sbp, __fs16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le16_to_cpu((__force __le16)n);
	else
		return be16_to_cpu((__force __be16)n);
}

static inline __fs16
cpu_to_fs16(struct super_block *sbp, u16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs16)cpu_to_le16(n);
	else
		return (__force __fs16)cpu_to_be16(n);
}

static inline void
fs16_add(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
	else
		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
}

static inline void
fs16_sub(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
	else
		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
}
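
/*
 * A minimal usage sketch (illustrative only): "disk" stands for any
 * on-disk structure whose fields are declared with the __fs16/__fs32/
 * __fs64 types; the struct and field names here are hypothetical, not
 * part of the UFS on-disk format.
 *
 *	struct super_block *sb;			(mounted UFS filesystem)
 *	struct example_disk_record {		(hypothetical on-disk layout)
 *		__fs32 d_count;
 *		__fs64 d_size;
 *	} *disk;
 *
 *	u32 count = fs32_to_cpu(sb, disk->d_count);	(disk to CPU order)
 *	disk->d_size = cpu_to_fs64(sb, 1024);		(CPU to disk order)
 *	fs32_add(sb, &disk->d_count, 1);		(read-modify-write in place)
 */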

#endif /* _UFS_SWAB_H */