@@ -24,6 +24,8 @@ SOFTWARE.
*/

+ #pragma once
+
#include <stdint.h>

#include <concepts>
@@ -36,6 +38,9 @@ namespace Utilities {
namespace BitFieldInternal {

template <typename T>
+ concept IntegralLike = std::is_integral_v<T> || std::is_enum_v<T>;
+
+ template <IntegralLike T>
struct DefaultBitSize {
    static constexpr unsigned size = sizeof(T) * 8;
};
@@ -80,63 +85,143 @@ struct ComputeOffset {
    }
};

+ template <std::integral T, std::integral U = T>
+ using SignedType = typename std::conditional_t<std::is_signed_v<T>, std::make_signed_t<U>, std::make_unsigned_t<U>>;
+
+ template <unsigned span>
+ using StorageType = typename std::conditional_t<
+     span <= 8, uint8_t,
+     typename std::conditional_t<span <= 16, uint16_t, typename std::conditional_t<span <= 32, uint32_t, void>>>;
+
+ template <unsigned span, std::integral T>
+ using SignedStorageType = SignedType<T, StorageType<span>>;
+
+ template <unsigned Offset, unsigned Width, unsigned storageSize, std::integral T>
+ struct BitFieldHelper {
+     static constexpr unsigned offset = Offset;
+     static constexpr unsigned width = Width;
+     static constexpr unsigned firstByteOffset = offset / 8;
+     static constexpr unsigned lastByteOffset = (offset + width - 1) / 8;
+     static constexpr unsigned bytesCount = lastByteOffset - firstByteOffset + 1;
+     static constexpr unsigned shift = offset % 8;
+     static constexpr uint32_t mask = (1 << width) - 1;
+     static constexpr bool isAlignedAndSafe =
+         ((firstByteOffset % sizeof(T)) == 0) && (firstByteOffset + sizeof(T)) <= storageSize;
+     static constexpr bool fullBytes = ((width % 8) == 0) && ((offset % 8) == 0);
+     BitFieldHelper() {
+         static_assert(bytesCount <= 4, "Type too large");
+         static_assert(width > 0, "Width must be greater than 0");
+         static_assert(width <= 32, "Width must be less than or equal to 32");
+         static_assert(offset + width <= storageSize * 8, "Offset + Width must be less than or equal to storage size");
+     }
+ };
+
+ enum Dummy : int;
+
} // namespace BitFieldInternal

- template <std::integral T, unsigned width = BitFieldInternal::DefaultBitSize<T>::size>
+ /**
+ * @brief A bit field element to be used in a BitField.
+ *
+ * @tparam T The type of the field. This can be any integral type or enum type.
+ * @tparam width The width of the field in bits.
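+ *
+ * For illustration only (these aliases are hypothetical, not part of this header), a field may be
+ * given an explicit width or default to the full width of its type:
+ * @code
+ * using Enabled = BitSpan<bool, 1>;   // a 1-bit flag
+ * using Counter = BitSpan<uint16_t>;  // width defaults to 16 bits
+ * @endcode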
+ */
+ template <BitFieldInternal::IntegralLike T, unsigned width = BitFieldInternal::DefaultBitSize<T>::size>
struct BitSpan {
    static constexpr unsigned Width = width;
-     using Underlying = T;
+     using Type = T;
+     using Underlying =
+         std::conditional_t<std::is_enum_v<T>,
+                            std::underlying_type_t<std::conditional_t<std::is_enum_v<T>, T, BitFieldInternal::Dummy>>,
+                            T>;
};

+ /**
+ * @brief A bit field that can hold multiple bit field elements of different types.
+ *
+ * @details This class holds multiple bit field elements of different types in a single byte
+ * array, with the offset of each element computed at compile time. An element's value is read
+ * with get() and written with set(). Elements are laid out in the order they appear in the
+ * template parameter pack, so that order determines their offsets. A single element is
+ * technically limited to 32 bits, but the practical limit depends on its alignment: one element
+ * may span at most 4 bytes. There is no limit on the number of elements a bit field can hold.
+ *
+ * @tparam... T The types of the bit field elements. These need to be BitSpan types.
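+ *
+ * A minimal usage sketch (the field aliases below are hypothetical, not part of this header):
+ * @code
+ * using Mode = BitSpan<uint8_t, 3>;
+ * using Enabled = BitSpan<bool, 1>;
+ * using Count = BitSpan<uint16_t, 12>;
+ * using Regs = BitField<Mode, Enabled, Count>;  // 3 + 1 + 12 = 16 bits total
+ *
+ * Regs regs;
+ * regs.clear();
+ * regs.set<Mode>(5);
+ * regs.set<Enabled>(true);
+ * uint16_t count = regs.get<Count>();
+ * @endcode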
+ */
template <typename... T>
struct BitField {
-     template <typename One>
-     constexpr typename One::Underlying get() {
-         if constexpr (std::is_signed_v<typename One::Underlying>) {
-             return get<BitFieldInternal::ComputeOffset<One, T...>::offset(), One::Width, signed>();
-         } else if constexpr (std::is_unsigned_v<typename One::Underlying>) {
-             return get<BitFieldInternal::ComputeOffset<One, T...>::offset(), One::Width, unsigned>();
-         }
-         return 0;
+     template <typename Field>
+     constexpr Field::Type get() const {
+         constexpr unsigned offset = BitFieldInternal::ComputeOffset<Field, T...>::offset();
+         auto ret = get<offset, Field::Width,
+                        BitFieldInternal::SignedStorageType<(offset % 8) + Field::Width, typename Field::Underlying>>();
+         return static_cast<Field::Type>(ret);
+     }
+     template <typename Field>
+     constexpr void set(Field::Type v_) {
+         constexpr unsigned offset = BitFieldInternal::ComputeOffset<Field, T...>::offset();
+         auto v = static_cast<Field::Underlying>(v_);
+         set<offset, Field::Width,
+             BitFieldInternal::SignedStorageType<(offset % 8) + Field::Width, typename Field::Underlying>>(v);
    }
-     template <typename One>
-     constexpr void set(typename One::Underlying v) {
-         if constexpr (std::is_signed_v<typename One::Underlying>) {
-             set<BitFieldInternal::ComputeOffset<One, T...>::offset(), One::Width, signed>(v);
-         } else if constexpr (std::is_unsigned_v<typename One::Underlying>) {
-             set<BitFieldInternal::ComputeOffset<One, T...>::offset(), One::Width, unsigned>(v);
+     void clear() {
+         for (unsigned i = 0; i < sizeof(storage); i++) {
+             storage[i] = 0;
        }
    }

  private:
    template <unsigned offset, unsigned width, std::integral U>
-     constexpr U get() {
-         constexpr unsigned firstByteOffset = offset / 8;
-         constexpr unsigned lastByteOffset = (offset + width - 1) / 8;
-         constexpr unsigned shift = offset % 8;
-         constexpr uint32_t mask = (1 << width) - 1;
-         if constexpr ((firstByteOffset % 4) == 0) {
-             return reinterpret_cast<const U*>(storage)[firstByteOffset / 4] >> shift & mask;
-         } else if constexpr ((firstByteOffset % 4) != 0) {
-             return (loadUnaligned<U>(storage + firstByteOffset, lastByteOffset - firstByteOffset + 1) >> shift) & mask;
+     constexpr U get() const {
+         using helper = BitFieldInternal::BitFieldHelper<offset, width, sizeof(storage), U>;
+         if constexpr (helper::isAlignedAndSafe) {
+             return reinterpret_cast<const U*>(storage)[helper::firstByteOffset / sizeof(U)] >> helper::shift &
+                    helper::mask;
+         } else {
+             return (loadUnaligned<U, helper::bytesCount>(storage + helper::firstByteOffset) >> helper::shift) &
+                    helper::mask;
        }
        return 0;
    }
    template <unsigned offset, unsigned width, std::integral U>
    constexpr void set(U v) {
-         constexpr unsigned firstByteOffset = offset / 8;
-         constexpr unsigned lastByteOffset = (offset + width - 1) / 8;
-         constexpr unsigned shift = offset % 8;
-         constexpr uint32_t mask = (1 << width) - 1;
-         if constexpr ((firstByteOffset % 4) == 0) {
+         using helper = BitFieldInternal::BitFieldHelper<offset, width, sizeof(storage), U>;
+         if constexpr (helper::fullBytes) {
+             if constexpr (helper::bytesCount == 1) {
+                 storage[helper::firstByteOffset] = static_cast<uint8_t>(v);
+             } else if constexpr (helper::bytesCount == 2) {
+                 if constexpr (helper::isAlignedAndSafe) {
+                     *reinterpret_cast<U*>(storage + helper::firstByteOffset) = v;
+                 } else {
+                     storeUnaligned<U>(storage + helper::firstByteOffset, v);
+                 }
+             } else if constexpr (helper::bytesCount == 3) {
+                 if constexpr ((helper::firstByteOffset % 2) == 0) {
+                     *reinterpret_cast<uint16_t*>(storage + helper::firstByteOffset) = static_cast<uint16_t>(v);
+                     storage[helper::firstByteOffset + 2] = static_cast<uint8_t>(v >> 16);
+                 } else {
+                     storage[helper::firstByteOffset] = static_cast<uint8_t>(v);
+                     *reinterpret_cast<uint16_t*>(storage + helper::firstByteOffset + 1) = static_cast<uint16_t>(v >> 8);
+                 }
+             } else if constexpr (helper::bytesCount == 4) {
+                 if constexpr (helper::isAlignedAndSafe) {
+                     *reinterpret_cast<U*>(storage + helper::firstByteOffset) = v;
+                 } else {
+                     storeUnaligned<U>(storage + helper::firstByteOffset, v);
+                 }
+             }
+         } else if constexpr (helper::isAlignedAndSafe) {
            U* ptr = reinterpret_cast<U*>(storage);
-             ptr[firstByteOffset / 4] &= ~(mask << shift);
-             ptr[firstByteOffset / 4] |= (v & mask) << shift;
-         } else if constexpr ((firstByteOffset % 4) != 0) {
-             U span = loadUnaligned<U>(storage + firstByteOffset, lastByteOffset - firstByteOffset + 1);
-             span &= ~(mask << shift);
-             span |= (v & mask) << shift;
-             storeUnaligned<U>(storage + firstByteOffset, span, lastByteOffset - firstByteOffset + 1);
+             ptr[helper::firstByteOffset / sizeof(U)] &= ~(helper::mask << helper::shift);
+             ptr[helper::firstByteOffset / sizeof(U)] |= (v & helper::mask) << helper::shift;
+         } else {
+             U span = loadUnaligned<U, helper::bytesCount>(storage + helper::firstByteOffset);
+             span &= ~(helper::mask << helper::shift);
+             span |= (v & helper::mask) << helper::shift;
+             storeUnaligned<U, helper::bytesCount>(storage + helper::firstByteOffset, span);
        }
    }
    uint8_t storage[BitFieldInternal::ComputeStorage<T...>::size()];