00001 #ifndef CRYPTOPP_MISC_H
00002 #define CRYPTOPP_MISC_H
00003
00004 #include "cryptlib.h"
00005 #include "smartptr.h"
00006 #include <string.h>
00007
00008 #ifdef _MSC_VER
00009 #if _MSC_VER >= 1400
00010
00011 #define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
00012 #define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
00013 #define _interlockedbittestandset64 CRYPTOPP_DISABLED_INTRINSIC_3
00014 #define _interlockedbittestandreset64 CRYPTOPP_DISABLED_INTRINSIC_4
00015 #include <intrin.h>
00016 #undef _interlockedbittestandset
00017 #undef _interlockedbittestandreset
00018 #undef _interlockedbittestandset64
00019 #undef _interlockedbittestandreset64
00020 #define CRYPTOPP_FAST_ROTATE(x) 1
00021 #elif _MSC_VER >= 1300
00022 #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 | (x) == 64)
00023 #else
00024 #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00025 #endif
00026 #elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
00027 (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
00028 #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00029 #elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86) // depend on GCC's peephole optimization to generate rotate instructions
00030 #define CRYPTOPP_FAST_ROTATE(x) 1
00031 #else
00032 #define CRYPTOPP_FAST_ROTATE(x) 0
00033 #endif
00034
00035 #ifdef __BORLANDC__
00036 #include <mem.h>
00037 #endif
00038
00039 #if defined(__GNUC__) && defined(__linux__)
00040 #define CRYPTOPP_BYTESWAP_AVAILABLE
00041 #include <byteswap.h>
00042 #endif
00043
00044 NAMESPACE_BEGIN(CryptoPP)
00045
00046
00047
// Compile-time assertion helper: the array extent 2*b-1 is 1 when b is
// true and -1 (ill-formed) when b is false, so instantiating
// CompileAssert<false> fails to compile.
template <bool b>
struct CompileAssert
{
static char dummy[2*b-1];
};

// Instantiates a CompileAssert named after the source line.  The
// instantiation is suppressed in DLL import/export builds.
#define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
#if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
#else
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
#endif
// Two-level paste so __LINE__ expands before token concatenation.
#define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
#define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
00062
00063
00064
// An empty class, used as a placeholder base/member where a template
// requires a type but no state is needed.
class CRYPTOPP_DLL Empty
{
};
00068
00069
// Combines two base classes into one type for single-base template slots.
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};
00074
00075
// Combines three base classes into one type for single-base template slots.
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};
00080
// Holds a T as a protected member so a derived class can control the
// construction order of the held object relative to other bases.
template <class T>
class ObjectHolder
{
protected:
T m_object;
};
00087
// Inherit from this to disable copying: the copy constructor and copy
// assignment are declared private and never defined (pre-C++11 idiom).
class NotCopyable
{
public:
NotCopyable() {}
private:
NotCopyable(const NotCopyable &);
void operator=(const NotCopyable &);
};
00096
// Default factory functor: default-constructs a T on the heap.
// Used as the default factory for Singleton below.
template <class T>
struct NewObject
{
T* operator()() const {return new T;}
};
00102
00103
00104
00105
00106
// Restricts T to one process-wide instance, created on first use by the
// factory functor F.  The instance parameter allows several independent
// singletons of the same T.
template <class T, class F = NewObject<T>, int instance=0>
class Singleton
{
public:
Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}

// Returns the shared instance, creating it on the first call.
CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;

private:
F m_objectFactory;
};
00119
// Lazily create and return the shared instance.
// NOTE(review): this is an unsynchronized check-then-create sequence.
// Two threads entering concurrently may each construct an object (the
// loser deletes its extra copy below), and the unfenced publication of
// s_pObject.m_p is not a portable thread-safe initialization — confirm
// callers' threading assumptions before relying on this.
template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
static volatile simple_ptr<T> s_pObject;
T *p = s_pObject.m_p;

if (p)
return *p;

// Slow path: build a candidate, then re-check in case another caller won.
T *newObject = m_objectFactory();
p = s_pObject.m_p;

if (p)
{
delete newObject;
return *p;
}

s_pObject.m_p = newObject;
return *newObject;
}
00141
00142
00143
// Bounds-checked memcpy/memmove lookalikes, supplied only when the
// platform's CRT does not already provide the "secure" versions.
#if (!__STDC_WANT_SECURE_LIB__ && !defined(_MEMORY_S_DEFINED))
// Copy count bytes after verifying they fit in the destination buffer.
// Throws InvalidArgument instead of silently overflowing.
inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
{
if (count > sizeInBytes)
throw InvalidArgument("memcpy_s: buffer overflow");
memcpy(dest, src, count);
}

// Overlap-safe variant of the above, backed by memmove.
inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
{
if (count > sizeInBytes)
throw InvalidArgument("memmove_s: buffer overflow");
memmove(dest, src, count);
}

#if __BORLANDC__ >= 0x620
// C++Builder 2010 workaround: route uses of these names through the
// namespace-qualified definitions above.
#define memcpy_s CryptoPP::memcpy_s
#define memmove_s CryptoPP::memmove_s
#endif
#endif
00165
// memset wrapper that lets GCC fold away calls whose length is a
// compile-time zero, returning the pointer unchanged.
inline void * memset_z(void *ptr, int value, size_t num)
{
// Skip the call entirely when GCC can prove num is the constant 0.
#if CRYPTOPP_GCC_VERSION >= 30001
if (__builtin_constant_p(num) && num==0)
return ptr;
#endif
return memset(ptr, value, num);
}
00175
00176
// Return the smaller of a and b; on a tie the first argument wins,
// mirroring std::min semantics.
template <class T> inline const T& STDMIN(const T& a, const T& b)
{
	if (b < a)
		return b;
	return a;
}
00181
// Return the smaller of a and b as a T1, comparing in the wider type.
// The compile-time assertion requires the wider of the two types to be
// unsigned so the cross-type comparison cannot change either value.
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
// Both inputs must be non-negative for the unsigned comparison to hold.
assert(a==0 || a>0);
assert(b>=0);

if (sizeof(T1)<=sizeof(T2))
return b < (T2)a ? (T1)b : a;
else
return (T1)b < a ? (T1)b : a;
}
00193
// Return the larger of a and b; on a tie the first argument wins,
// mirroring std::max semantics.
template <class T> inline const T& STDMAX(const T& a, const T& b)
{
	if (a < b)
		return b;
	return a;
}
00198
00199 #define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue
00200
00201
00202 #define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
00203
00204
00205
00206
00207 #define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
00208
// XOR-fold the value onto itself, halving the width each pass, until
// bit 0 holds the XOR of every bit of the input; return that parity bit.
template <class T>
unsigned int Parity(T value)
{
	unsigned int shift = 8*sizeof(value)/2;
	while (shift > 0)
	{
		value ^= value >> shift;
		shift /= 2;
	}
	return (unsigned int)value & 1;
}
00216
// Number of bytes needed to represent value (0 for zero), found by a
// binary search for the highest non-zero byte position.
template <class T>
unsigned int BytePrecision(const T &value)
{
	if (!value)
		return 0;

	unsigned int lo = 0, hi = 8*sizeof(value);
	while (hi - lo > 8)
	{
		const unsigned int mid = (lo + hi) / 2;
		if (value >> mid)
			lo = mid;
		else
			hi = mid;
	}
	return hi / 8;
}
00236
// Number of significant bits in value (0 for zero), located by a binary
// search over the bit positions.
template <class T>
unsigned int BitPrecision(const T &value)
{
	if (!value)
		return 0;

	unsigned int lo = 0, hi = 8*sizeof(value);
	while (hi - lo > 1)
	{
		const unsigned int mid = (lo + hi) / 2;
		if (value >> mid)
			lo = mid;
		else
			hi = mid;
	}
	return hi;
}
00256
// Index of the lowest set bit of a 32-bit word.
// NOTE(review): all three branches assume v != 0 — confirm callers.
inline unsigned int TrailingZeros(word32 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
return __builtin_ctz(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400
unsigned long result;
_BitScanForward(&result, v);
return result;
#else
// Portable fallback: v & -v isolates the lowest set bit; multiplying by
// the de Bruijn constant puts a unique 5-bit code in the top bits,
// which indexes the lookup table.
static const int MultiplyDeBruijnBitPosition[32] =
{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
return MultiplyDeBruijnBitPosition[((word32)((v & -v) * 0x077CB531U)) >> 27];
#endif
}
00275
// Index of the lowest set bit of a 64-bit word; falls back to the
// 32-bit version one half at a time when no 64-bit intrinsic exists.
// NOTE(review): like the 32-bit overload, assumes v != 0.
inline unsigned int TrailingZeros(word64 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
return __builtin_ctzll(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(_M_X64) || defined(_M_IA64))
unsigned long result;
_BitScanForward64(&result, v);
return result;
#else
return word32(v) ? TrailingZeros(word32(v)) : 32 + TrailingZeros(word32(v>>32));
#endif
}
00288
// Keep only the low `size` bits of value.  A size covering the whole
// type returns value unchanged (which also avoids an undefined
// full-width shift when building the mask).
template <class T>
inline T Crop(T value, size_t size)
{
	if (size >= 8*sizeof(value))
		return value;
	const T mask = (T(1) << size) - 1;
	return T(value & mask);
}
00297
// Convert `from` into `to`, reporting whether the value survived the
// round trip with both magnitude and sign intact.  `to` is always
// assigned, even when the conversion loses information.
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
	to = (T2)from;
	const bool sameValue = (from == to);
	const bool sameSign = ((from > 0) == (to > 0));
	return sameValue && sameSign;
}
00306
// Bytes required to hold bitCount bits, rounding up to a whole byte.
inline size_t BitsToBytes(size_t bitCount)
{
	return (bitCount + 7) / 8;
}
00311
// Machine words (WORD_SIZE bytes each) required to hold byteCount
// bytes, rounding up.
inline size_t BytesToWords(size_t byteCount)
{
return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
}
00316
// Machine words (WORD_BITS bits each) required to hold bitCount bits,
// rounding up.
inline size_t BitsToWords(size_t bitCount)
{
return ((bitCount+WORD_BITS-1)/(WORD_BITS));
}
00321
// Double machine words (2*WORD_BITS bits each) required to hold
// bitCount bits, rounding up.
inline size_t BitsToDwords(size_t bitCount)
{
return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
}
00326
00327 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
00328 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
00329
00330 CRYPTOPP_DLL bool CRYPTOPP_API VerifyBufsEqual(const byte *buf1, const byte *buf2, size_t count);
00331
// True when n is a positive power of two: such a value ANDed with n-1
// clears every bit.
template <class T>
inline bool IsPowerOf2(const T &n)
{
	if (!(n > 0))
		return false;
	return (n & (n-1)) == 0;
}
00337
// a mod b computed with a mask instead of a division; valid only when
// b is a power of two (checked in debug builds).
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
assert(IsPowerOf2(b));
return T2(a) & (b-1);
}
00344
// Largest multiple of m that is <= n; uses the cheap mask-based
// remainder when m is a power of two, a plain modulo otherwise.
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
	return IsPowerOf2(m) ? n - ModPowerOf2(n, m) : n - n%m;
}
00353
// Smallest multiple of m that is >= n.  The wrap-around comparison
// detects unsigned overflow of n+m-1 before rounding down.
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
if (n+m-1 < n)
throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
return RoundDownToMultipleOf(n+m-1, m);
}
00361
// Required alignment of T, using whichever facility the compiler
// provides.  The dummy parameter only aids template argument deduction.
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)
{
// When unaligned access is allowed, small types need no alignment.
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
if (sizeof(T) < 16)
return 1;
#endif

#if (_MSC_VER >= 1300)
return __alignof(T);
#elif defined(__GNUC__)
return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
// Cap at 4 on platforms with slow 64-bit words.
return UnsignedMin(4U, sizeof(T));
#else
// Conservative fallback: assume alignment equals size.
return sizeof(T);
#endif
}
00380
00381 inline bool IsAlignedOn(const void *p, unsigned int alignment)
00382 {
00383 return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
00384 }
00385
// True when p is suitably aligned for a T.  The dummy parameter only
// aids template argument deduction.
template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL)
{
return IsAlignedOn(p, GetAlignmentOf<T>());
}
00391
00392 #ifdef IS_LITTLE_ENDIAN
00393 typedef LittleEndian NativeByteOrder;
00394 #else
00395 typedef BigEndian NativeByteOrder;
00396 #endif
00397
// The machine's byte order as a ByteOrder enum value.
inline ByteOrder GetNativeByteOrder()
{
return NativeByteOrder::ToEnum();
}
00402
// True when the requested byte order matches the machine's.
inline bool NativeByteOrderIs(ByteOrder order)
{
return order == GetNativeByteOrder();
}
00407
// Render an integer in the given base (digits beyond 9 use lowercase
// letters).  Negative values get a leading '-'.
// NOTE(review): negating the most negative value of a signed T
// overflows — callers appear to avoid that input; confirm before
// relying on it.
template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
	if (a == 0)
		return "0";
	bool isNegative = false;
	if (a < 0)
	{
		isNegative = true;
		a = 0-a;
	}
	std::string digits;
	while (a > 0)
	{
		const T rem = a % base;
		const char c = char(rem + (rem < 10 ? '0' : 'a' - 10));
		digits.insert(digits.begin(), c);
		a /= base;
	}
	if (isNegative)
		return "-" + digits;
	return digits;
}
00430
// a - b clamped at zero instead of wrapping below it.
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
	if (a > b)
		return T1(a - b);
	return T1(0);
}
00436
// Map an object's IsForwardTransformation() flag to the CipherDir enum.
template <class T>
inline CipherDir GetCipherDir(const T &obj)
{
return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
}
00442
00443 CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();
00444
// Add 1 to an s-byte big-endian counter in place, propagating the
// carry from the last byte toward the first until it is absorbed.
inline void IncrementCounterByOne(byte *inout, unsigned int s)
{
for (int i=s-1, carry=1; i>=0 && carry; i--)
carry = !++inout[i];
}
00450
// Copying variant: output = input + 1 for an s-byte big-endian counter.
// The loop writes the incremented tail; the memcpy_s then copies the
// untouched leading bytes (indices 0..i) from input.
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
int i, carry;
for (i=s-1, carry=1; i>=0 && carry; i--)
carry = ((output[i] = input[i]+1) == 0);
memcpy_s(output, s, input, i+1);
}
00458
// Branch-free swap: the mask is a^b when c is true and 0 when false,
// so XORing it into both operands swaps them exactly when c is set.
template <class T>
inline void ConditionalSwap(bool c, T &a, T &b)
{
	const T mask = c * (a ^ b);
	a ^= mask;
	b ^= mask;
}
00466
// Branch-free pointer swap: the delta is a-b when c is true and 0 when
// false; applying it with -= and += swaps exactly when c is set.
template <class T>
inline void ConditionalSwapPointers(bool c, T &a, T &b)
{
	const ptrdiff_t delta = c * (a - b);
	a -= delta;
	b += delta;
}
00474
00475
00476
// Zero n elements through a volatile pointer so the compiler cannot
// elide the stores as dead writes (secret data must really be erased).
template <class T>
void SecureWipeBuffer(T *buf, size_t n)
{
	volatile T *q = buf;
	for (size_t i = 0; i != n; ++i)
		q[i] = 0;
}
00485
// x86/x64 specializations of SecureWipeBuffer: zero the buffer with a
// rep-stos string instruction (GCC inline asm or MSVC __stos*
// intrinsic) through a volatile pointer so the wipe is not elided.
#if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)

// Byte-at-a-time wipe (rep stosb).
template<> inline void SecureWipeBuffer(byte *buf, size_t n)
{
volatile byte *p = buf;
#ifdef __GNUC__
asm volatile("rep stosb" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
__stosb((byte *)(size_t)p, 0, n);
#endif
}

// 16-bit wipe (rep stosw).
template<> inline void SecureWipeBuffer(word16 *buf, size_t n)
{
volatile word16 *p = buf;
#ifdef __GNUC__
asm volatile("rep stosw" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
__stosw((word16 *)(size_t)p, 0, n);
#endif
}

// 32-bit wipe (rep stosl).
template<> inline void SecureWipeBuffer(word32 *buf, size_t n)
{
volatile word32 *p = buf;
#ifdef __GNUC__
asm volatile("rep stosl" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
__stosd((unsigned long *)(size_t)p, 0, n);
#endif
}

// 64-bit wipe: rep stosq on x64, otherwise two 32-bit stores per word.
template<> inline void SecureWipeBuffer(word64 *buf, size_t n)
{
#if CRYPTOPP_BOOL_X64
volatile word64 *p = buf;
#ifdef __GNUC__
asm volatile("rep stosq" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
__stosq((word64 *)(size_t)p, 0, n);
#endif
#else
SecureWipeBuffer((word32 *)buf, 2*n);
#endif
}

#endif // #if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)
00533
// Wipe an array of n objects of type T, dispatching to the widest
// SecureWipeBuffer overload whose size and alignment requirements T
// satisfies, falling back to a byte-wise wipe.
template <class T>
inline void SecureWipeArray(T *buf, size_t n)
{
if (sizeof(T) % 8 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word64>() == 0)
SecureWipeBuffer((word64 *)buf, n * (sizeof(T)/8));
else if (sizeof(T) % 4 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word32>() == 0)
SecureWipeBuffer((word32 *)buf, n * (sizeof(T)/4));
else if (sizeof(T) % 2 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word16>() == 0)
SecureWipeBuffer((word16 *)buf, n * (sizeof(T)/2));
else
SecureWipeBuffer((byte *)buf, n * sizeof(T));
}
00546
00547
// Convert a wide-character string to a narrow std::string with
// wcstombs (current C locale).  On conversion failure either throws
// InvalidArgument or returns an empty string, per throwOnError.
// NOTE(review): as a file-static function in a header, every
// translation unit that uses it gets its own copy.
static std::string StringNarrow(const wchar_t *str, bool throwOnError = true)
{
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4996) // 'wcstombs': This function or variable may be unsafe.
#endif
// First call with a NULL destination just measures the output length.
size_t size = wcstombs(NULL, str, 0);
if (size == size_t(0)-1)
{
if (throwOnError)
throw InvalidArgument("StringNarrow: wcstombs() call failed");
else
return std::string();
}
// Second call fills the pre-sized string; exactly `size` chars, no NUL.
std::string result(size, 0);
wcstombs(&result[0], str, size);
return result;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
00569
00570 #if CRYPTOPP_BOOL_ALIGN16_ENABLED
00571 CRYPTOPP_DLL void * CRYPTOPP_API AlignedAllocate(size_t size);
00572 CRYPTOPP_DLL void CRYPTOPP_API AlignedDeallocate(void *p);
00573 #endif
00574
00575 CRYPTOPP_DLL void * CRYPTOPP_API UnalignedAllocate(size_t size);
00576 CRYPTOPP_DLL void CRYPTOPP_API UnalignedDeallocate(void *p);
00577
00578
00579
// Portable left-rotate by a compile-time-constant amount.  The y == 0
// test avoids the undefined full-width right shift.
// Precondition: y < 8*sizeof(T).
template <class T> inline T rotlFixed(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	if (y == 0)
		return x;
	return T((x << y) | (x >> (sizeof(T)*8 - y)));
}
00585
// Portable right-rotate by a compile-time-constant amount.  The y == 0
// test avoids the undefined full-width left shift.
// Precondition: y < 8*sizeof(T).
template <class T> inline T rotrFixed(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	if (y == 0)
		return x;
	return T((x >> y) | (x << (sizeof(T)*8 - y)));
}
00591
// Left-rotate by a run-time amount.  Precondition: y < 8*sizeof(T).
// Fix: the previous code computed x >> (8*sizeof(T)-y), which is a
// full-width shift — undefined behavior — when y == 0.  Masking the
// complementary shift count ((0-y) mod width, valid because the width
// is a power of two) yields x unchanged for y == 0 and identical
// results for every other permitted y, typically compiling to a single
// rotate instruction.
template <class T> inline T rotlVariable(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	const unsigned int MASK = sizeof(T)*8 - 1;
	return T((x << y) | (x >> ((0U - y) & MASK)));
}
00597
// Right-rotate by a run-time amount.  Precondition: y < 8*sizeof(T).
// Fix: the previous code computed x << (8*sizeof(T)-y), which is a
// full-width shift — undefined behavior — when y == 0.  Masking the
// complementary shift count removes the UB while leaving every other
// permitted rotation identical.
template <class T> inline T rotrVariable(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	const unsigned int MASK = sizeof(T)*8 - 1;
	return T((x >> y) | (x << ((0U - y) & MASK)));
}
00603
// Left-rotate with the amount reduced modulo the type width.
// Fix: after the reduction the previous code shifted by 8*sizeof(T)-y,
// a full-width (undefined) shift whenever y was a multiple of the
// width.  The widths here are powers of two, so y & MASK performs the
// same reduction and masking the complementary count makes a multiple-
// of-width rotation return x unchanged, as a true rotate does.
template <class T> inline T rotlMod(T x, unsigned int y)
{
	const unsigned int MASK = sizeof(T)*8 - 1;
	y &= MASK;
	return T((x << y) | (x >> ((0U - y) & MASK)));
}
00609
// Right-rotate with the amount reduced modulo the type width.
// Fix: after the reduction the previous code shifted by 8*sizeof(T)-y,
// a full-width (undefined) shift whenever y was a multiple of the
// width.  Masking both the reduced amount and the complementary count
// (width is a power of two) removes the UB and makes a multiple-of-
// width rotation return x unchanged, as a true rotate does.
template <class T> inline T rotrMod(T x, unsigned int y)
{
	const unsigned int MASK = sizeof(T)*8 - 1;
	y &= MASK;
	return T((x >> y) | (x << ((0U - y) & MASK)));
}
00615
// MSVC specializations: route 32-bit rotates through the _lrotl/_lrotr
// intrinsics.  The Fixed forms keep the y == 0 guard of the generic
// templates; the Mod forms rely on the intrinsic's modulo behavior.
#ifdef _MSC_VER

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _lrotr(x, y);
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
return _lrotr(x, y);
}

#endif // #ifdef _MSC_VER
00653
// MSVC 7.0+ specializations: route 64-bit rotates through the
// _rotl64/_rotr64 intrinsics (not used under the Intel compiler).
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)

template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
00692
// MSVC 8.0+ specializations: 16-bit and 8-bit rotates via the
// _rotl16/_rotr16 and _rotl8/_rotr8 intrinsics.
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)

template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
return _rotr16(x, y);
}

template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
assert(y < 8*sizeof(x));
return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
assert(y < 8*sizeof(x));
return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
00765
// Metrowerks/PowerPC specializations: 32-bit rotates via the rlwinm
// (immediate) and rlwnm (register) rotate-left instructions; right
// rotates are expressed as left rotates by 32-y.
#if (defined(__MWERKS__) && TARGET_CPU_PPC)

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
assert(y < 32);
return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
assert(y < 32);
return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
assert(y < 32);
return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
assert(y < 32);
return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
00803
00804
00805
// Byte `index` of value in the requested byte order: index 0 is the
// least significant byte in little-endian order, the most significant
// in big-endian order.
template <class T>
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
{
if (order == LITTLE_ENDIAN_ORDER)
return GETBYTE(value, index);
else
return GETBYTE(value, sizeof(T)-index-1);
}
00814
// Byte order reversal of a single byte is the identity.
inline byte ByteReverse(byte value)
{
return value;
}
00819
// Swap the two bytes of a 16-bit word, preferring a platform intrinsic
// and falling back to an 8-bit rotate (equivalent for 16-bit values).
inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
return _byteswap_ushort(value);
#else
return rotlFixed(value, 8U);
#endif
}
00830
// Reverse the byte order of a 32-bit word, choosing the fastest method
// the toolchain offers (bswap asm, byteswap.h, PPC lwbrx, MSVC
// intrinsic, rotate tricks, or plain shifts and masks).
inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
__asm__ ("bswap %0" : "=r" (value) : "0" (value));
return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
return (word32)__lwbrx(&value,0);
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
// Two rotates pick out the odd and even byte lanes already in place.
return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
// Swap bytes within each half, then swap the halves with a rotate.
value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
return rotlFixed(value, 16U);
#endif
}
00851
// Reverse the byte order of a 64-bit word, choosing the fastest method
// available; on platforms with slow 64-bit arithmetic it reverses the
// two 32-bit halves and swaps them.
inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
__asm__ ("bswap %0" : "=r" (value) : "0" (value));
return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
return _byteswap_uint64(value);
#elif CRYPTOPP_BOOL_SLOW_WORD64
return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
// Swap adjacent bytes, then adjacent 16-bit groups, then the halves.
value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
return rotlFixed(value, 32U);
#endif
}
00869
// Reverse the bits of a byte: swap adjacent bits, then adjacent pairs,
// then the nibbles (the final rotate by 4 swaps the nibbles).
inline byte BitReverse(byte value)
{
value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
return rotlFixed(value, 4U);
}
00876
// Reverse the bits of a 16-bit word: reverse bits within each byte,
// then reverse the byte order.
inline word16 BitReverse(word16 value)
{
value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
return ByteReverse(value);
}
00884
// Reverse the bits of a 32-bit word: reverse bits within each byte,
// then reverse the byte order.
inline word32 BitReverse(word32 value)
{
value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
return ByteReverse(value);
}
00892
// Reverse the bits of a 64-bit word; on platforms with slow 64-bit
// arithmetic, reverse the 32-bit halves and swap them instead.
inline word64 BitReverse(word64 value)
{
#if CRYPTOPP_BOOL_SLOW_WORD64
return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
return ByteReverse(value);
#endif
}
00904
// Generic bit reversal: dispatch on sizeof(T) to the fixed-width
// overloads above.  The branches are resolved at compile time.
template <class T>
inline T BitReverse(T value)
{
if (sizeof(T) == 1)
return (T)BitReverse((byte)value);
else if (sizeof(T) == 2)
return (T)BitReverse((word16)value);
else if (sizeof(T) == 4)
return (T)BitReverse((word32)value);
else
{
assert(sizeof(T) == 8);
return (T)BitReverse((word64)value);
}
}
00920
// Return value converted to the requested byte order: unchanged when
// the order is already native, byte-reversed otherwise.
template <class T>
inline T ConditionalByteReverse(ByteOrder order, T value)
{
return NativeByteOrderIs(order) ? value : ByteReverse(value);
}
00926
// Byte-reverse each word of an array of T.  byteCount must be a whole
// number of words; in and out may be the same buffer.
template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
assert(byteCount % sizeof(T) == 0);
size_t count = byteCount/sizeof(T);
for (size_t i=0; i<count; i++)
out[i] = ByteReverse(in[i]);
}
00935
// Copy an array of words into the requested byte order: byte-reverse
// each word when the order differs from native, otherwise just copy
// (skipping the copy entirely when in and out already alias).
template <class T>
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
{
if (!NativeByteOrderIs(order))
ByteReverse(out, in, byteCount);
else if (in != out)
memcpy_s(out, byteCount, in, byteCount);
}
00944
// Load a user-supplied byte string into an array of words in the
// requested byte order: copy the key bytes, zero-pad the remainder,
// then byte-swap only the words the key actually touched.
template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
const size_t U = sizeof(T);
assert(inlen <= outlen*U);
memcpy_s(out, outlen*U, in, inlen);
memset_z((byte *)out+inlen, 0, outlen*U-inlen);
ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}
00954
// Byte-at-a-time word readers/writers, used when word-sized loads and
// stores at arbitrary addresses are not permitted.  Each Get assembles
// a word from individual bytes in the requested order; each Put
// scatters a word to bytes, optionally XORing with xorBlock first.
// The trailing pointer parameter of the Get functions only selects the
// overload; its value is unused.
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
{
return block[0];
}

inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
{
return (order == BIG_ENDIAN_ORDER)
? block[1] | (block[0] << 8)
: block[0] | (block[1] << 8);
}

inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
{
return (order == BIG_ENDIAN_ORDER)
? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
: word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
}

inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
return (order == BIG_ENDIAN_ORDER)
?
(word64(block[7]) |
(word64(block[6]) << 8) |
(word64(block[5]) << 16) |
(word64(block[4]) << 24) |
(word64(block[3]) << 32) |
(word64(block[2]) << 40) |
(word64(block[1]) << 48) |
(word64(block[0]) << 56))
:
(word64(block[0]) |
(word64(block[1]) << 8) |
(word64(block[2]) << 16) |
(word64(block[3]) << 24) |
(word64(block[4]) << 32) |
(word64(block[5]) << 40) |
(word64(block[6]) << 48) |
(word64(block[7]) << 56));
}

// A single byte has no byte order; just store it (XORed if requested).
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
{
block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
{
if (order == BIG_ENDIAN_ORDER)
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
}
else
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
}
}
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
{
if (order == BIG_ENDIAN_ORDER)
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
}
else
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
}
}
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
{
if (order == BIG_ENDIAN_ORDER)
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
}
}
else
{
if (xorBlock)
{
block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
}
else
{
block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
}
}
}
#endif // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
01125
01126 template <class T>
01127 inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
01128 {
01129 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
01130 if (!assumeAligned)
01131 return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
01132 assert(IsAligned<T>(block));
01133 #endif
01134 return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
01135 }
01136
// Convenience overload of GetWord<T>(): delivers the loaded word through
// the "result" out-parameter instead of the return value, letting T be
// deduced from the destination variable.
template <class T>
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
{
result = GetWord<T>(assumeAligned, order, block);
}
01142
01143 template <class T>
01144 inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
01145 {
01146 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
01147 if (!assumeAligned)
01148 return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
01149 assert(IsAligned<T>(block));
01150 assert(IsAligned<T>(xorBlock));
01151 #endif
01152 *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
01153 }
01154
01155 template <class T, class B, bool A=false>
01156 class GetBlock
01157 {
01158 public:
01159 GetBlock(const void *block)
01160 : m_block((const byte *)block) {}
01161
01162 template <class U>
01163 inline GetBlock<T, B, A> & operator()(U &x)
01164 {
01165 CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
01166 x = GetWord<T>(A, B::ToEnum(), m_block);
01167 m_block += sizeof(T);
01168 return *this;
01169 }
01170
01171 private:
01172 const byte *m_block;
01173 };
01174
01175 template <class T, class B, bool A=false>
01176 class PutBlock
01177 {
01178 public:
01179 PutBlock(const void *xorBlock, void *block)
01180 : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
01181
01182 template <class U>
01183 inline PutBlock<T, B, A> & operator()(U x)
01184 {
01185 PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
01186 m_block += sizeof(T);
01187 if (m_xorBlock)
01188 m_xorBlock += sizeof(T);
01189 return *this;
01190 }
01191
01192 private:
01193 const byte *m_xorBlock;
01194 byte *m_block;
01195 };
01196
01197 template <class T, class B, bool GA=false, bool PA=false>
01198 struct BlockGetAndPut
01199 {
01200
01201 static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
01202 typedef PutBlock<T, B, PA> Put;
01203 };
01204
01205 template <class T>
01206 std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
01207 {
01208 if (!NativeByteOrderIs(order))
01209 value = ByteReverse(value);
01210
01211 return std::string((char *)&value, sizeof(value));
01212 }
01213
01214 template <class T>
01215 T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
01216 {
01217 T value = 0;
01218 memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
01219 return NativeByteOrderIs(order) ? value : ByteReverse(value);
01220 }
01221
01222
01223
// SafeShifter wraps the shift operators so that a shift amount that is
// >= the bit width of T (undefined behavior for the raw operators) can be
// made to yield 0 instead. The boolean parameter selects the overflowing
// or non-overflowing specialization at compile time.
template <bool overflow> struct SafeShifter;

// Overflowing case: a shift by the full width or more always produces 0,
// regardless of the value or the exact shift amount.
template<> struct SafeShifter<true>
{
	template <class T>
	static inline T RightShift(T value, unsigned int bits)
	{
		return T(0);
	}

	template <class T>
	static inline T LeftShift(T value, unsigned int bits)
	{
		return T(0);
	}
};
01240
// Non-overflowing case: the shift amount is known to be less than the bit
// width of T, so the raw shift operators are well-defined and used directly.
template<> struct SafeShifter<false>
{
template <class T>
static inline T RightShift(T value, unsigned int bits)
{
return value >> bits;
}

template <class T>
static inline T LeftShift(T value, unsigned int bits)
{
return value << bits;
}
};
01255
// Right-shift value by a compile-time constant "bits", yielding 0 (instead
// of invoking undefined behavior) when bits >= the bit width of T. The
// overflow test is evaluated at compile time and dispatched via SafeShifter.
template <unsigned int bits, class T>
inline T SafeRightShift(T value)
{
return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
}
01261
// Left-shift value by a compile-time constant "bits", yielding 0 (instead
// of invoking undefined behavior) when bits >= the bit width of T. The
// overflow test is evaluated at compile time and dispatched via SafeShifter.
template <unsigned int bits, class T>
inline T SafeLeftShift(T value)
{
return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
}
01267
01268
01269
// The CRYPTOPP_BLOCK_n macros lay out up to 8 variably-sized sub-arrays
// inside a single shared buffer (m_aggregate). For block n with name "n",
// element type "t" and element count "s", each macro defines:
//   m_<n>()     - typed pointer to this sub-array's start within m_aggregate
//   SSn()       - cumulative byte size of sub-arrays 1..n (used as the next
//                 block's offset)
//   m_<n>Size() - this sub-array's element count
// CRYPTOPP_BLOCKS_END(i) closes the list after i blocks: SST() is the total
// byte size, AllocateBlocks() sizes m_aggregate in one allocation, and
// m_aggregate is the AlignedSecByteBlock backing store for all sub-arrays.
#define CRYPTOPP_BLOCK_1(n, t, s) t* m_##n() {return (t *)(m_aggregate+0);} size_t SS1() {return sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_2(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS1());} size_t SS2() {return SS1()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_3(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS2());} size_t SS3() {return SS2()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_4(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS3());} size_t SS4() {return SS3()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_5(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS4());} size_t SS5() {return SS4()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_6(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS5());} size_t SS6() {return SS5()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_7(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS6());} size_t SS7() {return SS6()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_8(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS7());} size_t SS8() {return SS7()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCKS_END(i) size_t SST() {return SS##i();} void AllocateBlocks() {m_aggregate.New(SST());} AlignedSecByteBlock m_aggregate;
01279
01280 NAMESPACE_END
01281
01282 #endif