Commit e74470dd authored by Tobias S. Josefowitz, committed by dev

some portable helpers for unaligned memory access

unlike manual byte shifting, memcpy() gets optimized to one
load/store on x86.
parent 91d6952b
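For illustration, a minimal standalone sketch of the pattern the commit message describes (the load32 names and stdint types are assumptions for this sketch, not part of the patch): a fixed-size memcpy() from an arbitrary pointer is well-defined C, and compilers typically collapse it into one unaligned load on x86, while the shift-and-OR form is not always recognized as a single load.

#include <stdint.h>
#include <string.h>

/* memcpy-based unaligned load: GCC/Clang/MSVC emit a single MOV on x86. */
static inline uint32_t load32(const void *p) {
  uint32_t v;
  memcpy(&v, p, sizeof v);
  return v;
}

/* Manual alternative: four byte loads plus shifts and ORs; always yields
 * the little-endian interpretation, but is not reliably collapsed into
 * one load by every compiler. */
static inline uint32_t load32_le_shift(const unsigned char *p) {
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
       | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void) {
  unsigned char buf[5] = {0, 0x44, 0x33, 0x22, 0x11};
  /* On a little-endian host both reads yield 0x11223344. */
  return load32(buf + 1) == load32_le_shift(buf + 1) ? 0 : 1;
}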
@@ -252,43 +252,63 @@ static inline unsigned INT32 ctz64(const unsigned INT64 i) {
 #endif /* SIZEOF_LONG == 8 || SIZEOF_LONG_LONG == 8 */
 
-#define hton8(x) (x)
-#if !(defined(PIKE_BYTEORDER))
-# error "Byte order could not be decided."
+static INLINE unsigned INT32 bswap32(unsigned INT32 x) {
+#ifdef HAS___BUILTIN_BSWAP32
+  return __builtin_bswap32(x);
+#elif defined(HAS__BSWAP)
+  return _bswap(x);
+#elif defined(HAS__BYTESWAP_ULONG)
+  return _byteswap_ulong((unsigned long)x);
+#else
+  return (((x & 0xff000000) >> 24) | ((x & 0x000000ff) << 24)
+          | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8));
+#endif
+}
+
+static INLINE unsigned INT64 bswap64(unsigned INT64 x) {
+#ifdef HAS___BUILTIN_BSWAP64
+  return __builtin_bswap64(x);
+#elif defined(HAS__BSWAP64)
+  return _bswap64(x);
+#elif defined(HAS__BYTESWAP_UINT64)
+  return _byteswap_uint64((unsigned INT64)x);
+#else
+  return bswap32(x >> 32) | (unsigned INT64)bswap32(x & 0xffffffff) << 32;
+#endif
+}
+
+#define bswap16(x) ((unsigned INT16)bswap32((unsigned INT32)(x) << 16))
+
+#if PIKE_BYTEORDER == 1234
+#define get_unaligned_le16 get_unaligned16
+#define get_unaligned_le32 get_unaligned32
+#define get_unaligned_le64 get_unaligned64
+#define set_unaligned_le16 set_unaligned16
+#define set_unaligned_le32 set_unaligned32
+#define set_unaligned_le64 set_unaligned64
+#define get_unaligned_be16(x) bswap16(get_unaligned16(x))
+#define get_unaligned_be32(x) bswap32(get_unaligned32(x))
+#define get_unaligned_be64(x) bswap64(get_unaligned64(x))
+#define set_unaligned_be16(ptr, x) set_unaligned16(ptr, bswap16(x))
+#define set_unaligned_be32(ptr, x) set_unaligned32(ptr, bswap32(x))
+#define set_unaligned_be64(ptr, x) set_unaligned64(ptr, bswap64(x))
+#define hton32(x) bswap32(x)
+#define hton64(x) bswap64(x)
 #else
-# if PIKE_BYTEORDER == 1234
-/* # warning "little endian" */
-# define LBITMASK(type, n) ((type)1 << (type)(n))
-# define LBITN(type, p, n) (!!((p) & LBITMASK(type, n)))
-# ifdef HAS___BUILTIN_BSWAP32
-# define hton32(x) __builtin_bswap32(x)
-# elif defined(HAS__BSWAP)
-# define hton32(x) _bswap(x)
-# elif defined(HAS__BYTESWAP_ULONG)
-# define hton32(x) _byteswap_ulong((unsigned long)x)
-# else
-# define hton32(x) ((((x) & 0xff000000) >> 24) | (((x) & 0x000000ff) << 24) \
-                    | (((x) & 0x00ff0000) >> 8) | (((x) & 0x0000ff00) << 8))
-# endif
-# ifdef HAS___BUILTIN_BSWAP64
-# define hton64(x) __builtin_bswap64(x)
-# elif defined(HAS__BSWAP64)
-# define hton64(x) _bswap64(x)
-# elif defined(HAS__BYTESWAP_UINT64)
-# define hton64(x) _byteswap_uint64((unsigned __int64)x)
-# else
-# define hton64(x) ((INT64)hton32((int)((x) >> 32)) \
-                    | (((INT64)hton32((int)((x) & 0x00000000ffffffff))) << 32))
-# endif
-# else /* PIKE_BYTEORDER = 1234 */
-/* # warning "big endian" */
-# define hton64(x) (x)
-# define hton32(x) (x)
-# define LBITMASK(type, n) ((type)1 << (type)((sizeof(type) \
-                             - ((type)(n)<<8) - 1)*8 + (type)(n)&7))
-# define LBITN(type, p, n) (!!((p) & LBITMASK(type, n)))
-# endif /* PIKE_BYTEORDER == 1234 */
+#define get_unaligned_be16 get_unaligned16
+#define get_unaligned_be32 get_unaligned32
+#define get_unaligned_be64 get_unaligned64
+#define set_unaligned_be16 set_unaligned16
+#define set_unaligned_be32 set_unaligned32
+#define set_unaligned_be64 set_unaligned64
+#define get_unaligned_le16(x) bswap16(get_unaligned16(x))
+#define get_unaligned_le32(x) bswap32(get_unaligned32(x))
+#define get_unaligned_le64(x) bswap64(get_unaligned64(x))
+#define set_unaligned_le16(ptr, x) set_unaligned16(ptr, bswap16(x))
+#define set_unaligned_le32(ptr, x) set_unaligned32(ptr, bswap32(x))
+#define set_unaligned_le64(ptr, x) set_unaligned64(ptr, bswap64(x))
+#define hton32(x) (x)
+#define hton64(x) (x)
 #endif
 
 #endif /* BITVECTOR_H */
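A note on bswap16 above: shifting the 16-bit value into the high half of a 32-bit word lets the full 32-bit swap deposit its two bytes, reversed, into the low half. A standalone check of the generic #else fallback (the my_bswap* names and stdint types are mine, mirroring the patch's fallback branch):

#include <assert.h>
#include <stdint.h>

/* Generic byte swap, as in the patch's #else branch. */
static inline uint32_t my_bswap32(uint32_t x) {
  return ((x & 0xff000000u) >> 24) | ((x & 0x000000ffu) << 24)
       | ((x & 0x00ff0000u) >> 8)  | ((x & 0x0000ff00u) << 8);
}

/* 16-bit swap via the 32-bit one: x goes to the top half, so the
 * swapped bytes land in the bottom half. */
static inline uint16_t my_bswap16(uint16_t x) {
  return (uint16_t)my_bswap32((uint32_t)x << 16);
}

int main(void) {
  assert(my_bswap32(0x11223344u) == 0x44332211u);
  assert(my_bswap16(0xbeef) == 0xefbe);
  return 0;
}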
@@ -158,6 +158,36 @@ static INLINE void * guaranteed_memset(void * p, int c, size_t n) {
   return (void *)p;
 }
 
+static INLINE unsigned INT64 get_unaligned64(const void * ptr) {
+  unsigned INT64 v;
+  memcpy(&v, ptr, 8);
+  return v;
+}
+
+static INLINE void set_unaligned64(void * ptr, unsigned INT64 v) {
+  memcpy(ptr, &v, 8);
+}
+
+static INLINE unsigned INT32 get_unaligned32(const void * ptr) {
+  unsigned INT32 v;
+  memcpy(&v, ptr, 4);
+  return v;
+}
+
+static INLINE void set_unaligned32(void * ptr, unsigned INT32 v) {
+  memcpy(ptr, &v, 4);
+}
+
+static INLINE unsigned INT16 get_unaligned16(const void * ptr) {
+  unsigned INT16 v;
+  memcpy(&v, ptr, 2);
+  return v;
+}
+
+static INLINE void set_unaligned16(void * ptr, unsigned INT16 v) {
+  memcpy(ptr, &v, 2);
+}
+
 #include "pike_search.h"
 #include "block_alloc_h.h"
...
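Usage sketch for the new accessors (restated standalone with stdint types, since the originals depend on Pike's INT16/INT32/INT64 typedefs and the INLINE macro): a write or read at an odd offset goes through memcpy() instead of a misaligned pointer dereference, which would be undefined behaviour in C and can trap on strict-alignment targets.

#include <stdint.h>
#include <string.h>

static inline uint32_t get_unaligned32(const void *ptr) {
  uint32_t v;
  memcpy(&v, ptr, 4);
  return v;
}

static inline void set_unaligned32(void *ptr, uint32_t v) {
  memcpy(ptr, &v, 4);
}

int main(void) {
  unsigned char buf[7] = {0};
  /* Store and reload a 32-bit value at offset 1: no alignment trap,
   * and the compiler still emits single load/store instructions on x86. */
  set_unaligned32(buf + 1, 0xdeadbeefu);
  return get_unaligned32(buf + 1) == 0xdeadbeefu ? 0 : 1;
}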
@@ -16,6 +16,8 @@ typedef p_wchar2 CB_NAME(char);
 #endif
 #define cb_char CB_NAME(char)
 
+#define hton8(x) (x)
+
 #ifdef CB_SOURCE
 #define CB_ADD_KEY_REF(x) do { if ((x).str) add_ref((x).str); } while(0)
 #define CB_FREE_KEY(x) do { if ((x).str) free_string((x).str); } while(0)
...