aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorDouglas Gilbert <dgilbert@interlog.com>2018-03-16 09:38:38 +0000
committerDouglas Gilbert <dgilbert@interlog.com>2018-03-16 09:38:38 +0000
commit760a4bdf5dd5679cf17b4eacf8c5e178cec30839 (patch)
treeb98920b0811b6463becf292ceeb6c8a1f24e7b4a /include
parent01a31912491efa232e08f71057edb258986b5508 (diff)
downloadsg3_utils-760a4bdf5dd5679cf17b4eacf8c5e178cec30839.tar.gz
Add little/big endian specializations to sg_unaligned.h
git-svn-id: https://svn.bingwo.ca/repos/sg3_utils/trunk@762 6180dd3e-e324-4e3e-922d-17de1ae2f315
Diffstat (limited to 'include')
-rw-r--r--include/Makefile.am2
-rw-r--r--include/Makefile.in11
-rw-r--r--include/sg_unaligned.h446
3 files changed, 344 insertions, 115 deletions
diff --git a/include/Makefile.am b/include/Makefile.am
index 3dc1ef3b..64c27b43 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -8,6 +8,8 @@ scsiinclude_HEADERS = \
sg_cmds_basic.h \
sg_cmds_extra.h \
sg_cmds_mmc.h \
+ sg_pr2serr.h \
+ sg_unaligned.h \
sg_pt.h \
sg_pt_nvme.h
diff --git a/include/Makefile.in b/include/Makefile.in
index a16e29ec..f12229b1 100644
--- a/include/Makefile.in
+++ b/include/Makefile.in
@@ -127,9 +127,9 @@ am__can_run_installinfo = \
esac
am__noinst_HEADERS_DIST = sg_linux_inc.h sg_io_linux.h sg_pt_win32.h
am__scsiinclude_HEADERS_DIST = sg_lib.h sg_lib_data.h sg_cmds.h \
- sg_cmds_basic.h sg_cmds_extra.h sg_cmds_mmc.h sg_pt.h \
- sg_pt_nvme.h sg_linux_inc.h sg_io_linux.h sg_pt_linux.h \
- sg_pt_win32.h
+ sg_cmds_basic.h sg_cmds_extra.h sg_cmds_mmc.h sg_pr2serr.h \
+ sg_unaligned.h sg_pt.h sg_pt_nvme.h sg_linux_inc.h \
+ sg_io_linux.h sg_pt_linux.h sg_pt_win32.h
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -302,8 +302,9 @@ top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
scsiincludedir = $(includedir)/scsi
scsiinclude_HEADERS = sg_lib.h sg_lib_data.h sg_cmds.h sg_cmds_basic.h \
- sg_cmds_extra.h sg_cmds_mmc.h sg_pt.h sg_pt_nvme.h \
- $(am__append_1) $(am__append_2) $(am__append_3)
+ sg_cmds_extra.h sg_cmds_mmc.h sg_pr2serr.h sg_unaligned.h \
+ sg_pt.h sg_pt_nvme.h $(am__append_1) $(am__append_2) \
+ $(am__append_3)
@OS_FREEBSD_TRUE@noinst_HEADERS = \
@OS_FREEBSD_TRUE@ sg_linux_inc.h \
@OS_FREEBSD_TRUE@ sg_io_linux.h \
diff --git a/include/sg_unaligned.h b/include/sg_unaligned.h
index 0a5c92f1..b6f4698d 100644
--- a/include/sg_unaligned.h
+++ b/include/sg_unaligned.h
@@ -8,173 +8,279 @@
* license that can be found in the BSD_LICENSE file.
*/
-#include <stdint.h>
+#include <stdbool.h>
+#include <stdint.h> /* for uint8_t and friends */
+#include <string.h> /* for memcpy */
#ifdef __cplusplus
extern "C" {
#endif
-/* Borrowed from the Linux kernel, via mhvtl */
+/* These inline functions convert integers (always unsigned) to byte streams
+ * and vice versa. They have two goals:
+ * - change the byte ordering of integers between host order and big
+ * endian ("_be") or little endian ("_le")
+ * - copy the big or little endian byte stream so it complies with any
+ * alignment that host integers require
+ *
+ * Host integer to given endian byte stream is a "_put_" function taking
+ * two arguments (integer and pointer to byte stream) returning void.
+ * Given endian byte stream to host integer is a "_get_" function that takes
+ * one argument and returns an integer of appropriate size (uint32_t for 24
+ * bit operations, uint64_t for 48 bit operations).
+ *
+ * Big endian byte format "on the wire" is the default used by SCSI
+ * standards (www.t10.org). Big endian is also the network byte order.
+ * Little endian is used by ATA, PCI and NVMe.
+ */
+
+/* The generic form of these routines was borrowed from the Linux kernel,
+ * via mhvtl. There is a specialised version of the main functions for
+ * little endian or big endian provided that not-quite-standard defines for
+ * endianness are available from the compiler and the <byteswap.h> header
+ * (a GNU extension) has been detected by ./configure . To force the
+ * generic version, use './configure --disable-fast-lebe ' . */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h" /* need this to see if HAVE_BYTESWAP_H */
+#endif
+
+#undef GOT_UNALIGNED_SPECIALS /* just in case */
-/* In the first section below, functions that copy unsigned integers in a
- * computer's native format, to and from an unaligned big endian sequence of
- * bytes. Big endian byte format "on the wire" is the default used by SCSI
- * standards (www.t10.org). Big endian is also the network byte order. */
+#if defined(__BYTE_ORDER__) && defined(HAVE_BYTESWAP_H) && \
+ ! defined(IGNORE_FAST_LEBE)
+
+#if defined(__LITTLE_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+
+#define GOT_UNALIGNED_SPECIALS 1
+
+#include <byteswap.h> /* for bswap_16(), bswap_32() and bswap_64() */
+
+// #warning ">>>>>> Doing Little endian special unaligneds"
static inline uint16_t __get_unaligned_be16(const uint8_t *p)
{
- return p[0] << 8 | p[1];
+ uint16_t u;
+
+ memcpy(&u, p, 2);
+ return bswap_16(u);
}
static inline uint32_t __get_unaligned_be32(const uint8_t *p)
{
- return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
-}
+ uint32_t u;
-/* Assume 48 bit value placed in uint64_t */
-static inline uint64_t __get_unaligned_be48(const uint8_t *p)
-{
- return (uint64_t)__get_unaligned_be16(p) << 32 |
- __get_unaligned_be32(p + 2);
+ memcpy(&u, p, 4);
+ return bswap_32(u);
}
static inline uint64_t __get_unaligned_be64(const uint8_t *p)
{
- return (uint64_t)__get_unaligned_be32(p) << 32 |
- __get_unaligned_be32(p + 4);
+ uint64_t u;
+
+ memcpy(&u, p, 8);
+ return bswap_64(u);
}
static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
{
- *p++ = (uint8_t)(val >> 8);
- *p++ = (uint8_t)val;
+ uint16_t u = bswap_16(val);
+
+ memcpy(p, &u, 2);
}
static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
{
- __put_unaligned_be16(val >> 16, p);
- __put_unaligned_be16(val, p + 2);
+ uint32_t u = bswap_32(val);
+
+ memcpy(p, &u, 4);
}
-/* Assume 48 bit value placed in uint64_t */
-static inline void __put_unaligned_be48(uint64_t val, uint8_t *p)
+static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
{
- __put_unaligned_be16(val >> 32, p);
- __put_unaligned_be32(val, p + 2);
+ uint64_t u = bswap_64(val);
+
+ memcpy(p, &u, 8);
}
-static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
+static inline uint16_t __get_unaligned_le16(const uint8_t *p)
{
- __put_unaligned_be32(val >> 32, p);
- __put_unaligned_be32(val, p + 4);
+ uint16_t u;
+
+ memcpy(&u, p, 2);
+ return u;
}
-static inline uint16_t sg_get_unaligned_be16(const void *p)
+static inline uint32_t __get_unaligned_le32(const uint8_t *p)
{
- return __get_unaligned_be16((const uint8_t *)p);
+ uint32_t u;
+
+ memcpy(&u, p, 4);
+ return u;
}
-static inline uint32_t sg_get_unaligned_be24(const void *p)
+static inline uint64_t __get_unaligned_le64(const uint8_t *p)
{
- return ((const uint8_t *)p)[0] << 16 | ((const uint8_t *)p)[1] << 8 |
- ((const uint8_t *)p)[2];
+ uint64_t u;
+
+ memcpy(&u, p, 8);
+ return u;
}
-static inline uint32_t sg_get_unaligned_be32(const void *p)
+static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
{
- return __get_unaligned_be32((const uint8_t *)p);
+ memcpy(p, &val, 2);
}
-/* Assume 48 bit value placed in uint64_t */
-static inline uint64_t sg_get_unaligned_be48(const void *p)
+static inline void __put_unaligned_le32(uint32_t val, uint8_t *p)
{
- return __get_unaligned_be48((const uint8_t *)p);
+ memcpy(p, &val, 4);
}
-static inline uint64_t sg_get_unaligned_be64(const void *p)
+static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
{
- return __get_unaligned_be64((const uint8_t *)p);
+ memcpy(p, &val, 8);
}
-/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
- * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
- * an 8 byte unsigned integer. */
-static inline uint64_t sg_get_unaligned_be(int num_bytes, const void *p)
+#elif defined(__BIG_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+
+#define GOT_UNALIGNED_SPECIALS 1
+
+#include <byteswap.h>
+
+// #warning ">>>>>> Doing BIG endian special unaligneds"
+
+static inline uint16_t __get_unaligned_le16(const uint8_t *p)
{
- if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
- return 0;
- else {
- const uint8_t * xp = (const uint8_t *)p;
- uint64_t res = *xp;
+ uint16_t u;
- for (++xp; num_bytes > 1; ++xp, --num_bytes)
- res = (res << 8) | *xp;
- return res;
- }
+ memcpy(&u, p, 2);
+ return bswap_16(u);
}
-static inline void sg_put_unaligned_be16(uint16_t val, void *p)
+static inline uint32_t __get_unaligned_le32(const uint8_t *p)
{
- __put_unaligned_be16(val, (uint8_t *)p);
+ uint32_t u;
+
+ memcpy(&u, p, 4);
+ return bswap_32(u);
}
-static inline void sg_put_unaligned_be24(uint32_t val, void *p)
+static inline uint64_t __get_unaligned_le64(const uint8_t *p)
{
- ((uint8_t *)p)[0] = (val >> 16) & 0xff;
- ((uint8_t *)p)[1] = (val >> 8) & 0xff;
- ((uint8_t *)p)[2] = val & 0xff;
+ uint64_t u;
+
+ memcpy(&u, p, 8);
+ return bswap_64(u);
}
-static inline void sg_put_unaligned_be32(uint32_t val, void *p)
+static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
{
- __put_unaligned_be32(val, (uint8_t *)p);
+ uint16_t u = bswap_16(val);
+
+ memcpy(p, &u, 2);
}
-/* Assume 48 bit value placed in uint64_t */
-static inline void sg_put_unaligned_be48(uint64_t val, void *p)
+static inline void __put_unaligned_le32(uint32_t val, uint8_t *p)
{
- __put_unaligned_be48(val, (uint8_t *)p);
+ uint32_t u = bswap_32(val);
+
+ memcpy(p, &u, 4);
}
-static inline void sg_put_unaligned_be64(uint64_t val, void *p)
+static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
{
- __put_unaligned_be64(val, (uint8_t *)p);
+ uint64_t u = bswap_64(val);
+
+ memcpy(p, &u, 8);
}
-/* Since cdb and parameter blocks are often memset to zero before these
- * unaligned function partially fill them, then check for a val of zero
- * and ignore if it is with these variants. */
-static inline void sg_nz_put_unaligned_be16(uint16_t val, void *p)
+static inline uint16_t __get_unaligned_be16(const uint8_t *p)
{
- if (val)
- __put_unaligned_be16(val, (uint8_t *)p);
+ uint16_t u;
+
+ memcpy(&u, p, 2);
+ return u;
}
-static inline void sg_nz_put_unaligned_be24(uint32_t val, void *p)
+static inline uint32_t __get_unaligned_be32(const uint8_t *p)
{
- if (val) {
- ((uint8_t *)p)[0] = (val >> 16) & 0xff;
- ((uint8_t *)p)[1] = (val >> 8) & 0xff;
- ((uint8_t *)p)[2] = val & 0xff;
- }
+ uint32_t u;
+
+ memcpy(&u, p, 4);
+ return u;
}
-static inline void sg_nz_put_unaligned_be32(uint32_t val, void *p)
+static inline uint64_t __get_unaligned_be64(const uint8_t *p)
{
- if (val)
- __put_unaligned_be32(val, (uint8_t *)p);
+ uint64_t u;
+
+ memcpy(&u, p, 8);
+ return u;
}
-static inline void sg_nz_put_unaligned_be64(uint64_t val, void *p)
+static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
{
- if (val)
- __put_unaligned_be64(val, (uint8_t *)p);
+ memcpy(p, &val, 2);
+}
+
+static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
+{
+ memcpy(p, &val, 4);
+}
+
+static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
+{
+ memcpy(p, &val, 8);
}
+#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
+#endif /* #if defined __BYTE_ORDER__ && defined <byteswap.h> &&
+ * ! defined IGNORE_FAST_LEBE */
+
+
+#ifndef GOT_UNALIGNED_SPECIALS
+
+/* Now we have no tricks left, so use the only way this can be done
+ * correctly in C safely: lots of shifts. */
+
+// #warning ">>>>>> Doing GENERIC unaligneds"
+
+static inline uint16_t __get_unaligned_be16(const uint8_t *p)
+{
+ return p[0] << 8 | p[1];
+}
+
+static inline uint32_t __get_unaligned_be32(const uint8_t *p)
+{
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline uint64_t __get_unaligned_be64(const uint8_t *p)
+{
+ return (uint64_t)__get_unaligned_be32(p) << 32 |
+ __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
+{
+ *p++ = (uint8_t)(val >> 8);
+ *p++ = (uint8_t)val;
+}
+
+static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
+{
+ __put_unaligned_be16(val >> 16, p);
+ __put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
+{
+ __put_unaligned_be32(val >> 32, p);
+ __put_unaligned_be32(val, p + 4);
+}
-/* Below are the little endian equivalents of the big endian functions
- * above. Little endian is used by ATA, PCI and NVMe.
- */
static inline uint16_t __get_unaligned_le16(const uint8_t *p)
{
@@ -210,27 +316,50 @@ static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
__put_unaligned_le32(val, p);
}
-static inline uint16_t sg_get_unaligned_le16(const void *p)
+#endif /* #ifndef GOT_UNALIGNED_SPECIALS */
+
+
+/* These are the end user functions, essentially dummies just doing a
+ * cast to the __ functions that do the work. Hopefully the compiler
+ * inlines these functions (as instructed). */
+static inline uint16_t sg_get_unaligned_be16(const void *p)
{
- return __get_unaligned_le16((const uint8_t *)p);
+ return __get_unaligned_be16((const uint8_t *)p);
}
-static inline uint32_t sg_get_unaligned_le24(const void *p)
+static inline uint32_t sg_get_unaligned_be32(const void *p)
{
- return (uint32_t)__get_unaligned_le16((const uint8_t *)p) |
- ((const uint8_t *)p)[2] << 16;
+ return __get_unaligned_be32((const uint8_t *)p);
}
-static inline uint32_t sg_get_unaligned_le32(const void *p)
+static inline uint64_t sg_get_unaligned_be64(const void *p)
{
- return __get_unaligned_le32((const uint8_t *)p);
+ return __get_unaligned_be64((const uint8_t *)p);
}
-/* Assume 48 bit value placed in uint64_t */
-static inline uint64_t sg_get_unaligned_le48(const void *p)
+static inline void sg_put_unaligned_be16(uint16_t val, void *p)
{
- return (uint64_t)__get_unaligned_le16((const uint8_t *)p + 4) << 32 |
- __get_unaligned_le32((const uint8_t *)p);
+ __put_unaligned_be16(val, (uint8_t *)p);
+}
+
+static inline void sg_put_unaligned_be32(uint32_t val, void *p)
+{
+ __put_unaligned_be32(val, (uint8_t *)p);
+}
+
+static inline void sg_put_unaligned_be64(uint64_t val, void *p)
+{
+ __put_unaligned_be64(val, (uint8_t *)p);
+}
+
+static inline uint16_t sg_get_unaligned_le16(const void *p)
+{
+ return __get_unaligned_le16((const uint8_t *)p);
+}
+
+static inline uint32_t sg_get_unaligned_le32(const void *p)
+{
+ return __get_unaligned_le32((const uint8_t *)p);
}
static inline uint64_t sg_get_unaligned_le64(const void *p)
@@ -238,26 +367,88 @@ static inline uint64_t sg_get_unaligned_le64(const void *p)
return __get_unaligned_le64((const uint8_t *)p);
}
+static inline void sg_put_unaligned_le16(uint16_t val, void *p)
+{
+ __put_unaligned_le16(val, (uint8_t *)p);
+}
+
+static inline void sg_put_unaligned_le32(uint32_t val, void *p)
+{
+ __put_unaligned_le32(val, (uint8_t *)p);
+}
+
+static inline void sg_put_unaligned_le64(uint64_t val, void *p)
+{
+ __put_unaligned_le64(val, (uint8_t *)p);
+}
+
+/* Following are lesser used conversions that don't have specializations
+ * for endianness; big endian first. In summary these are the 24, 48 bit and
+ * given-length conversions plus the "nz" conditional put conversions. */
+
+/* Now big endian, get 24+48 then put 24+48 */
+static inline uint32_t sg_get_unaligned_be24(const void *p)
+{
+ return ((const uint8_t *)p)[0] << 16 | ((const uint8_t *)p)[1] << 8 |
+ ((const uint8_t *)p)[2];
+}
+
+/* Assume 48 bit value placed in uint64_t */
+static inline uint64_t sg_get_unaligned_be48(const void *p)
+{
+ return (uint64_t)__get_unaligned_be16((const uint8_t *)p) << 32 |
+ __get_unaligned_be32((const uint8_t *)p + 2);
+}
+
/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
* 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
* an 8 byte unsigned integer. */
-static inline uint64_t sg_get_unaligned_le(int num_bytes, const void *p)
+static inline uint64_t sg_get_unaligned_be(int num_bytes, const void *p)
{
if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
return 0;
else {
- const uint8_t * xp = (const uint8_t *)p + (num_bytes - 1);
+ const uint8_t * xp = (const uint8_t *)p;
uint64_t res = *xp;
- for (--xp; num_bytes > 1; --xp, --num_bytes)
+ for (++xp; num_bytes > 1; ++xp, --num_bytes)
res = (res << 8) | *xp;
return res;
}
}
-static inline void sg_put_unaligned_le16(uint16_t val, void *p)
+static inline void sg_put_unaligned_be24(uint32_t val, void *p)
{
- __put_unaligned_le16(val, (uint8_t *)p);
+ ((uint8_t *)p)[0] = (val >> 16) & 0xff;
+ ((uint8_t *)p)[1] = (val >> 8) & 0xff;
+ ((uint8_t *)p)[2] = val & 0xff;
+}
+
+/* Assume 48 bit value placed in uint64_t */
+static inline void __put_unaligned_be48(uint64_t val, uint8_t *p)
+{
+ __put_unaligned_be16(val >> 32, p);
+ __put_unaligned_be32(val, p + 2);
+}
+
+/* Assume 48 bit value placed in uint64_t */
+static inline void sg_put_unaligned_be48(uint64_t val, void *p)
+{
+ __put_unaligned_be48(val, (uint8_t *)p);
+}
+
+/* Now little endian, get 24+48 then put 24+48 */
+static inline uint32_t sg_get_unaligned_le24(const void *p)
+{
+ return (uint32_t)__get_unaligned_le16((const uint8_t *)p) |
+ ((const uint8_t *)p)[2] << 16;
+}
+
+/* Assume 48 bit value placed in uint64_t */
+static inline uint64_t sg_get_unaligned_le48(const void *p)
+{
+ return (uint64_t)__get_unaligned_le16((const uint8_t *)p + 4) << 32 |
+ __get_unaligned_le32((const uint8_t *)p);
}
static inline void sg_put_unaligned_le24(uint32_t val, void *p)
@@ -267,11 +458,6 @@ static inline void sg_put_unaligned_le24(uint32_t val, void *p)
((uint8_t *)p)[0] = val & 0xff;
}
-static inline void sg_put_unaligned_le32(uint32_t val, void *p)
-{
- __put_unaligned_le32(val, (uint8_t *)p);
-}
-
/* Assume 48 bit value placed in uint64_t */
static inline void sg_put_unaligned_le48(uint64_t val, void *p)
{
@@ -283,14 +469,53 @@ static inline void sg_put_unaligned_le48(uint64_t val, void *p)
((uint8_t *)p)[0] = val & 0xff;
}
-static inline void sg_put_unaligned_le64(uint64_t val, void *p)
+/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
+ * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
+ * an 8 byte unsigned integer. */
+static inline uint64_t sg_get_unaligned_le(int num_bytes, const void *p)
{
- __put_unaligned_le64(val, (uint8_t *)p);
+ if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
+ return 0;
+ else {
+ const uint8_t * xp = (const uint8_t *)p + (num_bytes - 1);
+ uint64_t res = *xp;
+
+ for (--xp; num_bytes > 1; --xp, --num_bytes)
+ res = (res << 8) | *xp;
+ return res;
+ }
}
/* Since cdb and parameter blocks are often memset to zero before these
* unaligned function partially fill them, then check for a val of zero
- * and ignore if it is with these variants. */
+ * and ignore if it is with these variants. First big endian, then little */
+static inline void sg_nz_put_unaligned_be16(uint16_t val, void *p)
+{
+ if (val)
+ __put_unaligned_be16(val, (uint8_t *)p);
+}
+
+static inline void sg_nz_put_unaligned_be24(uint32_t val, void *p)
+{
+ if (val) {
+ ((uint8_t *)p)[0] = (val >> 16) & 0xff;
+ ((uint8_t *)p)[1] = (val >> 8) & 0xff;
+ ((uint8_t *)p)[2] = val & 0xff;
+ }
+}
+
+static inline void sg_nz_put_unaligned_be32(uint32_t val, void *p)
+{
+ if (val)
+ __put_unaligned_be32(val, (uint8_t *)p);
+}
+
+static inline void sg_nz_put_unaligned_be64(uint64_t val, void *p)
+{
+ if (val)
+ __put_unaligned_be64(val, (uint8_t *)p);
+}
+
static inline void sg_nz_put_unaligned_le16(uint16_t val, void *p)
{
if (val)
@@ -318,6 +543,7 @@ static inline void sg_nz_put_unaligned_le64(uint64_t val, void *p)
__put_unaligned_le64(val, (uint8_t *)p);
}
+
#ifdef __cplusplus
}
#endif