@@ -573,6 +573,32 @@ namespace xsimd
573
573
return vld1q_f32 (src);
574
574
}
575
575
576
+ /* batch bool version */
577
+ template <class T , class A , detail::enable_sized_t <T, 1 > = 0 >
578
+ XSIMD_INLINE batch_bool<T, A> load_unaligned (bool const * mem, batch_bool<T, A>, requires_arch<neon>) noexcept
579
+ {
580
+ auto vmem = load_unaligned<A>((unsigned char const *)mem, convert<unsigned char > {}, A {});
581
+ return bitwise_cast<T>(0 - vmem);
582
+ }
583
+ template <class T , class A , detail::enable_sized_t <T, 1 > = 0 >
584
+ XSIMD_INLINE batch_bool<T, A> load_aligned (bool const * mem, batch_bool<T, A> t, requires_arch<neon> r) noexcept
585
+ {
586
+ return load_unaligned (mem, t, r);
587
+ }
588
+
589
// Loads 8 packed `bool` values into a batch_bool mask for 2-byte element
// types: vld1_u8 reads the 8 bytes, vmovl_u8 widens each to 16 bits
// (values 0 or 1), and negation turns 1 into 0xFFFF, 0 into 0x0000.
template <class T, class A, detail::enable_sized_t<T, 2> = 0>
XSIMD_INLINE batch_bool<T, A> load_unaligned(bool const* mem, batch_bool<T, A>, requires_arch<neon>) noexcept
{
    uint16x8_t const widened = vmovl_u8(vld1_u8(reinterpret_cast<unsigned char const*>(mem)));
    return bitwise_cast<T>(0 - widened);
}
595
+
596
// Aligned bool-mask load for 2-byte element types — same code path as the
// unaligned load, since the widening conversion cannot exploit alignment.
template <class T, class A, detail::enable_sized_t<T, 2> = 0>
XSIMD_INLINE batch_bool<T, A> load_aligned(bool const* mem, batch_bool<T, A> t, requires_arch<neon> r) noexcept
{
    return load_unaligned(mem, t, r);
}
601
+
576
602
/* ********
577
603
* store *
578
604
*********/
0 commit comments