use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};
use core::arch::aarch64::*;
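// `float32x4_t` has no const constructor, so `new` and `splat` write the lanes into the
// `a` arm of this `#[repr(C)]` union and read the NEON register back out of `v`. Both
// arms are 16 bytes wide, so this is a plain bit reinterpret.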
#[repr(C)]
union UnionCast {
a: [f32; 4],
v: Vec4,
}
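/// Creates a 4-dimensional vector.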
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
Vec4::new(x, y, z, w)
}
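/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms; here the storage is a
/// NEON `float32x4_t`, which makes this type 16 byte aligned.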
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);
impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);
    /// All ones.
    pub const ONE: Self = Self::splat(1.0);
    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);
    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);
    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);
    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);
    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);
    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);
    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);
    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);
    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);
    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);
    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);
    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);
    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);
    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
    /// `true` if this backend uses `core::simd` for storage and math.
    pub const USES_CORE_SIMD: bool = false;
    /// `true` if this backend uses aarch64 NEON intrinsics (the backend in this file).
    pub const USES_NEON: bool = true;
    /// `true` if this backend uses plain scalar math.
    pub const USES_SCALAR_MATH: bool = false;
    /// `true` if this backend uses x86 SSE2 intrinsics.
    pub const USES_SSE2: bool = false;
    /// `true` if this backend uses wasm32 SIMD intrinsics.
    pub const USES_WASM32_SIMD: bool = false;
#[inline(always)]
#[must_use]
pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
unsafe { UnionCast { a: [x, y, z, w] }.v }
}
#[inline]
#[must_use]
pub const fn splat(v: f32) -> Self {
unsafe { UnionCast { a: [v; 4] }.v }
}
#[inline]
#[must_use]
pub fn map<F>(self, f: F) -> Self
where
F: Fn(f32) -> f32,
{
Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
}
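    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element based on the mask.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and a false
    /// element uses the element from `if_false`. A minimal doctest sketch, assuming this
    /// module builds as part of the `glam` crate (whose root re-exports `Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// let b = Vec4::splat(2.5);
    /// let mask = a.cmpgt(b); // [false, false, true, true]
    /// assert_eq!(Vec4::select(mask, a, b), Vec4::new(2.5, 2.5, 3.0, 4.0));
    /// ```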
#[inline]
#[must_use]
pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
}
#[inline]
#[must_use]
pub const fn from_array(a: [f32; 4]) -> Self {
Self::new(a[0], a[1], a[2], a[3])
}
#[inline]
#[must_use]
pub const fn to_array(&self) -> [f32; 4] {
unsafe { *(self as *const Vec4 as *const [f32; 4]) }
}
#[inline]
#[must_use]
pub const fn from_slice(slice: &[f32]) -> Self {
assert!(slice.len() >= 4);
Self::new(slice[0], slice[1], slice[2], slice[3])
}
#[inline]
pub fn write_to_slice(self, slice: &mut [f32]) {
assert!(slice.len() >= 4);
unsafe {
vst1q_f32(slice.as_mut_ptr(), self.0);
}
}
#[inline]
#[must_use]
pub fn truncate(self) -> Vec3 {
use crate::swizzles::Vec4Swizzles;
self.xyz()
}
#[inline]
#[must_use]
pub fn with_x(mut self, x: f32) -> Self {
self.x = x;
self
}
#[inline]
#[must_use]
pub fn with_y(mut self, y: f32) -> Self {
self.y = y;
self
}
#[inline]
#[must_use]
pub fn with_z(mut self, z: f32) -> Self {
self.z = z;
self
}
#[inline]
#[must_use]
pub fn with_w(mut self, w: f32) -> Self {
self.w = w;
self
}
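    /// Computes the dot product of `self` and `rhs`.
    ///
    /// A minimal doctest sketch, assuming this module builds as part of the `glam` crate
    /// (whose root re-exports `Vec4`):
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// let b = Vec4::new(5.0, 6.0, 7.0, 8.0);
    /// assert_eq!(a.dot(b), 70.0);
    /// ```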
#[inline]
#[must_use]
pub fn dot(self, rhs: Self) -> f32 {
unsafe { dot4(self.0, rhs.0) }
}
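    /// Returns a vector where every element is the dot product of `self` and `rhs`.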
#[inline]
#[must_use]
pub fn dot_into_vec(self, rhs: Self) -> Self {
Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
}
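    // NOTE: `min` and `max` lower to `vminq_f32`/`vmaxq_f32`, whose NaN propagation does
    // not follow IEEE 754-2008 `minNum`/`maxNum` semantics and may differ from other SIMD
    // backends and from scalar `f32::min`/`f32::max`.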
#[inline]
#[must_use]
pub fn min(self, rhs: Self) -> Self {
Self(unsafe { vminq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn max(self, rhs: Self) -> Self {
Self(unsafe { vmaxq_f32(self.0, rhs.0) })
}
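    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.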
#[inline]
#[must_use]
pub fn clamp(self, min: Self, max: Self) -> Self {
glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
self.max(min).min(max)
}
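    // NOTE: `min_element` and `max_element` use `vminnmvq_f32`/`vmaxnmvq_f32`, which follow
    // IEEE 754-2008 `minNum`/`maxNum` semantics: when exactly one of two compared lanes is
    // NaN, the numeric lane wins, so a single NaN lane is ignored by these reductions.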
#[inline]
#[must_use]
pub fn min_element(self) -> f32 {
unsafe { vminnmvq_f32(self.0) }
}
#[inline]
#[must_use]
pub fn max_element(self) -> f32 {
unsafe { vmaxnmvq_f32(self.0) }
}
#[doc(alias = "argmin")]
#[inline]
#[must_use]
pub fn min_position(self) -> usize {
let mut min = self.x;
let mut index = 0;
if self.y < min {
min = self.y;
index = 1;
}
if self.z < min {
min = self.z;
index = 2;
}
if self.w < min {
index = 3;
}
index
}
#[doc(alias = "argmax")]
#[inline]
#[must_use]
pub fn max_position(self) -> usize {
let mut max = self.x;
let mut index = 0;
if self.y > max {
max = self.y;
index = 1;
}
if self.z > max {
max = self.z;
index = 2;
}
if self.w > max {
index = 3;
}
index
}
#[inline]
#[must_use]
pub fn element_sum(self) -> f32 {
unsafe { vaddvq_f32(self.0) }
}
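    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * self.z * self.w`.
    ///
    /// NEON has a horizontal add (`vaddvq_f32`, used by `element_sum` above) but no
    /// horizontal multiply, so the lanes are folded one at a time with scalar-by-lane
    /// multiplies (`vmuls_laneq_f32`).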
#[inline]
#[must_use]
pub fn element_product(self) -> f32 {
unsafe {
let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
let s = vmuls_laneq_f32(s, self.0, 2);
vmuls_laneq_f32(s, self.0, 3)
}
}
#[inline]
#[must_use]
pub fn cmpeq(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn cmpne(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
}
#[inline]
#[must_use]
pub fn cmpge(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn cmpgt(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn cmple(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn cmplt(self, rhs: Self) -> BVec4A {
BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
}
#[inline]
#[must_use]
pub fn abs(self) -> Self {
Self(unsafe { vabsq_f32(self.0) })
}
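    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    ///
    /// The bit trick below ANDs each lane with the bit pattern of `-1.0` (keeping the sign
    /// bit plus exponent bits that are a subset of `1.0`'s) and ORs in the bit pattern of
    /// `1.0`, yielding exactly `1.0` or `-1.0` per lane; NaN lanes are then restored with
    /// `select`.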
#[inline]
#[must_use]
pub fn signum(self) -> Self {
let result = Self(unsafe {
vreinterpretq_f32_u32(vorrq_u32(
vandq_u32(
vreinterpretq_u32_f32(self.0),
vreinterpretq_u32_f32(Self::NEG_ONE.0),
),
vreinterpretq_u32_f32(Self::ONE.0),
))
});
let mask = self.is_nan_mask();
Self::select(mask, self, result)
}
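    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    ///
    /// `-0.0` serves as the sign-bit mask: the sign bit of each lane is taken from `rhs`
    /// and the remaining exponent and mantissa bits from `self`.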
#[inline]
#[must_use]
pub fn copysign(self, rhs: Self) -> Self {
let mask = Self::splat(-0.0);
Self(unsafe {
vreinterpretq_f32_u32(vorrq_u32(
vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
vandq_u32(
vreinterpretq_u32_f32(self.0),
vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
),
))
})
}
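    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x`
    /// goes into the first lowest bit, element `y` into the second, etc.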
#[inline]
#[must_use]
pub fn is_negative_bitmask(self) -> u32 {
unsafe {
let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
let x = vgetq_lane_u32(m, 0) >> 31;
let y = vgetq_lane_u32(m, 1) >> 31;
let z = vgetq_lane_u32(m, 2) >> 31;
let w = vgetq_lane_u32(m, 3) >> 31;
x | y << 1 | z << 2 | w << 3
}
}
#[inline]
#[must_use]
pub fn is_finite(self) -> bool {
self.is_finite_mask().all()
}
    /// Performs `is_finite` on each element of `self`, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), z.is_finite(), w.is_finite()]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
}
#[inline]
#[must_use]
pub fn is_nan(self) -> bool {
self.is_nan_mask().any()
}
#[inline]
#[must_use]
pub fn is_nan_mask(self) -> BVec4A {
BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
}
#[doc(alias = "magnitude")]
#[inline]
#[must_use]
pub fn length(self) -> f32 {
math::sqrt(self.dot(self))
}
#[doc(alias = "magnitude2")]
#[inline]
#[must_use]
pub fn length_squared(self) -> f32 {
self.dot(self)
}
#[inline]
#[must_use]
pub fn length_recip(self) -> f32 {
self.length().recip()
}
#[inline]
#[must_use]
pub fn distance(self, rhs: Self) -> f32 {
(self - rhs).length()
}
#[inline]
#[must_use]
pub fn distance_squared(self, rhs: Self) -> f32 {
(self - rhs).length_squared()
}
#[inline]
#[must_use]
pub fn div_euclid(self, rhs: Self) -> Self {
Self::new(
math::div_euclid(self.x, rhs.x),
math::div_euclid(self.y, rhs.y),
math::div_euclid(self.z, rhs.z),
math::div_euclid(self.w, rhs.w),
)
}
#[inline]
#[must_use]
pub fn rem_euclid(self, rhs: Self) -> Self {
Self::new(
math::rem_euclid(self.x, rhs.x),
math::rem_euclid(self.y, rhs.y),
math::rem_euclid(self.z, rhs.z),
math::rem_euclid(self.w, rhs.w),
)
}
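    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and not of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// A minimal doctest sketch, assuming this module builds as part of the `glam` crate:
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(0.0, 0.0, 3.0, 4.0).normalize();
    /// assert!(v.is_normalized());
    /// assert!(v.abs_diff_eq(Vec4::new(0.0, 0.0, 0.6, 0.8), 1e-6));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.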
#[inline]
#[must_use]
pub fn normalize(self) -> Self {
#[allow(clippy::let_and_return)]
let normalized = self.mul(self.length_recip());
glam_assert!(normalized.is_finite());
normalized
}
#[inline]
#[must_use]
pub fn try_normalize(self) -> Option<Self> {
let rcp = self.length_recip();
if rcp.is_finite() && rcp > 0.0 {
Some(self * rcp)
} else {
None
}
}
#[inline]
#[must_use]
pub fn normalize_or(self, fallback: Self) -> Self {
let rcp = self.length_recip();
if rcp.is_finite() && rcp > 0.0 {
self * rcp
} else {
fallback
}
}
#[inline]
#[must_use]
pub fn normalize_or_zero(self) -> Self {
self.normalize_or(Self::ZERO)
}
#[inline]
#[must_use]
pub fn normalize_and_length(self) -> (Self, f32) {
let length = self.length();
let rcp = 1.0 / length;
if rcp.is_finite() && rcp > 0.0 {
(self * rcp, length)
} else {
(Self::X, 0.0)
}
}
#[inline]
#[must_use]
pub fn is_normalized(self) -> bool {
math::abs(self.length_squared() - 1.0) <= 2e-4
}
#[inline]
#[must_use]
pub fn project_onto(self, rhs: Self) -> Self {
let other_len_sq_rcp = rhs.dot(rhs).recip();
glam_assert!(other_len_sq_rcp.is_finite());
rhs * self.dot(rhs) * other_len_sq_rcp
}
#[doc(alias("plane"))]
#[inline]
#[must_use]
pub fn reject_from(self, rhs: Self) -> Self {
self - self.project_onto(rhs)
}
#[inline]
#[must_use]
pub fn project_onto_normalized(self, rhs: Self) -> Self {
glam_assert!(rhs.is_normalized());
rhs * self.dot(rhs)
}
#[doc(alias("plane"))]
#[inline]
#[must_use]
pub fn reject_from_normalized(self, rhs: Self) -> Self {
self - self.project_onto_normalized(rhs)
}
#[inline]
#[must_use]
pub fn round(self) -> Self {
Self(unsafe { vrndnq_f32(self.0) })
}
#[inline]
#[must_use]
pub fn floor(self) -> Self {
Self(unsafe { vrndmq_f32(self.0) })
}
#[inline]
#[must_use]
pub fn ceil(self) -> Self {
Self(unsafe { vrndpq_f32(self.0) })
}
#[inline]
#[must_use]
pub fn trunc(self) -> Self {
Self(unsafe { vrndq_f32(self.0) })
}
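    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.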
#[inline]
#[must_use]
pub fn fract(self) -> Self {
self - self.trunc()
}
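    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.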
#[inline]
#[must_use]
pub fn fract_gl(self) -> Self {
self - self.floor()
}
#[inline]
#[must_use]
pub fn exp(self) -> Self {
Self::new(
math::exp(self.x),
math::exp(self.y),
math::exp(self.z),
math::exp(self.w),
)
}
#[inline]
#[must_use]
pub fn powf(self, n: f32) -> Self {
Self::new(
math::powf(self.x, n),
math::powf(self.y, n),
math::powf(self.z, n),
math::powf(self.w, n),
)
}
#[inline]
#[must_use]
pub fn recip(self) -> Self {
Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
}
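    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    ///
    /// A minimal doctest sketch, assuming this module builds as part of the `glam` crate:
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::ZERO;
    /// let b = Vec4::splat(2.0);
    /// assert_eq!(a.lerp(b, 0.5), Vec4::ONE);
    /// ```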
#[doc(alias = "mix")]
#[inline]
#[must_use]
pub fn lerp(self, rhs: Self, s: f32) -> Self {
self * (1.0 - s) + rhs * s
}
#[inline]
#[must_use]
pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
let a = rhs - *self;
let len = a.length();
if len <= d || len <= 1e-4 {
return rhs;
}
*self + a / len * d
}
    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.
    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
(self + rhs) * 0.5
}
#[inline]
#[must_use]
pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
}
#[inline]
#[must_use]
pub fn clamp_length(self, min: f32, max: f32) -> Self {
glam_assert!(0.0 <= min);
glam_assert!(min <= max);
let length_sq = self.length_squared();
if length_sq < min * min {
min * (self / math::sqrt(length_sq))
} else if length_sq > max * max {
max * (self / math::sqrt(length_sq))
} else {
self
}
}
#[inline]
#[must_use]
pub fn clamp_length_max(self, max: f32) -> Self {
glam_assert!(0.0 <= max);
let length_sq = self.length_squared();
if length_sq > max * max {
max * (self / math::sqrt(length_sq))
} else {
self
}
}
#[inline]
#[must_use]
pub fn clamp_length_min(self, min: f32) -> Self {
glam_assert!(0.0 <= min);
let length_sq = self.length_squared();
if length_sq < min * min {
min * (self / math::sqrt(length_sq))
} else {
self
}
}
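    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. On aarch64 this lowers to
    /// `vfmaq_f32`, a true hardware fused multiply-add.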
#[inline]
#[must_use]
pub fn mul_add(self, a: Self, b: Self) -> Self {
Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
}
#[inline]
#[must_use]
pub fn reflect(self, normal: Self) -> Self {
glam_assert!(normal.is_normalized());
self - 2.0 * self.dot(normal) * normal
}
#[inline]
#[must_use]
pub fn refract(self, normal: Self, eta: f32) -> Self {
glam_assert!(self.is_normalized());
glam_assert!(normal.is_normalized());
let n_dot_i = normal.dot(self);
let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
if k >= 0.0 {
eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
} else {
Self::ZERO
}
}
#[inline]
#[must_use]
pub fn as_dvec4(&self) -> crate::DVec4 {
crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
}
#[inline]
#[must_use]
pub fn as_i8vec4(&self) -> crate::I8Vec4 {
crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
}
#[inline]
#[must_use]
pub fn as_u8vec4(&self) -> crate::U8Vec4 {
crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
}
#[inline]
#[must_use]
pub fn as_i16vec4(&self) -> crate::I16Vec4 {
crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
}
#[inline]
#[must_use]
pub fn as_u16vec4(&self) -> crate::U16Vec4 {
crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
}
#[inline]
#[must_use]
pub fn as_ivec4(&self) -> crate::IVec4 {
crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
}
#[inline]
#[must_use]
pub fn as_uvec4(&self) -> crate::UVec4 {
crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
}
#[inline]
#[must_use]
pub fn as_i64vec4(&self) -> crate::I64Vec4 {
crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
}
#[inline]
#[must_use]
pub fn as_u64vec4(&self) -> crate::U64Vec4 {
crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
}
#[inline]
#[must_use]
pub fn as_usizevec4(&self) -> crate::USizeVec4 {
crate::USizeVec4::new(
self.x as usize,
self.y as usize,
self.z as usize,
self.w as usize,
)
}
}
impl Default for Vec4 {
#[inline(always)]
fn default() -> Self {
Self::ZERO
}
}
impl PartialEq for Vec4 {
#[inline]
fn eq(&self, rhs: &Self) -> bool {
self.cmpeq(*rhs).all()
}
}
impl Div<Vec4> for Vec4 {
type Output = Self;
#[inline]
fn div(self, rhs: Self) -> Self {
Self(unsafe { vdivq_f32(self.0, rhs.0) })
}
}
impl Div<&Vec4> for Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &Vec4) -> Vec4 {
self.div(*rhs)
}
}
impl Div<&Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &Vec4) -> Vec4 {
(*self).div(*rhs)
}
}
impl Div<Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: Vec4) -> Vec4 {
(*self).div(rhs)
}
}
impl DivAssign<Vec4> for Vec4 {
#[inline]
fn div_assign(&mut self, rhs: Self) {
self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
}
}
impl DivAssign<&Vec4> for Vec4 {
#[inline]
fn div_assign(&mut self, rhs: &Vec4) {
self.div_assign(*rhs)
}
}
impl Div<f32> for Vec4 {
type Output = Self;
#[inline]
fn div(self, rhs: f32) -> Self {
Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
}
}
impl Div<&f32> for Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &f32) -> Vec4 {
self.div(*rhs)
}
}
impl Div<&f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &f32) -> Vec4 {
(*self).div(*rhs)
}
}
impl Div<f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn div(self, rhs: f32) -> Vec4 {
(*self).div(rhs)
}
}
impl DivAssign<f32> for Vec4 {
#[inline]
fn div_assign(&mut self, rhs: f32) {
self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
}
}
impl DivAssign<&f32> for Vec4 {
#[inline]
fn div_assign(&mut self, rhs: &f32) {
self.div_assign(*rhs)
}
}
impl Div<Vec4> for f32 {
type Output = Vec4;
#[inline]
fn div(self, rhs: Vec4) -> Vec4 {
Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
}
}
impl Div<&Vec4> for f32 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &Vec4) -> Vec4 {
self.div(*rhs)
}
}
impl Div<&Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn div(self, rhs: &Vec4) -> Vec4 {
(*self).div(*rhs)
}
}
impl Div<Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn div(self, rhs: Vec4) -> Vec4 {
(*self).div(rhs)
}
}
impl Mul<Vec4> for Vec4 {
type Output = Self;
#[inline]
fn mul(self, rhs: Self) -> Self {
Self(unsafe { vmulq_f32(self.0, rhs.0) })
}
}
impl Mul<&Vec4> for Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &Vec4) -> Vec4 {
self.mul(*rhs)
}
}
impl Mul<&Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &Vec4) -> Vec4 {
(*self).mul(*rhs)
}
}
impl Mul<Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: Vec4) -> Vec4 {
(*self).mul(rhs)
}
}
impl MulAssign<Vec4> for Vec4 {
#[inline]
fn mul_assign(&mut self, rhs: Self) {
self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
}
}
impl MulAssign<&Vec4> for Vec4 {
#[inline]
fn mul_assign(&mut self, rhs: &Vec4) {
self.mul_assign(*rhs)
}
}
impl Mul<f32> for Vec4 {
type Output = Self;
#[inline]
fn mul(self, rhs: f32) -> Self {
Self(unsafe { vmulq_n_f32(self.0, rhs) })
}
}
impl Mul<&f32> for Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &f32) -> Vec4 {
self.mul(*rhs)
}
}
impl Mul<&f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &f32) -> Vec4 {
(*self).mul(*rhs)
}
}
impl Mul<f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: f32) -> Vec4 {
(*self).mul(rhs)
}
}
impl MulAssign<f32> for Vec4 {
#[inline]
fn mul_assign(&mut self, rhs: f32) {
self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
}
}
impl MulAssign<&f32> for Vec4 {
#[inline]
fn mul_assign(&mut self, rhs: &f32) {
self.mul_assign(*rhs)
}
}
impl Mul<Vec4> for f32 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: Vec4) -> Vec4 {
Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
}
}
impl Mul<&Vec4> for f32 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &Vec4) -> Vec4 {
self.mul(*rhs)
}
}
impl Mul<&Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: &Vec4) -> Vec4 {
(*self).mul(*rhs)
}
}
impl Mul<Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn mul(self, rhs: Vec4) -> Vec4 {
(*self).mul(rhs)
}
}
impl Add<Vec4> for Vec4 {
type Output = Self;
#[inline]
fn add(self, rhs: Self) -> Self {
Self(unsafe { vaddq_f32(self.0, rhs.0) })
}
}
impl Add<&Vec4> for Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &Vec4) -> Vec4 {
self.add(*rhs)
}
}
impl Add<&Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &Vec4) -> Vec4 {
(*self).add(*rhs)
}
}
impl Add<Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: Vec4) -> Vec4 {
(*self).add(rhs)
}
}
impl AddAssign<Vec4> for Vec4 {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
}
}
impl AddAssign<&Vec4> for Vec4 {
#[inline]
fn add_assign(&mut self, rhs: &Vec4) {
self.add_assign(*rhs)
}
}
impl Add<f32> for Vec4 {
type Output = Self;
#[inline]
fn add(self, rhs: f32) -> Self {
Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
}
}
impl Add<&f32> for Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &f32) -> Vec4 {
self.add(*rhs)
}
}
impl Add<&f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &f32) -> Vec4 {
(*self).add(*rhs)
}
}
impl Add<f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn add(self, rhs: f32) -> Vec4 {
(*self).add(rhs)
}
}
impl AddAssign<f32> for Vec4 {
#[inline]
fn add_assign(&mut self, rhs: f32) {
self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
}
}
impl AddAssign<&f32> for Vec4 {
#[inline]
fn add_assign(&mut self, rhs: &f32) {
self.add_assign(*rhs)
}
}
impl Add<Vec4> for f32 {
type Output = Vec4;
#[inline]
fn add(self, rhs: Vec4) -> Vec4 {
Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
}
}
impl Add<&Vec4> for f32 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &Vec4) -> Vec4 {
self.add(*rhs)
}
}
impl Add<&Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn add(self, rhs: &Vec4) -> Vec4 {
(*self).add(*rhs)
}
}
impl Add<Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn add(self, rhs: Vec4) -> Vec4 {
(*self).add(rhs)
}
}
impl Sub<Vec4> for Vec4 {
type Output = Self;
#[inline]
fn sub(self, rhs: Self) -> Self {
Self(unsafe { vsubq_f32(self.0, rhs.0) })
}
}
impl Sub<&Vec4> for Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &Vec4) -> Vec4 {
self.sub(*rhs)
}
}
impl Sub<&Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &Vec4) -> Vec4 {
(*self).sub(*rhs)
}
}
impl Sub<Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: Vec4) -> Vec4 {
(*self).sub(rhs)
}
}
impl SubAssign<Vec4> for Vec4 {
#[inline]
fn sub_assign(&mut self, rhs: Vec4) {
self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
}
}
impl SubAssign<&Vec4> for Vec4 {
#[inline]
fn sub_assign(&mut self, rhs: &Vec4) {
self.sub_assign(*rhs)
}
}
impl Sub<f32> for Vec4 {
type Output = Self;
#[inline]
fn sub(self, rhs: f32) -> Self {
Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
}
}
impl Sub<&f32> for Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &f32) -> Vec4 {
self.sub(*rhs)
}
}
impl Sub<&f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &f32) -> Vec4 {
(*self).sub(*rhs)
}
}
impl Sub<f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: f32) -> Vec4 {
(*self).sub(rhs)
}
}
impl SubAssign<f32> for Vec4 {
#[inline]
fn sub_assign(&mut self, rhs: f32) {
self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
}
}
impl SubAssign<&f32> for Vec4 {
#[inline]
fn sub_assign(&mut self, rhs: &f32) {
self.sub_assign(*rhs)
}
}
impl Sub<Vec4> for f32 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: Vec4) -> Vec4 {
Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
}
}
impl Sub<&Vec4> for f32 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &Vec4) -> Vec4 {
self.sub(*rhs)
}
}
impl Sub<&Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: &Vec4) -> Vec4 {
(*self).sub(*rhs)
}
}
impl Sub<Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn sub(self, rhs: Vec4) -> Vec4 {
(*self).sub(rhs)
}
}
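// `%` is implemented as the floored remainder `self - rhs * (self / rhs).floor()`
// (`vrndmq_f32` rounds toward negative infinity). For mixed-sign operands this can
// differ in sign from Rust's scalar `%` operator, which truncates toward zero.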
impl Rem<Vec4> for Vec4 {
type Output = Self;
#[inline]
fn rem(self, rhs: Self) -> Self {
unsafe {
let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
}
}
}
impl Rem<&Vec4> for Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &Vec4) -> Vec4 {
self.rem(*rhs)
}
}
impl Rem<&Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &Vec4) -> Vec4 {
(*self).rem(*rhs)
}
}
impl Rem<Vec4> for &Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: Vec4) -> Vec4 {
(*self).rem(rhs)
}
}
impl RemAssign<Vec4> for Vec4 {
#[inline]
fn rem_assign(&mut self, rhs: Self) {
*self = self.rem(rhs);
}
}
impl RemAssign<&Vec4> for Vec4 {
#[inline]
fn rem_assign(&mut self, rhs: &Vec4) {
self.rem_assign(*rhs)
}
}
impl Rem<f32> for Vec4 {
type Output = Self;
#[inline]
fn rem(self, rhs: f32) -> Self {
self.rem(Self::splat(rhs))
}
}
impl Rem<&f32> for Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &f32) -> Vec4 {
self.rem(*rhs)
}
}
impl Rem<&f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &f32) -> Vec4 {
(*self).rem(*rhs)
}
}
impl Rem<f32> for &Vec4 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: f32) -> Vec4 {
(*self).rem(rhs)
}
}
impl RemAssign<f32> for Vec4 {
#[inline]
fn rem_assign(&mut self, rhs: f32) {
*self = self.rem(Self::splat(rhs));
}
}
impl RemAssign<&f32> for Vec4 {
#[inline]
fn rem_assign(&mut self, rhs: &f32) {
self.rem_assign(*rhs)
}
}
impl Rem<Vec4> for f32 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: Vec4) -> Vec4 {
Vec4::splat(self).rem(rhs)
}
}
impl Rem<&Vec4> for f32 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &Vec4) -> Vec4 {
self.rem(*rhs)
}
}
impl Rem<&Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: &Vec4) -> Vec4 {
(*self).rem(*rhs)
}
}
impl Rem<Vec4> for &f32 {
type Output = Vec4;
#[inline]
fn rem(self, rhs: Vec4) -> Vec4 {
(*self).rem(rhs)
}
}
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
#[inline]
fn as_ref(&self) -> &[f32; 4] {
unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
}
}
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
#[inline]
fn as_mut(&mut self) -> &mut [f32; 4] {
unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
}
}
impl Sum for Vec4 {
#[inline]
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = Self>,
{
iter.fold(Self::ZERO, Self::add)
}
}
impl<'a> Sum<&'a Self> for Vec4 {
#[inline]
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
}
}
impl Product for Vec4 {
#[inline]
fn product<I>(iter: I) -> Self
where
I: Iterator<Item = Self>,
{
iter.fold(Self::ONE, Self::mul)
}
}
impl<'a> Product<&'a Self> for Vec4 {
#[inline]
fn product<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
}
}
impl Neg for Vec4 {
type Output = Self;
#[inline]
fn neg(self) -> Self {
Self(unsafe { vnegq_f32(self.0) })
}
}
impl Neg for &Vec4 {
type Output = Vec4;
#[inline]
fn neg(self) -> Vec4 {
(*self).neg()
}
}
impl Index<usize> for Vec4 {
type Output = f32;
#[inline]
fn index(&self, index: usize) -> &Self::Output {
match index {
0 => &self.x,
1 => &self.y,
2 => &self.z,
3 => &self.w,
_ => panic!("index out of bounds"),
}
}
}
impl IndexMut<usize> for Vec4 {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
match index {
0 => &mut self.x,
1 => &mut self.y,
2 => &mut self.z,
3 => &mut self.w,
_ => panic!("index out of bounds"),
}
}
}
impl fmt::Display for Vec4 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(p) = f.precision() {
write!(
f,
"[{:.*}, {:.*}, {:.*}, {:.*}]",
p, self.x, p, self.y, p, self.z, p, self.w
)
} else {
write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
}
}
}
impl fmt::Debug for Vec4 {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_tuple(stringify!(Vec4))
.field(&self.x)
.field(&self.y)
.field(&self.z)
.field(&self.w)
.finish()
}
}
impl From<Vec4> for float32x4_t {
#[inline(always)]
fn from(t: Vec4) -> Self {
t.0
}
}
impl From<float32x4_t> for Vec4 {
#[inline(always)]
fn from(t: float32x4_t) -> Self {
Self(t)
}
}
impl From<[f32; 4]> for Vec4 {
#[inline]
fn from(a: [f32; 4]) -> Self {
Self(unsafe { vld1q_f32(a.as_ptr()) })
}
}
impl From<Vec4> for [f32; 4] {
#[inline]
fn from(v: Vec4) -> Self {
use crate::align16::Align16;
use core::mem::MaybeUninit;
let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
unsafe {
vst1q_f32(out.as_mut_ptr().cast(), v.0);
out.assume_init().0
}
}
}
impl From<(f32, f32, f32, f32)> for Vec4 {
#[inline]
fn from(t: (f32, f32, f32, f32)) -> Self {
Self::new(t.0, t.1, t.2, t.3)
}
}
impl From<Vec4> for (f32, f32, f32, f32) {
#[inline]
fn from(v: Vec4) -> Self {
use crate::align16::Align16;
use core::mem::MaybeUninit;
let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
unsafe {
vst1q_f32(out.as_mut_ptr().cast(), v.0);
out.assume_init().0
}
}
}
impl From<(Vec3A, f32)> for Vec4 {
#[inline]
fn from((v, w): (Vec3A, f32)) -> Self {
v.extend(w)
}
}
impl From<(f32, Vec3A)> for Vec4 {
#[inline]
fn from((x, v): (f32, Vec3A)) -> Self {
Self::new(x, v.x, v.y, v.z)
}
}
impl From<(Vec3, f32)> for Vec4 {
#[inline]
fn from((v, w): (Vec3, f32)) -> Self {
Self::new(v.x, v.y, v.z, w)
}
}
impl From<(f32, Vec3)> for Vec4 {
#[inline]
fn from((x, v): (f32, Vec3)) -> Self {
Self::new(x, v.x, v.y, v.z)
}
}
impl From<(Vec2, f32, f32)> for Vec4 {
#[inline]
fn from((v, z, w): (Vec2, f32, f32)) -> Self {
Self::new(v.x, v.y, z, w)
}
}
impl From<(Vec2, Vec2)> for Vec4 {
#[inline]
fn from((v, u): (Vec2, Vec2)) -> Self {
Self::new(v.x, v.y, u.x, u.y)
}
}
impl Deref for Vec4 {
type Target = crate::deref::Vec4<f32>;
#[inline]
fn deref(&self) -> &Self::Target {
unsafe { &*(self as *const Self).cast() }
}
}
impl DerefMut for Vec4 {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *(self as *mut Self).cast() }
}
}
impl From<BVec4> for Vec4 {
#[inline]
fn from(v: BVec4) -> Self {
Self::new(
f32::from(v.x),
f32::from(v.y),
f32::from(v.z),
f32::from(v.w),
)
}
}
#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
#[inline]
fn from(v: BVec4A) -> Self {
let bool_array: [bool; 4] = v.into();
Self::new(
f32::from(bool_array[0]),
f32::from(bool_array[1]),
f32::from(bool_array[2]),
f32::from(bool_array[3]),
)
}
}