use crate::{coresimd::*, BVec3A, Vec2, Vec3, Vec4};
#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};
use core::simd::*;
use std::simd::StdFloat;
#[cfg(feature = "libm")]
#[allow(unused_imports)]
use num_traits::Float;
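/// Creates a 3-dimensional vector.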
#[inline(always)]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
Vec3A::new(x, y, z)
}
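/// A 3-dimensional vector with SIMD support.
///
/// The data is stored in a single `f32x4` register, so this type is 16 byte aligned and
/// carries a hidden fourth lane that has no public meaning.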
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) f32x4);
impl Vec3A {
pub const ZERO: Self = Self::splat(0.0);
pub const ONE: Self = Self::splat(1.0);
pub const NEG_ONE: Self = Self::splat(-1.0);
pub const NAN: Self = Self::splat(f32::NAN);
pub const X: Self = Self::new(1.0, 0.0, 0.0);
pub const Y: Self = Self::new(0.0, 1.0, 0.0);
pub const Z: Self = Self::new(0.0, 0.0, 1.0);
pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);
pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);
pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);
pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
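/// Creates a new vector.
///
/// The hidden fourth SIMD lane is initialized to `z`; its value has no meaning for this type.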
#[inline(always)]
pub const fn new(x: f32, y: f32, z: f32) -> Self {
Self(f32x4::from_array([x, y, z, z]))
}
#[inline]
pub const fn splat(v: f32) -> Self {
// `Simd::splat` is not a `const fn`, so build the register from an array instead.
Self(f32x4::from_array([v; 4]))
}
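/// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
/// for each lane of `mask`: a true lane takes the value from `if_true`, a false lane the
/// value from `if_false`.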
#[inline]
pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
Self(mask.0.select(if_true.0, if_false.0))
}
#[inline]
pub const fn from_array(a: [f32; 3]) -> Self {
Self::new(a[0], a[1], a[2])
}
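/// Returns `[x, y, z]`.
///
/// The components are read in place through a pointer cast, which is sound because
/// `Vec3A` is `#[repr(transparent)]` over `f32x4` and therefore at least as large and
/// aligned as `[f32; 3]`.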
#[inline]
pub const fn to_array(&self) -> [f32; 3] {
unsafe { *(self as *const Vec3A as *const [f32; 3]) }
}
#[inline]
pub const fn from_slice(slice: &[f32]) -> Self {
Self::new(slice[0], slice[1], slice[2])
}
#[inline]
pub fn write_to_slice(self, slice: &mut [f32]) {
slice[0] = self.x;
slice[1] = self.y;
slice[2] = self.z;
}
#[allow(dead_code)]
#[inline]
pub(crate) fn from_vec4(v: Vec4) -> Self {
Self(v.0)
}
#[inline]
pub fn extend(self, w: f32) -> Vec4 {
Vec4::new(self.x, self.y, self.z, w)
}
#[inline]
pub fn truncate(self) -> Vec2 {
use crate::swizzles::Vec3Swizzles;
self.xy()
}
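/// Computes the dot product of `self` and `rhs`.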
#[inline]
pub fn dot(self, rhs: Self) -> f32 {
dot3(self.0, rhs.0)
}
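/// Computes the cross product of `self` and `rhs`.
///
/// Uses the shuffle form `a × b = ((a.zxy * b) - (b.zxy * a)).zxy`, which needs only
/// three swizzles and no per-lane extraction.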
#[inline]
pub fn cross(self, rhs: Self) -> Self {
let lhszxy = simd_swizzle!(self.0, [2, 0, 1, 1]);
let rhszxy = simd_swizzle!(rhs.0, [2, 0, 1, 1]);
let lhszxy_rhs = lhszxy * rhs.0;
let rhszxy_lhs = rhszxy * self.0;
let sub = lhszxy_rhs - rhszxy_lhs;
Self(simd_swizzle!(sub, [2, 0, 1, 1]))
}
#[inline]
pub fn min(self, rhs: Self) -> Self {
Self(self.0.min(rhs.0))
}
#[inline]
pub fn max(self, rhs: Self) -> Self {
Self(self.0.max(rhs.0))
}
#[inline]
pub fn clamp(self, min: Self, max: Self) -> Self {
glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
self.max(min).min(max)
}
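/// Returns the horizontal minimum of `x`, `y` and `z`.
///
/// The two swizzles below are arranged so that the hidden fourth lane never reaches
/// lane 0 and therefore cannot affect the result.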
#[inline]
pub fn min_element(self) -> f32 {
let v = self.0;
let v = v.min(simd_swizzle!(v, [2, 2, 1, 1]));
let v = v.min(simd_swizzle!(v, [1, 0, 0, 0]));
v[0]
}
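/// Returns the horizontal maximum of `x`, `y` and `z`, again keeping the hidden fourth
/// lane out of lane 0.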
#[inline]
pub fn max_element(self) -> f32 {
let v = self.0;
let v = v.max(simd_swizzle!(v, [2, 2, 0, 0]));
let v = v.max(simd_swizzle!(v, [1, 0, 0, 0]));
v[0]
}
#[inline]
pub fn cmpeq(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_eq(self.0, rhs.0))
}
#[inline]
pub fn cmpne(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_ne(self.0, rhs.0))
}
#[inline]
pub fn cmpge(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_ge(self.0, rhs.0))
}
#[inline]
pub fn cmpgt(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_gt(self.0, rhs.0))
}
#[inline]
pub fn cmple(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_le(self.0, rhs.0))
}
#[inline]
pub fn cmplt(self, rhs: Self) -> BVec3A {
BVec3A(f32x4::lanes_lt(self.0, rhs.0))
}
#[inline]
pub fn abs(self) -> Self {
Self(self.0.abs())
}
#[inline]
pub fn signum(self) -> Self {
Self(self.0.signum())
}
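/// Returns `true` if, and only if, `x`, `y` and `z` are all finite.
///
/// The hidden fourth lane is OR-ed with `true` so that it cannot make the check fail.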
#[inline]
pub fn is_finite(self) -> bool {
f32x4::is_finite(self.0)
.bitor(mask32x4::from_array([false, false, false, true]))
.all()
}
#[inline]
pub fn is_nan(self) -> bool {
self.is_nan_mask().any()
}
#[inline]
pub fn is_nan_mask(self) -> BVec3A {
BVec3A(f32x4::is_nan(self.0))
}
#[doc(alias = "magnitude")]
#[inline]
pub fn length(self) -> f32 {
let dot = dot3_in_x(self.0, self.0);
dot.sqrt()[0]
}
#[doc(alias = "magnitude2")]
#[inline]
pub fn length_squared(self) -> f32 {
self.dot(self)
}
#[inline]
pub fn length_recip(self) -> f32 {
let dot = dot3_in_x(self.0, self.0);
dot.sqrt().recip()[0]
}
#[inline]
pub fn distance(self, rhs: Self) -> f32 {
(self - rhs).length()
}
#[inline]
pub fn distance_squared(self, rhs: Self) -> f32 {
(self - rhs).length_squared()
}
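/// Returns `self` normalized to length 1.0.
///
/// For a valid result `self` must not be of length zero; `glam_assert!` verifies that the
/// result is finite. See [`Self::try_normalize`] and [`Self::normalize_or_zero`] for
/// fallible alternatives.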
#[must_use]
#[inline]
pub fn normalize(self) -> Self {
let length = dot3_into_f32x4(self.0, self.0).sqrt();
#[allow(clippy::let_and_return)]
let normalized = Self(self.0 / length);
glam_assert!(normalized.is_finite());
normalized
}
#[must_use]
#[inline]
pub fn try_normalize(self) -> Option<Self> {
let rcp = self.length_recip();
if rcp.is_finite() && rcp > 0.0 {
Some(self * rcp)
} else {
None
}
}
#[must_use]
#[inline]
pub fn normalize_or_zero(self) -> Self {
let rcp = self.length_recip();
if rcp.is_finite() && rcp > 0.0 {
self * rcp
} else {
Self::ZERO
}
}
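/// Returns whether `self` has length `1.0` or not, using a tolerance of `1e-4` on the
/// squared length.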
#[inline]
pub fn is_normalized(self) -> bool {
(self.length_squared() - 1.0).abs() <= 1e-4
}
#[must_use]
#[inline]
pub fn project_onto(self, rhs: Self) -> Self {
let other_len_sq_rcp = rhs.dot(rhs).recip();
glam_assert!(other_len_sq_rcp.is_finite());
rhs * self.dot(rhs) * other_len_sq_rcp
}
#[must_use]
#[inline]
pub fn reject_from(self, rhs: Self) -> Self {
self - self.project_onto(rhs)
}
#[must_use]
#[inline]
pub fn project_onto_normalized(self, rhs: Self) -> Self {
glam_assert!(rhs.is_normalized());
rhs * self.dot(rhs)
}
#[must_use]
#[inline]
pub fn reject_from_normalized(self, rhs: Self) -> Self {
self - self.project_onto_normalized(rhs)
}
#[inline]
pub fn round(self) -> Self {
Self(self.0.round())
}
#[inline]
pub fn floor(self) -> Self {
Self(self.0.floor())
}
#[inline]
pub fn ceil(self) -> Self {
Self(self.0.ceil())
}
#[inline]
pub fn fract(self) -> Self {
self - self.floor()
}
#[inline]
pub fn exp(self) -> Self {
Self::new(self.x.exp(), self.y.exp(), self.z.exp())
}
#[inline]
pub fn powf(self, n: f32) -> Self {
Self::new(self.x.powf(n), self.y.powf(n), self.z.powf(n))
}
#[inline]
pub fn recip(self) -> Self {
Self(self.0.recip())
}
#[doc(alias = "mix")]
#[inline]
pub fn lerp(self, rhs: Self, s: f32) -> Self {
self + ((rhs - self) * s)
}
#[inline]
pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
}
#[inline]
pub fn clamp_length(self, min: f32, max: f32) -> Self {
glam_assert!(min <= max);
let length_sq = self.length_squared();
if length_sq < min * min {
self * (length_sq.sqrt().recip() * min)
} else if length_sq > max * max {
self * (length_sq.sqrt().recip() * max)
} else {
self
}
}
pub fn clamp_length_max(self, max: f32) -> Self {
let length_sq = self.length_squared();
if length_sq > max * max {
self * (length_sq.sqrt().recip() * max)
} else {
self
}
}
pub fn clamp_length_min(self, min: f32) -> Self {
let length_sq = self.length_squared();
if length_sq < min * min {
self * (length_sq.sqrt().recip() * min)
} else {
self
}
}
#[inline]
pub fn mul_add(self, a: Self, b: Self) -> Self {
Self(self.0.mul_add(a.0, b.0))
}
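/// Returns the angle (in radians) between `self` and `rhs`.
///
/// The cosine is computed as `dot / sqrt(len_sq(self) * len_sq(rhs))` and passed to
/// `acos_approx`, which is expected to tolerate values pushed just outside `[-1, 1]` by
/// rounding error.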
#[inline]
pub fn angle_between(self, rhs: Self) -> f32 {
use crate::FloatEx;
self.dot(rhs)
.div(self.length_squared().mul(rhs.length_squared()).sqrt())
.acos_approx()
}
/// Returns some vector that is orthogonal to the given one.
///
/// The input vector must be finite and non-zero.
#[inline]
pub fn any_orthogonal_vector(&self) -> Self {
// Choose which axis to cross with so that the result cannot be near zero.
if self.x.abs() > self.y.abs() {
Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
} else {
Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
}
}
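/// Returns any unit vector that is orthogonal to the given one; the input must be
/// normalized.
///
/// This appears to be the branchless construction from Duff et al.,
/// "Building an Orthonormal Basis, Revisited" (JCGT, 2017).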
#[inline]
pub fn any_orthonormal_vector(&self) -> Self {
glam_assert!(self.is_normalized());
#[cfg(feature = "std")]
let sign = (1.0_f32).copysign(self.z);
#[cfg(not(feature = "std"))]
let sign = self.z.signum();
let a = -1.0 / (sign + self.z);
let b = self.x * self.y * a;
Self::new(b, sign + self.y * self.y * a, -self.y)
}
#[inline]
pub fn any_orthonormal_pair(&self) -> (Self, Self) {
glam_assert!(self.is_normalized());
#[cfg(feature = "std")]
let sign = (1.0_f32).copysign(self.z);
#[cfg(not(feature = "std"))]
let sign = self.z.signum();
let a = -1.0 / (sign + self.z);
let b = self.x * self.y * a;
(
Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
Self::new(b, sign + self.y * self.y * a, -self.y),
)
}
#[inline]
pub fn as_dvec3(&self) -> crate::DVec3 {
crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
}
#[inline]
pub fn as_ivec3(&self) -> crate::IVec3 {
crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
}
#[inline]
pub fn as_uvec3(&self) -> crate::UVec3 {
crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
}
}
impl Default for Vec3A {
#[inline(always)]
fn default() -> Self {
Self::ZERO
}
}
impl PartialEq for Vec3A {
#[inline]
fn eq(&self, rhs: &Self) -> bool {
self.cmpeq(*rhs).all()
}
}
impl Div<Vec3A> for Vec3A {
type Output = Self;
#[inline]
fn div(self, rhs: Self) -> Self {
Self(self.0 / rhs.0)
}
}
impl DivAssign<Vec3A> for Vec3A {
#[inline]
fn div_assign(&mut self, rhs: Self) {
self.0 /= rhs.0;
}
}
impl Div<f32> for Vec3A {
type Output = Self;
#[inline]
fn div(self, rhs: f32) -> Self {
Self(self.0 / f32x4::splat(rhs))
}
}
impl DivAssign<f32> for Vec3A {
#[inline]
fn div_assign(&mut self, rhs: f32) {
self.0 /= f32x4::splat(rhs);
}
}
impl Div<Vec3A> for f32 {
type Output = Vec3A;
#[inline]
fn div(self, rhs: Vec3A) -> Vec3A {
Vec3A(f32x4::splat(self) / rhs.0)
}
}
impl Mul<Vec3A> for Vec3A {
type Output = Self;
#[inline]
fn mul(self, rhs: Self) -> Self {
Self(self.0 * rhs.0)
}
}
impl MulAssign<Vec3A> for Vec3A {
#[inline]
fn mul_assign(&mut self, rhs: Self) {
self.0 *= rhs.0;
}
}
impl Mul<f32> for Vec3A {
type Output = Self;
#[inline]
fn mul(self, rhs: f32) -> Self {
Self(self.0 * f32x4::splat(rhs))
}
}
impl MulAssign<f32> for Vec3A {
#[inline]
fn mul_assign(&mut self, rhs: f32) {
self.0 *= f32x4::splat(rhs);
}
}
impl Mul<Vec3A> for f32 {
type Output = Vec3A;
#[inline]
fn mul(self, rhs: Vec3A) -> Vec3A {
Vec3A(f32x4::splat(self) * rhs.0)
}
}
impl Add<Vec3A> for Vec3A {
type Output = Self;
#[inline]
fn add(self, rhs: Self) -> Self {
Self(self.0 + rhs.0)
}
}
impl AddAssign<Vec3A> for Vec3A {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.0 += rhs.0;
}
}
impl Add<f32> for Vec3A {
type Output = Self;
#[inline]
fn add(self, rhs: f32) -> Self {
Self(self.0 + f32x4::splat(rhs))
}
}
impl AddAssign<f32> for Vec3A {
#[inline]
fn add_assign(&mut self, rhs: f32) {
self.0 += f32x4::splat(rhs);
}
}
impl Add<Vec3A> for f32 {
type Output = Vec3A;
#[inline]
fn add(self, rhs: Vec3A) -> Vec3A {
Vec3A(f32x4::splat(self) + rhs.0)
}
}
impl Sub<Vec3A> for Vec3A {
type Output = Self;
#[inline]
fn sub(self, rhs: Self) -> Self {
Self(self.0 - rhs.0)
}
}
impl SubAssign<Vec3A> for Vec3A {
#[inline]
fn sub_assign(&mut self, rhs: Vec3A) {
self.0 -= rhs.0;
}
}
impl Sub<f32> for Vec3A {
type Output = Self;
#[inline]
fn sub(self, rhs: f32) -> Self {
Self(self.0 - f32x4::splat(rhs))
}
}
impl SubAssign<f32> for Vec3A {
#[inline]
fn sub_assign(&mut self, rhs: f32) {
self.0 -= f32x4::splat(rhs);
}
}
impl Sub<Vec3A> for f32 {
type Output = Vec3A;
#[inline]
fn sub(self, rhs: Vec3A) -> Vec3A {
Vec3A(f32x4::splat(self) - rhs.0)
}
}
impl Rem<Vec3A> for Vec3A {
type Output = Self;
#[inline]
fn rem(self, rhs: Self) -> Self {
Self(self.0 % rhs.0)
}
}
impl RemAssign<Vec3A> for Vec3A {
#[inline]
fn rem_assign(&mut self, rhs: Self) {
self.0 %= rhs.0;
}
}
impl Rem<f32> for Vec3A {
type Output = Self;
#[inline]
fn rem(self, rhs: f32) -> Self {
self.rem(Self::splat(rhs))
}
}
impl RemAssign<f32> for Vec3A {
#[inline]
fn rem_assign(&mut self, rhs: f32) {
self.0 %= f32x4::splat(rhs);
}
}
impl Rem<Vec3A> for f32 {
type Output = Vec3A;
#[inline]
fn rem(self, rhs: Vec3A) -> Vec3A {
Vec3A::splat(self).rem(rhs)
}
}
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
#[inline]
fn as_ref(&self) -> &[f32; 3] {
unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
}
}
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 3]> for Vec3A {
#[inline]
fn as_mut(&mut self) -> &mut [f32; 3] {
unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
}
}
impl<'a> Sum<&'a Self> for Vec3A {
#[inline]
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
}
}
impl<'a> Product<&'a Self> for Vec3A {
#[inline]
fn product<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
}
}
impl Neg for Vec3A {
type Output = Self;
#[inline]
fn neg(self) -> Self {
Self(-self.0)
}
}
impl Index<usize> for Vec3A {
type Output = f32;
#[inline]
fn index(&self, index: usize) -> &Self::Output {
&self.0[index]
}
}
impl IndexMut<usize> for Vec3A {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.0[index]
}
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec3A {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
}
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec3A {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_tuple(stringify!(Vec3A))
.field(&self.x)
.field(&self.y)
.field(&self.z)
.finish()
}
}
impl From<Vec3A> for f32x4 {
#[inline]
fn from(t: Vec3A) -> Self {
t.0
}
}
impl From<f32x4> for Vec3A {
#[inline]
fn from(t: f32x4) -> Self {
Self(t)
}
}
impl From<[f32; 3]> for Vec3A {
#[inline]
fn from(a: [f32; 3]) -> Self {
Self::new(a[0], a[1], a[2])
}
}
impl From<Vec3A> for [f32; 3] {
#[inline]
fn from(v: Vec3A) -> Self {
unsafe { *(v.0.to_array().as_ptr() as *const Self) }
}
}
impl From<(f32, f32, f32)> for Vec3A {
#[inline]
fn from(t: (f32, f32, f32)) -> Self {
Self::new(t.0, t.1, t.2)
}
}
impl From<Vec3A> for (f32, f32, f32) {
#[inline]
fn from(v: Vec3A) -> Self {
unsafe { *(v.0.to_array().as_ptr() as *const Self) }
}
}
impl From<Vec3> for Vec3A {
#[inline]
fn from(v: Vec3) -> Self {
Self::new(v.x, v.y, v.z)
}
}
impl From<Vec4> for Vec3A {
#[inline]
fn from(v: Vec4) -> Self {
Self(v.0)
}
}
impl From<Vec3A> for Vec3 {
#[inline]
fn from(v: Vec3A) -> Self {
unsafe { *(v.0.to_array().as_ptr() as *const Self) }
}
}
impl From<(Vec2, f32)> for Vec3A {
#[inline]
fn from((v, z): (Vec2, f32)) -> Self {
Self::new(v.x, v.y, z)
}
}
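// Deref to a plain `{ x, y, z }` struct so the scalar components can be read as `v.x`,
// `v.y` and `v.z`; the cast relies on `crate::deref::Vec3<f32>` matching the layout of
// the first three SIMD lanes.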
impl Deref for Vec3A {
type Target = crate::deref::Vec3<f32>;
#[inline]
fn deref(&self) -> &Self::Target {
unsafe { &*(self as *const Self).cast() }
}
}
impl DerefMut for Vec3A {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *(self as *mut Self).cast() }
}
}
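// Illustrative usage sketch, not part of the original module: a few smoke tests for the
// API above, assuming the standard `cargo test` harness. Only items defined or imported
// in this file are referenced.
#[cfg(test)]
mod vec3a_usage_sketch {
use super::*;
#[test]
fn construction_and_element_access() {
let v = vec3a(1.0, 2.0, 3.0);
// `x`, `y` and `z` are reached through the `Deref` impl above.
assert_eq!((v.x, v.y, v.z), (1.0, 2.0, 3.0));
assert_eq!(v.min_element(), 1.0);
assert_eq!(v.max_element(), 3.0);
}
#[test]
fn cross_of_unit_axes_is_right_handed() {
// X x Y = Z in a right-handed coordinate system.
assert_eq!(Vec3A::X.cross(Vec3A::Y), Vec3A::Z);
}
#[test]
fn normalize_yields_unit_length() {
let n = Vec3A::new(3.0, 0.0, 4.0).normalize();
assert!(n.is_normalized());
assert!((n.length() - 1.0).abs() <= 1e-6);
}
}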