aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAdrien Larbanet <adrienl@google.com>2022-04-11 17:52:33 +0200
committerAdrien Larbanet <adrienl@google.com>2022-04-21 18:48:23 +0200
commit5d648c139ab07071a5c06331442f14467decb4e5 (patch)
treeb6929ada6be237f118f6541e437232a61be92bd0
parent6c0453a9f8c6c63fe1cc16e506cd6f1400bb395c (diff)
downloadglam-5d648c139ab07071a5c06331442f14467decb4e5.tar.gz
Import glam crate
Bug: 229873329 Change-Id: I1595a5abd7999d7bc896ff9028f9009f38a65896
-rw-r--r--.github/workflows/cargo-deny.yml8
-rw-r--r--.github/workflows/ci.yml52
-rw-r--r--.github/workflows/coverage.yml21
-rw-r--r--.gitignore8
-rw-r--r--.tarpaulin.toml7
-rw-r--r--ARCHITECTURE.md217
-rw-r--r--ATTRIBUTION.md80
-rw-r--r--Android.bp17
-rw-r--r--CHANGELOG.md788
-rw-r--r--CONTRIBUTING.md58
-rw-r--r--Cargo.toml135
-rw-r--r--Cargo.toml.orig117
-rw-r--r--LICENSE201
-rw-r--r--LICENSE-APACHE201
-rw-r--r--LICENSE-MIT23
-rw-r--r--METADATA20
-rw-r--r--MODULE_LICENSE_APACHE20
-rw-r--r--NOTICE201
-rw-r--r--OWNERS1
-rw-r--r--README.md253
-rw-r--r--benches/affine2.rs79
-rw-r--r--benches/affine3.rs117
-rw-r--r--benches/mat2.rs42
-rw-r--r--benches/mat3.rs72
-rw-r--r--benches/mat3a.rs72
-rw-r--r--benches/mat4.rs129
-rw-r--r--benches/quat.rs80
-rw-r--r--benches/support/macros.rs210
-rw-r--r--benches/support/mod.rs129
-rw-r--r--benches/transform.rs132
-rw-r--r--benches/vec2.rs51
-rw-r--r--benches/vec3.rs169
-rw-r--r--benches/vec3a.rs111
-rw-r--r--benches/vec4.rs29
-rwxr-xr-xbuild_all_msrv.sh9
-rwxr-xr-xbuild_and_test_features.sh28
-rwxr-xr-xbuild_and_test_wasm32_chrome.sh6
-rwxr-xr-xbuild_and_test_wasm32_firefox.sh6
-rw-r--r--clippy.toml1
-rw-r--r--deny.toml18
-rw-r--r--src/affine2.rs519
-rw-r--r--src/affine3.rs631
-rw-r--r--src/cast.rs167
-rw-r--r--src/core/mod.rs18
-rw-r--r--src/core/scalar/mask.rs452
-rw-r--r--src/core/scalar/matrix.rs285
-rw-r--r--src/core/scalar/mod.rs4
-rw-r--r--src/core/scalar/quaternion.rs87
-rw-r--r--src/core/scalar/vector.rs1465
-rw-r--r--src/core/sse2/float.rs280
-rw-r--r--src/core/sse2/matrix.rs558
-rw-r--r--src/core/sse2/mod.rs4
-rw-r--r--src/core/sse2/quaternion.rs135
-rw-r--r--src/core/sse2/vector.rs871
-rw-r--r--src/core/storage.rs128
-rw-r--r--src/core/traits/matrix.rs985
-rw-r--r--src/core/traits/mod.rs5
-rw-r--r--src/core/traits/projection.rs164
-rw-r--r--src/core/traits/quaternion.rs140
-rw-r--r--src/core/traits/scalar.rs434
-rw-r--r--src/core/traits/vector.rs854
-rw-r--r--src/core/wasm32/float.rs113
-rw-r--r--src/core/wasm32/matrix.rs532
-rw-r--r--src/core/wasm32/mod.rs4
-rw-r--r--src/core/wasm32/quaternion.rs130
-rw-r--r--src/core/wasm32/vector.rs812
-rw-r--r--src/euler.rs261
-rw-r--r--src/features/impl_approx.rs212
-rw-r--r--src/features/impl_bytemuck.rs98
-rw-r--r--src/features/impl_mint.rs528
-rw-r--r--src/features/impl_rand.rs199
-rw-r--r--src/features/impl_rkyv.rs188
-rw-r--r--src/features/impl_serde.rs787
-rw-r--r--src/features/mod.rs17
-rw-r--r--src/lib.rs344
-rw-r--r--src/macros.rs484
-rw-r--r--src/mat.rs114
-rw-r--r--src/mat2.rs398
-rw-r--r--src/mat3.rs596
-rw-r--r--src/mat4.rs890
-rw-r--r--src/quat.rs825
-rw-r--r--src/spirv.rs26
-rw-r--r--src/swizzles/dvec2_impl_scalar.rs118
-rw-r--r--src/swizzles/dvec3_impl_scalar.rs474
-rw-r--r--src/swizzles/dvec4_impl_scalar.rs1350
-rw-r--r--src/swizzles/ivec2_impl_scalar.rs118
-rw-r--r--src/swizzles/ivec3_impl_scalar.rs474
-rw-r--r--src/swizzles/ivec4_impl_scalar.rs1350
-rw-r--r--src/swizzles/mod.rs35
-rw-r--r--src/swizzles/uvec2_impl_scalar.rs118
-rw-r--r--src/swizzles/uvec3_impl_scalar.rs474
-rw-r--r--src/swizzles/uvec4_impl_scalar.rs1350
-rw-r--r--src/swizzles/vec2_impl_scalar.rs118
-rw-r--r--src/swizzles/vec3_impl_scalar.rs474
-rw-r--r--src/swizzles/vec3a_impl_scalar.rs474
-rw-r--r--src/swizzles/vec3a_impl_sse2.rs479
-rw-r--r--src/swizzles/vec3a_impl_wasm32.rs476
-rw-r--r--src/swizzles/vec4_impl_scalar.rs1350
-rw-r--r--src/swizzles/vec4_impl_sse2.rs1355
-rw-r--r--src/swizzles/vec4_impl_wasm32.rs1352
-rw-r--r--src/swizzles/vec_traits.rs512
-rw-r--r--src/transform.rs432
-rw-r--r--src/vec.rs1029
-rw-r--r--src/vec2.rs317
-rw-r--r--src/vec3.rs454
-rw-r--r--src/vec4.rs434
-rw-r--r--src/vec_mask.rs460
-rw-r--r--tests/affine2.rs217
-rw-r--r--tests/affine3.rs344
-rw-r--r--tests/euler.rs209
-rw-r--r--tests/mat2.rs262
-rw-r--r--tests/mat3.rs455
-rw-r--r--tests/mat4.rs745
-rw-r--r--tests/quat.rs584
-rw-r--r--tests/support/macros.rs204
-rw-r--r--tests/support/mod.rs280
-rw-r--r--tests/swizzles_f32.rs618
-rw-r--r--tests/swizzles_f64.rs501
-rw-r--r--tests/swizzles_i32.rs497
-rw-r--r--tests/swizzles_u32.rs497
-rw-r--r--tests/transform.rs129
-rw-r--r--tests/vec2.rs901
-rw-r--r--tests/vec3.rs1102
-rw-r--r--tests/vec4.rs1163
124 files changed, 43927 insertions, 0 deletions
diff --git a/.github/workflows/cargo-deny.yml b/.github/workflows/cargo-deny.yml
new file mode 100644
index 0000000..f5452f7
--- /dev/null
+++ b/.github/workflows/cargo-deny.yml
@@ -0,0 +1,8 @@
+name: cargo-deny
+on: [push, pull_request]
+jobs:
+ cargo-deny:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1
+ - uses: EmbarkStudios/cargo-deny-action@v1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..9ce0c54
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,52 @@
+name: CI
+on: [push, pull_request]
+jobs:
+
+ lint:
+ name: Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions-rs/toolchain@v1
+ with:
+ toolchain: stable
+ components: rustfmt, clippy
+
+ # make sure all code has been formatted with rustfmt and linted with clippy
+ - name: rustfmt
+ run: cargo fmt -- --check --color always
+
+ # run clippy to verify we have no warnings
+ - run: cargo fetch
+ - name: cargo clippy
+ run: cargo clippy --all-features -- -D warnings
+
+ test:
+ name: Test
+ strategy:
+ matrix:
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ toolchain: [1.52.1, stable, beta, nightly]
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v2
+ - run: rustup update --no-self-update ${{ matrix.toolchain }}
+ - run: rustup default ${{ matrix.toolchain }}
+ - run: ./build_and_test_features.sh
+ shell: bash
+
+ test-wasm:
+ strategy:
+ matrix:
+ toolchain: [stable]
+ os: [ubuntu-latest]
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Install
+ run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+
+ - run: ./build_and_test_wasm32_firefox.sh
+ - run: ./build_and_test_wasm32_chrome.sh
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
new file mode 100644
index 0000000..504edd3
--- /dev/null
+++ b/.github/workflows/coverage.yml
@@ -0,0 +1,21 @@
+name: coverage
+on: [push]
+jobs:
+ coverage:
+ container:
+ image: xd009642/tarpaulin
+ options: --security-opt seccomp=unconfined
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ # - name: Install tarpaulin
+ # run: cargo install cargo-tarpaulin
+
+ - name: Generate code coverage
+ run: cargo tarpaulin -v --timeout 120 --out Lcov --output-dir ./coverage
+
+ - name: Upload to coveralls.io
+ uses: coverallsapp/github-action@master
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3da98e9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+**/target
+*.json
+*.swp
+**/*.rs.bk
+Cargo.lock
+Session.vim
+.cargo-ok
+cargo-timing*.html
diff --git a/.tarpaulin.toml b/.tarpaulin.toml
new file mode 100644
index 0000000..9fe5954
--- /dev/null
+++ b/.tarpaulin.toml
@@ -0,0 +1,7 @@
+[sse2_math]
+features = "approx bytemuck mint rand rkyv serde debug-glam-assert"
+exclude-files = ["src/transform.rs", "src/core/wasm32/*", "src/swizzles/vec3a_impl_wasm32.rs", "src/swizzles/vec4_impl_wasm32.rs", "benches/*", "tests/support/mod.rs"]
+
+[scalar_math]
+features = "scalar-math approx bytemuck mint rand rkyv serde debug-glam-assert"
+exclude-files = ["src/transform.rs", "src/core/wasm32/*", "src/swizzles/vec3a_impl_wasm32.rs", "src/swizzles/vec4_impl_wasm32.rs", "benches/*", "tests/support/mod.rs"]
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
new file mode 100644
index 0000000..47c5332
--- /dev/null
+++ b/ARCHITECTURE.md
@@ -0,0 +1,217 @@
+# Architecture
+
+This document describes the high-level architecture of `glam`. While `glam` is
+not a large library there are some complexities to its implementation. The
+rationale and explanation of these follows.
+
+## Design goals
+
+The overarching design goals of glam are:
+
+* Good out of the box performance using SIMD when available
+* Has a simple public interface
+* Is fast to compile
+* Follow Rust [standard library] conventions and [API guidelines] where possible
+* High quality [rustdoc] generated documentation
+
+[standard library]: https://doc.rust-lang.org/std/index.html
+[API guidelines]: https://rust-lang.github.io/api-guidelines
+[rustdoc]: https://doc.rust-lang.org/rustdoc/index.html
+
+### SIMD
+
+One of the core premises of `glam` was that using SSE2 instructions on `x86` and
+`x86_64` architectures gave better performance than using Rust's built in `f32`
+type. For more on this finding see [Optimising path tracing with SIMD].
+
+I also wanted to have a `f32` fallback when SIMD was not available.
+
+[Optimising path tracing with SIMD]: https://bitshifter.github.io/2018/06/04/simd-path-tracing/#converting-vec3-to-sse2
+
+### No generics
+
+Because internally storage could be a SIMD vector intrinsic like `__m128` on
+`x86` or say an array of `f32` if SSE2 was not available, a simple generic
+parameter like `Vec4<T>` could not be used. The `T` would specify the public
+facing type, but not storage. Perhaps this could be achieved with a second
+generic parameter for storage, e.g. `Vec4<f32, __m128>` or `Vec4<f32, [f32; 4]>`
+but I felt that such a design would introduce a lot of complexity that end users
+would ultimately be burdened with, so it's not something that was pursued.
+
+Generics can also increase compile time and code size which is something glam
+wants to avoid.
+
+### No traits
+
+`glam` also mostly avoids using traits in the public interface. Primarily
+because there wasn't a good reason to. A `Vec3` is not an interface, it is a
+concrete type. The secondary reason is traits fragment documentation. If the
+functionality of a `Vec3` is implemented across a number of different traits
+then the documentation of all of the `Vec3` methods will be on the individual
+traits, not the `Vec3` itself. This makes it harder for users to find what
+methods a struct actually implements as the documentation is not in one place.
+
+Conversely `glam` does use traits for swizzle methods so that the documentation
+for these methods is on the trait and not the `Vec2`, `Vec3`, `Vec4` and so on
+structs. There are many swizzle methods which would clutter the documentation,
+making them a trait means they won't pollute documentation.
+
+### Support common primitives
+
+Initially `glam` only supported `f32` which kept the internal implementation
+relatively simple. However, users also wanted support for other primitive types
+like `f64`, `i32` and `u32`. Because `glam` avoids using `generics` adding
+support for other primitive types without a lot of code duplication required
+some additional complexity in implementation.
+
+## High level structure
+
+`glam` supports a number of permutations of vector, quaternion and matrix types
+for `f32`, `f64`, `i32` and `u32` primitives, with SSE2 or wasm32 for some `f32`
+types and scalar fallbacks if SIMD is not available.
+
+This is done with a combination of Rust macros for generating the public facing
+types and documentation, e.g. `Vec4` and inner storage types which have a number
+of traits implemented for different kinds of storage.
+
+### Inner types and traits
+
+Many `glam` types may use SIMD storage where available, e.g. `Vec4` might use
+`__m128` for storage if available or an inner storage struct `XYZW<f32>` for
+the scalar implementation.
+
+There are a number of internal traits defined in `core::traits` for scalar,
+vector, matrix and quaternion functionality that glam needs. These traits are
+implemented for the different storage types, e.g.
+`core::traits::vector::Vector4` has implementations for `__m128`, `simd128`
+and the `XYZW` struct.
+
+The traits will provide default definitions where possible so not every trait
+method needs to be implemented for every storage type.
+
+### Component access via Deref
+
+Because `glam` uses an inner storage type which could be a simple struct or a
+SIMD vector it is not possible to provide direct access to the vector's
+component values (e.g. `.x`, `.y`, and `.z`). The `Deref` trait is used to work
+around this and provide direct access to vector components like `.x`, `.y` and
+so on. The `Deref` implementation will return `XYZ<T>` structure on which the
+vector components are accessible. Unfortunately if users dereference the public
+types they will see confusing error messages about `XYZ` types but this on
+balance seemed preferable to needing setter and getter methods to read and
+write component values.
+
+### Public types and macros
+
+Macros are used to generate the public types to reuse common implementation
+details where possible.
+
+Generally the public type `struct` is declared then its methods are populated
+with multiple macros within a single `impl`. Methods are all declared within a
+single `impl` definition for documentation purposes. While it is possible to
+have multiple `impl` blocks for the same type this splits the method
+documentation generated by `rustdoc`.
+
+A lot of the motivation for removing duplication is documentation. It is
+reasonably easy to find discrepancies in duplicated code through unit testing
+but documentation is a lot harder to keep in sync.
+
+## A walkthrough of 3D vectors
+
+3D vectors are the most complicated case in `glam`. There are 5 different 3D
+vector types, including `Vec3`, `Vec3A` (16 byte aligned SIMD), `DVec3`, `IVec3`
+and `UVec3`. There is some common code used by all of these types and some that
+is specific to the primitives and storage that they implement. Note that there
+is also a `BVec3` but as that type is used as a mask it is quite separate.
+
+### Storage
+
+Storage for a 3D vector may use `XYZ<T>` where `T` is one of `f32`, `f64`, `i32`,
+or `u32` for the scalar case and `__m128` or `simd128` for the SIMD case for
+SSE2 or wasm32 respectively. There is also `XYZF32A16` which is used as storage
+for `Vec3A` when SIMD is not available.
+
+### Traits
+
+There are quite a few traits involved in implementing a 3D vector. To start with
+there are scalar traits which set up some constants and expected operations for
+scalar types in the `core::trait::scalar` module:
+
+* `NumConstsEx` - defines `ZERO` and `ONE` constants
+* `FloatConstEx` - defines `NEG_ONE`, `TWO` and `HALF` constants
+* `NumEx` - base number trait, implemented for `f32`, `f64`, `i32` and `u32`
+* `SignedEx` - signed number trait, implemented for `i32`, `f32` and `f64`
+* `FloatEx` - float number trait, implemented for `f32` and `f64`
+
+Then vector traits are defined in `core::trait::vector`:
+
+* `VectorConst` - defines `ZERO` and `ONE` constants for vectors
+* `Vector3Const` - defines 3D vector constants, `X`, `Y`, and `Z`
+* `Vector` - defines methods for any size of vector, typically these methods
+ can be implemented without needing to know the number of components, e.g.
+ `splat`
+* `SignedVector` - defines methods for signed vectors of any size, e.g. `neg`
+* `Vector3` - defines methods for 3D vectors, e.g. `dot` needs to know how many
+ components to operate on
+* `SignedVector3` - defines methods for signed vector types with 3 components,
+ in this case the default implementation of some methods needs to know the
+ number of components, e.g. `abs`
+* `FloatVector3` - defines methods for float vector types with 3 components, for
+ when the implementation needs to know the number of components, e.g. `length`
+
+Note that the `Vector<T>` trait also has an associated `Mask` type which is the
+type used for returning from comparison operators. Different types are used for
+scalar and SIMD types.
+
+### Macros
+
+The different 3D vector types are declared in the `vec3` module and macros are
+used to implement the majority of their methods. Macros specific to 3D vectors
+are found in the `vec3` module, most macros call other macros.
+
+* `impl_f32_vec3` - implements methods and traits common to `Vec3` and `Vec3A`
+* `impl_vec3_float_methods` - implements methods for 3D vectors of floats
+* `impl_vec3_signed_methods` - implements methods for 3D vectors of signed types
+* `impl_vec3_common_methods` - implements common methods for 3D vectors
+* `impl_vec3_float_traits` - implements traits for 3D vector float operators
+* `impl_vec3_common_traits` - implements traits for common 3D vector operators
+
+Macros that define functionality to vectors of any size are found in
+`src/vec.rs`. These do not call other macros.
+
+* `impl_vecn_float_methods` - implements common methods for vectors of floats
+* `impl_vecn_signed_methods` - implements common methods for vectors of signed
+* `impl_vecn_common_methods` - implements common methods for all vector types
+* `impl_vecn_signed_traits` - implements trait operators for signed vector types
+* `impl_vecn_common_traits` - implements common trait operators for all vectors
+
+### Summary
+
+Functionality is broken down to the point that there is very little duplicated
+code. Macros are also used to avoid duplicating comments. Functionality is
+implemented through traits operating on different storage types. Common
+functionality is often implemented in default trait implementations.
+
+## Limitations
+
+Adding support for types other than `f32` greatly increased the complexity of
+the internal implementation of the crate.
+
+While the current approach works well for keeping the public interface and
+documentation simple and clean complexity exists under the surface. The use of
+macros unfortunately obfuscates the source code for anyone trying to read it.
+The largest issues being when users navigate to glam code in an IDE they are
+usually presented with a very high level macro and need to manually hunt down
+the actual implementation. The same issue exists when attempting to view source
+in the `rustdoc` generated documentation.
+
+One way to address this would be to use an offline code generator instead of a
+compile time code generator (i.e. Rust macros) which would remove macros from
+the code that users end up viewing. The main downside is it would take a while
+to write this and it might be more confusing for contributors. Another option
+might be to improve tooling so that IDEs and rustdoc navigate directly to the
+code that was generated by the macro rather than the macro itself.
+
+The use of `Deref` does occasionally cause confusion. It would be good if it was
+only necessary to implement `Deref` on the SIMD types but currently that is not
+possible.
diff --git a/ATTRIBUTION.md b/ATTRIBUTION.md
new file mode 100644
index 0000000..34ae900
--- /dev/null
+++ b/ATTRIBUTION.md
@@ -0,0 +1,80 @@
+# Attribution
+
+`glam` contains code ported from the following C++ libraries.
+
+## [DirectXMath]
+
+[DirectXMath]: https://docs.microsoft.com/en-us/windows/win32/dxmath/directxmath-portal
+
+[The MIT License (MIT)](https://github.com/microsoft/DirectXMath/blob/master/LICENSE)
+
+Copyright (c) 2011-2020 Microsoft Corp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this
+software and associated documentation files (the "Software"), to deal in the Software
+without restriction, including without limitation the rights to use, copy, modify,
+merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be included in all copies
+or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+## [Realtime Math]
+
+[Realtime Math]: https://github.com/nfrechette/rtm
+
+[MIT License](https://github.com/nfrechette/rtm/blob/develop/LICENSE)
+
+Copyright (c) 2018 Nicholas Frechette
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+## [GLM]
+
+[GLM]: https://glm.g-truc.net
+
+[The MIT License](https://github.com/g-truc/glm/blob/master/copying.txt)
+
+Copyright (c) 2005 - G-Truc Creation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..ab06d91
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,17 @@
+// This file is generated by cargo2android.py --run.
+// Do not modify this file as changes will be overridden on upgrade.
+
+
+
+rust_library_host {
+ name: "libglam",
+ crate_name: "glam",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.20.3",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
+ features: [
+ "default",
+ "std",
+ ],
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..f2e54b1
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,788 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog], and this project adheres to
+[Semantic Versioning].
+
+## [0.20.3] - 2022-03-28
+
+### Added
+
+* Added `to_array()` to `Quat` and `DQuat`.
+* Added `mul_add` method to all vector types - note that this will be slower
+ without hardware support enabled.
+* Added the `fast-math` flag which will sacrifice some float determinism for
+ speed.
+
+### Fixed
+
+* Fixed a bug in the `sse2` and `wasm32` implementations of
+ `Mat4::determinant()`.
+
+## [0.20.2] - 2021-12-20
+
+### Fixed
+
+* Fixed SPIR-V build which was broken due to a typo.
+
+## [0.20.1] - 2021-11-23
+
+### Added
+
+* Added the `from_rotation_arc_2d()` method to `Quat` and `DQuat` which will
+ return a rotation between two 2D vectors around the z axis.
+* Added impl of `Neg` operator for matrix types.
+* Added `cuda` feature which forces `glam` types to match cuda's alignment
+ requirements.
+
+### Changed
+
+* The `Quat` and `DQuat` methods `from_rotation_arc()` and
+ `from_rotation_arc_colinear()` are now available in `no_std`.
+* The `Vec3` and `DVec3` methods `any_orthogonal_vector()`,
+ `any_orthonormal_vector()` and `any_orthonormal_pair()` are now available in
+ `no_std`.
+* Added `repr(C)` attribute to affine types.
+
+### Removed
+
+* Removed deprecated `as_f32()`, `as_f64()`, `as_i32()` and `as_u32()` methods.
+
+## [0.20.0] - 2021-11-01
+
+### Breaking changes
+
+* Minimum Supported Version of Rust bumped to 1.52.1 for an update to the `mint`
+ crate.
+
+### Added
+
+* Added implementations for new `IntoMint` trait from the `mint` crate.
+* Added `mint` conversions for `Mat3A`.
+* Added `as_vec3a` cast methods to vector types.
+
+## [0.19.0] - 2021-10-05
+
+### Breaking changes
+
+* Removed truncating vector `From` implementations. Use `.truncate()` or swizzle
+ methods instead.
+
+### Added
+
+* Added `Not`, `Shl`, `Shr`, `BitAnd`, `BitOr` and `BitXor` implementations for
+ all `IVec` and `UVec` vector types.
+* Added `NAN` constant for all types.
+* Documented `glam`'s [architecture](ARCHITECTURE.md).
+
+### Changed
+
+* `Sum` and `Product` traits are now implemented in `no_std` builds.
+
+## [0.18.0] - 2021-08-26
+
+### Breaking changes
+
+* Minimum Supported Version of Rust bumped to 1.51.0 for `wasm-bindgen-test`
+ and `rustdoc` `alias` support.
+
+### Added
+
+* Added `wasm32` SIMD intrinsics support.
+* Added optional support for the `rkyv` serialization crate.
+* Added `Rem` and `RemAssign` implementations for all vector types.
+* Added quaternion `xyz()` method for returning the vector part of the
+ quaternion.
+* Added `From((Scalar, Vector3))` for 4D vector types.
+
+### Changed
+
+* Deprecated `as_f32()`, `as_f64()`, `as_i32()` and `as_u32()` methods in favor
+ of more specific methods such as `as_vec2()`, `as_dvec2()`, `as_ivec2()` and
+ `as_uvec2()` and so on.
+
+## [0.17.3] - 2021-07-18
+
+### Fixed
+
+* Fix alignment unit tests on non x86 platforms.
+
+## [0.17.2] - 2021-07-15
+
+### Fixed
+
+* Fix alignment unit tests on i686 and S390x.
+
+## [0.17.1] - 2021-06-29
+
+### Added
+
+* Added `serde` support for `Affine2`, `DAffine2`, `Affine3A` and `DAffine3`.
+
+## [0.17.0] - 2021-06-26
+
+### Breaking changes
+
+* The addition of `Add` and `Sub` implementations of scalar values for vector
+ types may create ambiguities with existing calls to `add` and `sub`.
+* Removed `From<Mat3>` implementation for `Mat2` and `From<DMat3>` for `DMat2`.
+ These have been replaced by `Mat2::from_mat3()` and `DMat2::from_mat3()`.
+* Removed `From<Mat4>` implementation for `Mat3` and `From<DMat4>` for `DMat3`.
+ These have been replaced by `Mat3::from_mat4()` and `DMat3::from_mat4()`.
+* Removed deprecated `from_slice_unaligned()`, `write_to_slice_unaligned()`,
+ `from_rotation_mat4` and `from_rotation_ypr()` methods.
+
+### Added
+
+* Added `col_mut()` method which returns a mutable reference to a matrix column
+ to all matrix types.
+* Added `AddAssign`, `MulAssign` and `SubAssign` implementations for all matrix
+ types.
+* Added `Add` and `Sub` implementations of scalar values for vector types.
+* Added more `glam_assert!` checks and documented methods where they are used.
+* Added vector projection and rejection methods `project_onto()`,
+ `project_onto_normalized()`, `reject_from()` and `reject_from_normalized()`.
+* Added `Mat2::from_mat3()`, `DMat2::from_mat3()`, `Mat3::from_mat4()`,
+ `DMat3::from_mat4()` which create a smaller matrix from a larger one,
+ discarding a final row and column of the input matrix.
+* Added `Mat3::from_mat2()`, `DMat3::from_mat2()`, `Mat4::from_mat3()` and
+ `DMat4::from_mat3()` which create an affine transform from a smaller linear
+ transform matrix.
+
+### Changed
+
+* Don't support `AsRef` and `AsMut` on SPIR-V targets. Also removed SPIR-V
+ support for some methods that used `as_ref()`, including `hash()`. Not a
+ breaking change as these methods would not have worked anyway.
+
+### Fixed
+
+* Fixed compile time alignment checks failing on i686 targets.
+
+## [0.16.0] - 2021-06-06
+
+### Breaking changes
+
+* `spirv-std` dependency was removed, rust-gpu depends on glam internally
+ again for now.
+* Added `must_use` attribute to all `inverse()`, `normalize()`,
+ `try_normalize()`, `transpose()` and `conjugate()` methods.
+
+### Added
+
+* Added `fract()` method to float vector types which return a vector containing
+ `self - self.floor()`.
+* Added optional support for the `approx` crate. Note that all glam types
+ implement their own `abs_diff_eq()` method without requiring the `approx`
+ dependency.
+
+## [0.15.2] - 2021-05-20
+
+### Added
+
+* Added `from_cols()` methods to affine types.
+* Added methods for reading and writing affine types from and to arrays and
+ slices, including `from_cols_array()`, `to_cols_array()`,
+ `from_cols_array_2d()`, `to_cols_array_2d()`, `from_cols_slice()` and
+ `write_cols_to_slice()`.
+* Added `core::fmt::Display` trait implementations for affine types.
+* Added `core::ops::Add`, `core::ops::Mul` scalar and `core::ops::Sub` trait
+ implementations for affine types.
+* Added `from_array()` methods to quaternion types.
+
+### Changed
+
+* Renamed vector and quaternion `from_slice_unaligned()` and
+ `write_to_slice_unaligned()` methods to `from_slice()` and
+ `write_to_slice()`.
+* Removed usage of `_mm_rcp_ps` from SSE2 implementation of `Quat::slerp` as
+ this instruction is not deterministic between Intel and AMD chips.
+
+## [0.15.1] - 2021-05-14
+
+### Changed
+
+* Disable `const_assert_eq!` size and alignment checks for SPIR-V targets.
+
+## [0.15.0] - 2021-05-14
+
+### Breaking changes
+
+* Removed `PartialOrd` and `Ord` trait implementations for all `glam` types.
+* Removed deprecated `zero()`, `one()`, `unit_x()`, `unit_y()`, `unit_z()`,
+ `unit_w()`, `identity()` and `Mat2::scale()` methods.
+* Remove problematic `Quat` `From` trait conversions which would allow creating
+ a non-unit quaternion without necessarily realising, including from
+ `Vec4`, `(x, y, z, w)` and `[f32; 4]`.
+
+### Added
+
+* Added `EulerRot` enum for specifying Euler rotation order and
+ `Quat::from_euler()`, `Mat3::from_euler()` and `Mat4::from_euler()` which
+ support specifying a rotation order and angles of rotation.
+* Added `Quat::to_euler()` method for extracting Euler angles.
+* Added `Quat::from_vec4()` which is an explicit method for creating a
+ quaternion from a 4D vector. The method does not normalize the resulting
+ quaternion.
+* Added `Mat3A` type which uses `Vec3A` columns. It is 16 byte aligned and
+contains internal padding but is generally faster than `Mat3` for most
+ operations if SIMD is available.
+* Added 3D affine transform types `Affine3A` and `DAffine3`. These are more
+ efficient than using `Mat4` and `DMat4` respectively when working with 3D
+ affine transforms.
+* Added 2D affine transform types `Affine2` and `DAffine2`. These are more
+ efficient than using `Mat3` and `DMat3` respectively when working with 2D
+ affine transforms.
+* Added `Quat::from_affine3()` to create a quaternion from an affine transform
+ rotation.
+* Added explicit `to_array()` method to vector types to better match the matrix
+ methods.
+
+### Changed
+
+* Deprecated `Quat::from_rotation_ypr()`, `Mat3::from_rotation_ypr()` and
+ `Mat4::from_rotation_ypr()` in favor of new `from_euler()` methods.
+* Deprecated `Quat::from_rotation_mat3()` and `Quat::from_rotation_mat4()` in
+ favor of new `from_mat3` and `from_mat4` methods.
+* Deprecated `TransformSRT` and `TransformRT` which are under the
+ `transform-types` feature. These will be moved to a separate experimental
+ crate.
+* Updated `spirv-std` dependency version to `0.4.0-alpha7`.
+
+## [0.14.0] - 2021-04-09
+
+### Breaking changes
+
+* Minimum Supported Version of Rust bumped to 1.45.0 for the `spirv-std`
+ dependency.
+
+### Added
+
+* Added `AXES[]` constants to all vector types. These are arrays containing the
+ unit vector for each axis.
+* Added quaternion `from_scaled_axis` and `to_scaled_axis` methods.
+
+### Changed
+
+* Updated dependency versions of `bytemuck` to `1.5`, `rand` to `0.8`,
+ `rand_xoshiro` to `0.6` and `spirv-std` to `0.4.0-alpha4`.
+
+## [0.13.1] - 2021-03-24
+
+### Added
+
+* Added vector `clamp()` functions.
+* Added matrix column and row accessor methods, `col()` and `row()`.
+* Added SPIR-V module and dependency on `spirv-std` for the SPIR-V target.
+* Added matrix truncation from 4x4 to 3x3 and 3x3 to 2x2 via `From` impls.
+
+### Changed
+
+* Documentation corrections and improvements.
+
+## [0.13.0] - 2021-03-04
+
+### Breaking Changes
+
+* The behavior of the 4x4 matrix method `transform_point3()` was changed to not
+ perform the perspective divide. This is an optimization for use with affine
+ transforms where perspective correction is not required. The
+ `project_point3()` method was added for transforming points by perspective
+ projections.
+* The 3x3 matrix `from_scale()` method was changed to
+ create an affine transform containing a 2-dimensional non-uniform scale to be
+ consistent with the 4x4 matrix version. The
+ `from_diagonal()` method can be used to create a 3x3 scale matrix.
+* The 3x3 matrix methods `transform_point2_as_vec3a`,
+ `transform_vector2_as_vec3a` and `mul_vec3_as_vec3a` were unintentionally
+ `pub` and are no longer publicly accessible.
+
+### Added
+
+* Added `Vec2::X`, `Vec4::W` etc constants as a shorter versions of `unit_x()`
+ and friends.
+* Added `ONE` constants for vectors.
+* Added `IDENTITY` constants for `Mat2`, `Mat3`, `Mat4` and `Quat`.
+* Added `ZERO` constant for vectors and matrices.
+* Added `clamp_length()`, `clamp_length_max()`, and `clamp_length_min` methods
+ for `f32` and `f64` vector types.
+* Added `try_normalize()` and `normalize_or_zero()` for all real vector types.
+* Added `from_diagonal()` methods to all matrix types for creating diagonal
+ matrices from a vector.
+* Added `angle_between()`, `from_rotation_arc()` and
+ `from_rotation_arc_colinear()` to quaternion types.
+* Added quaternion `inverse()` which assumes the quaternion is already
+ normalized and returns the conjugate.
+* Added `from_translation()` and `from_angle()` methods to 3x3 matrix types.
+* Added `project_point3()` method to 4x4 matrix types. This method is for
+ transforming 3D vectors by perspective projection transforms.
+* Added `Eq` and `Hash` impls for integer vector types.
+
+### Changed
+
+* Deprecated `::unit_x/y/z()`, `::zero()`, `::one()`, `::identity()` functions
+ in favor of constants.
+
+## [0.12.0] - 2021-01-15
+
+### Breaking Changes
+
+* `Vec2Mask`, `Vec3Mask` and `Vec4Mask` have been replaced by `BVec2`, `BVec3`,
+ `BVec3A`, `BVec4` and `BVec4A`. These types are used by some vector methods
+ and are not typically referenced directly.
+
+### Added
+
+* Added `f64` primitive type support
+ * vectors: `DVec2`, `DVec3` and `DVec4`
+ * square matrices: `DMat2`, `DMat3` and `DMat4`
+ * a quaternion type: `DQuat`
+* Added `i32` primitive type support
+ * vectors: `IVec2`, `IVec3` and `IVec4`
+* Added `u32` primitive type support
+ * vectors: `UVec2`, `UVec3` and `UVec4`
+* Added `bool` primitive type support
+ * vectors: `BVec2`, `BVec3` and `BVec4`
+
+### Removed
+
+* `build.rs` has been removed.
+
+## [0.11.3] - 2020-12-29
+
+### Changed
+
+* Made `Vec3` `repr(simd)` for `spirv` targets.
+
+### Added
+
+* Added `From<(Vec2, f32)>` for `Vec3` and `From<(Vec3, f32)>` for `Vec4`.
+
+## [0.11.2] - 2020-12-04
+
+### Changed
+
+* Compilation fixes for Rust 1.36.0.
+
+## [0.11.1] - 2020-12-03
+
+### Added
+
+* Added support for the [Rust GPU](https://github.com/EmbarkStudios/rust-gpu)
+ SPIR-V target architecture.
+
+## [0.11.0] - 2020-11-26
+
+### Added
+
+* Added `is_finite` method to all types which returns `true` if, and only if,
+ all contained elements are finite.
+* Added `exp` and `powf` methods for all vector types.
+
+### Changed
+
+* The `is_nan` method now returns a `bool` to match the new `is_finite` method
+ and to be consistent with the same methods on the `f32` and `f64` primitive
+ types.
+* Renamed `is_nan` which returns a vector mask to `is_nan_mask`.
+* Don't use the `cfg` definitions added by `build.rs` for defining structs as
+ `rust-analyzer` is not aware of them.
+
+### Removed
+
+* Removed deprecated accessor methods.
+
+## [0.10.2] - 2020-11-17
+
+### Changed
+
+* Deprecated element accessor members `.x()`, `.x_mut()`, `.set_x()`, etc. on
+ vector and quaternion types.
+* Deprecated column accessor members `.x_axis()`, `.x_axis_mut()`,
+ `.set_x_axis()`, etc. on matrix types.
+
+## [0.10.1] - 2020-11-15
+
+### Added
+
+* Added the `Vec2::perp` method which returns a `Vec2` perpendicular to `self`.
+
+### Changed
+
+* `Vec2` and `Vec3` types were changed to use public named fields for `.x`,
+ `.y`, and `.z` instead of accessors.
+* `Quat`, `Vec3A` and `Vec4` implement `Deref` and `DerefMut` for the new `XYZ`
+ and `XYZW` structs to emulate public named field access.
+* `Mat3` and `Mat4` had their axis members made public instead of needing
+ accessors.
+* `Mat2` implements `Deref` and `DerefMut` for the new `XYAxes` struct to
+ emulate public named field access.
+
+### Removed
+
+* Removed deprecated `length_reciprocal` and `sign` methods.
+
+### Fixed
+
+* Adding `glam` as a `no_std` dependency should now work as expected.
+
+## [0.10.0] - 2020-10-31
+
+### Breaking Changes
+
+* Changed the return type of `Vec4::truncate` from `Vec3A` to `Vec3`.
+
+### Added
+
+* Added `From` implementations to truncate to narrower vector types, e.g.
+ `Vec4` to `Vec3A`, `Vec3` and `Vec2` and from `Vec3A` and `Vec3` to `Vec2`.
+* Added swizzles for `Vec4`, `Vec3A`, `Vec3` and `Vec2`. These can be used to
+ reorder elements in the same type and also to create larger or smaller
+ vectors from the given vector's elements.
+* Added `Quat` operators `Add<Quat>`, `Sub<Quat>`, `Mul<f32>` and `Div<f32>`.
+ These are used by other crates for interpolating quaternions along splines.
+ Note that these operations will not return unit length quaternions, thus the
+ results must be normalized before performing other `Quat` operations.
+* Added `Mat4::transform_point3a` and `Mat4::transform_vector3a`.
+* Added `AsRef<[f32; 9]>` and `AsMut<[f32; 9]>` trait implementations to `Mat3`.
+* Added optional `bytemuck` support primarily for casting types to `&[u8]`.
+* Added support for compiling with `no_std` by disabling the default `std`
+ feature and adding the `libm` feature.
+* Added `distance` and `distance_squared` methods to `Vec2`, `Vec3`, `Vec3A`
+ and `Vec4`.
+
+## [0.9.5] - 2020-10-10
+
+### Added
+
+* `glam` uses SSE2 for some types which prevents constructor functions from
+ being made `const fn`. To work around this limitation the following macro
+ functions have been added to support creating `const` values of `glam` types:
+ `const_mat2`, `const_mat3`, `const_mat4`, `const_quat`, `const_vec2`,
+ `const_vec3`, `const_vec3a` and `const_vec4`.
+* Added `is_nan` methods to `Vec2`, `Vec3`, `Vec3A` and `Vec4` which return a
+ mask.
+
+### Changed
+
+* Renamed the vector `reciprocal` and `length_reciprocal` methods to `recip`
+ and `length_recip` to match the Rust standard library naming. The old methods
+ have been deprecated.
+* Renamed the vector `sign` methods to `signum` to match the Rust standard
+ library naming. The new methods now check for `NAN`. The old methods have
+ been deprecated.
+* Added SSE2 optimized implementations of `Mat4::determinant` and
+ `Mat4::inverse`.
+
+### Removed
+
+* Removed deprecated function `Mat4::perspective_glu_rh`.
+
+## [0.9.4] - 2020-08-31
+
+### Fixed
+
+* Fixed `Mat4::transform_point3` to account for homogeneous w coordinate.
+ Previously this would have been incorrect when the resulting homogeneous
+ coordinate was not 1.0, e.g. when transforming by a perspective projection.
+* Fixed `Mat3::transform_point2` to account for homogeneous z coordinate.
+
+## [0.9.3] - 2020-08-11
+
+### Added
+
+* Added `Mat4::perspective_rh`.
+
+## [0.9.2] - 2020-07-09
+
+### Added
+
+* Added `Mat3::mul_vec3a` and `Quat::mul_vec3a`.
+
+### Changed
+
+* Changed `Quat::mul_vec3` to accept and return `Vec3` instead of `Vec3A`.
+
+## [0.9.1] - 2020-07-01
+
+### Added
+
+* Added `Mat3 * Vec3A` implementation.
+* Added `Vec3A` benches.
+
+### Changed
+
+* Some documentation improvements around the new `Vec3A` type.
+
+## [0.9.0] - 2020-06-28
+
+### Added
+
+* `Vec3` has been split into scalar `Vec3` and 16 byte aligned `Vec3A` types.
+ Only the `Vec3A` type currently uses SIMD optimizations.
+* `Vec3Mask` has been split into scalar `Vec3Mask` and 16 byte aligned
+ `Vec3AMask` types.
+* Added `mut` column accessors to all matrix types, e.g. `Mat2::x_axis_mut()`.
+* Added `From` trait implementations for `Vec3AMask` and `Vec4Mask` to `__m128`.
+
+### Changed
+
+* The `Mat3` type is using the scalar `Vec3` type for storage.
+* Simplified `Debug` trait output for `Quat`, `Vec4` and `Vec3A`.
+
+### Removed
+
+* Removed the `packed-vec3` feature flag as it is now redundant.
+
+## [0.8.7] - 2020-04-28
+
+### Added
+
+* Added `Quat::slerp` - note that this uses a `sin` approximation.
+* Added `angle_between` method for `Vec2` and `Vec3`.
+* Implemented `Debug`, `Display`, `PartialEq`, `Eq`, `PartialOrd`, `Ord`,
+ `Hash`, and `AsRef` traits for `Vec2Mask`, `Vec3Mask` and `Vec4Mask`.
+* Added conversion functions from `Vec2Mask`, `Vec3Mask` and `Vec4Mask` to an
+ array of `[u32]`.
+* Added `build.rs` to simplify conditional feature compilation.
+
+### Changed
+
+* Increased test coverage.
+
+### Removed
+
+* Removed `cfg-if` dependency.
+
+## [0.8.6] - 2020-02-18
+
+### Added
+
+* Added the `packed-vec3` feature flag to disable using SIMD types for `Vec3`
+ and `Mat3` types. This avoids wasting some space due to 16 byte alignment at
+ the cost of some performance.
+* Added `x_mut`, `y_mut`, `z_mut`, `w_mut` where appropriate to `Vec2`, `Vec3`
+ and `Vec4`.
+* Added implementation of `core::ops::Index` and `core::ops::IndexMut` for
+ `Vec2`, `Vec3` and `Vec4`.
+
+### Changed
+
+* Merged SSE2 and scalar `Vec3` and `Vec4` implementations into single files
+ using the `cfg-if` crate.
+
+## [0.8.5] - 2020-01-02
+
+### Added
+
+* Added projection functions `Mat4::perspective_lh`,
+ `Mat4::perspective_infinite_lh`, `Mat4::perspective_infinite_reverse_lh`,
+ `Mat4::orthographic_lh` and `Mat4::orthographic_rh`.
+* Added `round`, `ceil` and `floor` methods to `Vec2`, `Vec3` and `Vec4`.
+
+## [0.8.4] - 2019-12-17
+
+### Added
+
+* Added `Mat4::to_scale_rotation_translation` for extracting scale, rotation and
+ translation from a 4x4 homogeneous transformation matrix.
+* Added `cargo-deny` GitHub Action.
+
+### Changed
+
+* Renamed `Quat::new` to `Quat::from_xyzw`.
+
+## [0.8.3] - 2019-11-27
+
+### Added
+
+* Added `Mat4::orthographic_rh_gl`.
+
+### Changed
+
+* Renamed `Mat4::perspective_glu_rh` to `Mat4::perspective_rh_gl`.
+* SSE2 optimizations for `Mat2::determinant`, `Mat2::inverse`,
+ `Mat2::transpose`, `Mat3::transpose`, `Quat::conjugate`, `Quat::lerp`,
+ `Quat::mul_vec3`, `Quat::mul_quat` and `Quat::from_rotation_ypr`.
+* Disabled optimizations to `Mat4::transform_point3` and
+ `Mat4::transform_vector3` as they are probably incorrect and need
+ investigating.
+* Added missing `#[repr(C)]` to `Mat2`, `Mat3` and `Mat4`.
+* Benchmarks now store output of functions to better estimate the cost of a
+ function call.
+
+### Removed
+
+* Removed deprecated functions `Mat2::new`, `Mat3::new` and `Mat4::new`.
+
+## [0.8.2] - 2019-11-06
+
+### Changed
+
+* `glam_assert!` is no longer enabled by default in debug builds, it can be
+ enabled in any configuration using the `glam-assert` feature or in debug
+ builds only using the `debug-glam-assert` feature.
+
+### Removed
+
+* The `glam_assert!` checks that `lerp` is bounded between 0.0 and 1.0 and
+ that matrix scales are non-zero have been removed.
+
+## [0.8.1] - 2019-11-03
+
+### Added
+
+* Added `Display` trait implementations for `Mat2`, `Mat3` and `Mat4`.
+
+### Changed
+
+* Disabled `glam`'s SSE2 `sin_cos` implementation - it became less precise for
+ large angle values.
+* Reduced the default epsilon used by the `is_normalized!` macro from
+ `std::f32::EPSILON` to `1e-6`.
+
+## [0.8.0] - 2019-10-14
+
+### Removed
+
+* Removed the `approx` crate dependency. Each `glam` type has an `abs_diff_eq`
+ method added which is used by unit tests for approximate floating point
+ comparisons.
+* Removed the `Angle` type. All angles are now `f32` and are expected to
+ be in radians.
+* Removed the deprecated `Vec2b`, `Vec3b` and `Vec4b` types and the `mask`
+ methods on `Vec2Mask`, `Vec3Mask` and `Vec4Mask`.
+
+### Changed
+
+* The `rand` crate dependency has been removed from default features. This was
+ required for benchmarking but a simple random number generator has been added
+ to the benches `support` module instead.
+* The `From` trait implementations converting between 1D and 2D `f32` arrays
+ and matrix types have been removed. It was ambiguous how array data would map
+ matrix columns so these have been replaced with explicit methods
+ `from_cols_array` and `from_cols_array_2d`.
+* Matrix `new` methods have been renamed to `from_cols` to be consistent with
+ the other methods that create matrices from data.
+* Renamed `Mat4::perspective_glu` to `Mat4::perspective_glu_rh`.
+
+## [0.7.2] - 2019-09-22
+
+### Fixed
+
+* Fixed incorrect projection matrix methods `Mat4::look_at_lh`
+ and `Mat4::look_at_rh`.
+
+### Added
+
+* Added support for building infinite projection matrices, including both
+ standard and reverse depth `Mat4::perspective_infinite_rh` and
+ `Mat4::perspective_infinite_reverse_rh`.
+* Added `Vec2Mask::new`, `Vec3Mask::new` and `Vec4Mask::new` methods.
+* Implemented `std::ops` `BitAnd`, `BitAndAssign`, `BitOr`, `BitOrAssign`
+ and `Not` traits for `Vec2Mask`, `Vec3Mask` and `Vec4Mask`.
+* Added method documentation for `Vec4` and `Vec4Mask` types.
+* Added missing `serde` implementations for `Mat2`, `Mat3` and `Mat4`.
+* Updated `rand` and `criterion` versions.
+
+## [0.7.1] - 2019-07-08
+
+### Fixed
+
+* The SSE2 implementation of `Vec4` `dot` was missing a shuffle, meaning the
+ `dot`, `length`, `length_squared`, `length_reciprocal` and `normalize`
+ methods were sometimes incorrect.
+
+### Added
+
+* Added the `glam_assert` macro which behaves like Rust's `debug_assert` but
+ can be enabled separately to `debug_assert`. This is used to perform
+ asserts on correctness.
+* Added `is_normalized` method to `Vec2`, `Vec3` and `Vec4`.
+
+### Changed
+
+* Replaced usage of `std::mem::uninitialized` with `std::mem::MaybeUninit`. This
+ change requires stable Rust 1.36.
+* Renamed `Vec2b` to `Vec2Mask`, `Vec3b` to `Vec3Mask` and `Vec4b` to
+ `Vec4Mask`. Old names are aliased to the new name and deprecated.
+* Deprecate `VecNMask` `mask` method, use new `bitmask` method instead
+* Made fallback version of `VecNMask` types the same size and alignment as the
+ SIMD versions.
+* Added `Default` support to `VecNMask` types, will add more common traits in
+ the future.
+* Added `#[inline]` to `mat2`, `mat3` and `mat4` functions.
+
+## [0.7.0] - 2019-06-28
+
+### Added
+
+* Added `Mat2` into `[f32; 4]`, `Mat3` into `[f32; 9]` and `Mat4` into
+ `[f32; 16]`.
+
+### Removed
+
+* Removed `impl Mul<&Vec2> for Mat2` and `impl Mul<&Vec3> for Vec3` as these
+ don't exist for any other types.
+
+## [0.6.1] - 2019-06-22
+
+### Changed
+
+* `Mat2` now uses a `Vec4` internally which gives it some performance
+ improvements when SSE2 is available.
+
+## 0.6.0 - 2019-06-13
+
+### Changed
+
+* Switched from row vectors to column vectors
+* Vectors are now on the right of multiplications with matrices and quaternions.
+
+[Keep a Changelog]: https://keepachangelog.com/
+[Semantic Versioning]: https://semver.org/spec/v2.0.0.html
+[Unreleased]: https://github.com/bitshifter/glam-rs/compare/0.20.3...HEAD
+[0.20.3]: https://github.com/bitshifter/glam-rs/compare/0.20.2...0.20.3
+[0.20.2]: https://github.com/bitshifter/glam-rs/compare/0.20.1...0.20.2
+[0.20.1]: https://github.com/bitshifter/glam-rs/compare/0.20.0...0.20.1
+[0.20.0]: https://github.com/bitshifter/glam-rs/compare/0.19.0...0.20.0
+[0.19.0]: https://github.com/bitshifter/glam-rs/compare/0.18.0...0.19.0
+[0.18.0]: https://github.com/bitshifter/glam-rs/compare/0.17.3...0.18.0
+[0.17.3]: https://github.com/bitshifter/glam-rs/compare/0.17.2...0.17.3
+[0.17.2]: https://github.com/bitshifter/glam-rs/compare/0.17.1...0.17.2
+[0.17.1]: https://github.com/bitshifter/glam-rs/compare/0.17.0...0.17.1
+[0.17.0]: https://github.com/bitshifter/glam-rs/compare/0.16.0...0.17.0
+[0.16.0]: https://github.com/bitshifter/glam-rs/compare/0.15.2...0.16.0
+[0.15.2]: https://github.com/bitshifter/glam-rs/compare/0.15.1...0.15.2
+[0.15.1]: https://github.com/bitshifter/glam-rs/compare/0.15.0...0.15.1
+[0.15.0]: https://github.com/bitshifter/glam-rs/compare/0.14.0...0.15.0
+[0.14.0]: https://github.com/bitshifter/glam-rs/compare/0.13.1...0.14.0
+[0.13.1]: https://github.com/bitshifter/glam-rs/compare/0.13.0...0.13.1
+[0.13.0]: https://github.com/bitshifter/glam-rs/compare/0.12.0...0.13.0
+[0.12.0]: https://github.com/bitshifter/glam-rs/compare/0.11.3...0.12.0
+[0.11.3]: https://github.com/bitshifter/glam-rs/compare/0.11.2...0.11.3
+[0.11.2]: https://github.com/bitshifter/glam-rs/compare/0.11.1...0.11.2
+[0.11.1]: https://github.com/bitshifter/glam-rs/compare/0.11.0...0.11.1
+[0.11.0]: https://github.com/bitshifter/glam-rs/compare/0.10.2...0.11.0
+[0.10.2]: https://github.com/bitshifter/glam-rs/compare/0.10.1...0.10.2
+[0.10.1]: https://github.com/bitshifter/glam-rs/compare/0.10.0...0.10.1
+[0.10.0]: https://github.com/bitshifter/glam-rs/compare/0.9.5...0.10.0
+[0.9.5]: https://github.com/bitshifter/glam-rs/compare/0.9.4...0.9.5
+[0.9.4]: https://github.com/bitshifter/glam-rs/compare/0.9.3...0.9.4
+[0.9.3]: https://github.com/bitshifter/glam-rs/compare/0.9.2...0.9.3
+[0.9.2]: https://github.com/bitshifter/glam-rs/compare/0.9.1...0.9.2
+[0.9.1]: https://github.com/bitshifter/glam-rs/compare/0.9.0...0.9.1
+[0.9.0]: https://github.com/bitshifter/glam-rs/compare/0.8.7...0.9.0
+[0.8.7]: https://github.com/bitshifter/glam-rs/compare/0.8.6...0.8.7
+[0.8.6]: https://github.com/bitshifter/glam-rs/compare/0.8.5...0.8.6
+[0.8.5]: https://github.com/bitshifter/glam-rs/compare/0.8.4...0.8.5
+[0.8.4]: https://github.com/bitshifter/glam-rs/compare/0.8.3...0.8.4
+[0.8.3]: https://github.com/bitshifter/glam-rs/compare/0.8.2...0.8.3
+[0.8.2]: https://github.com/bitshifter/glam-rs/compare/0.8.1...0.8.2
+[0.8.1]: https://github.com/bitshifter/glam-rs/compare/0.8.0...0.8.1
+[0.8.0]: https://github.com/bitshifter/glam-rs/compare/0.7.2...0.8.0
+[0.7.2]: https://github.com/bitshifter/glam-rs/compare/0.7.1...0.7.2
+[0.7.1]: https://github.com/bitshifter/glam-rs/compare/0.7.0...0.7.1
+[0.7.0]: https://github.com/bitshifter/glam-rs/compare/0.6.1...0.7.0
+[0.6.1]: https://github.com/bitshifter/glam-rs/compare/0.6.0...0.6.1
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5e9965f
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# Contributing to glam
+
+Thanks for contributing to `glam`! These guidelines will try to make the
+process painless and efficient.
+
+The short guide to contributing is [start a discussion] on GitHub. Pull
+requests are welcome for bug fixes, documentation improvements and
+optimizations. For anything else it would be best to discuss it first.
+
+## Questions
+
+If you have a question about the usage of this library please [ask a question]
+with GitHub Discussions. That's the easiest way to get support right now.
+
+## Bugs
+
+If you find a bug please [open an issue] on GitHub or submit a pull request. A
+unit test for any bug that slipped through existing coverage would also be
+greatly appreciated.
+
+## New functions and methods
+
+If `glam` is missing functionality on existing types, [suggest a new feature]
+with GitHub Discussions describing what feature you would like added and
+ideally what your use case is for it just so I have a better understanding of
+the feature. I'd like to keep `glam` reasonably light functionality wise
+initially but commonly used functionality that is missing is very welcome. If
+you do submit a pull request please ensure any new functionality also has a
+test.
+
+## Optimizations
+
+If you feel some functionality could be optimized please [open an issue] on
+GitHub or submit a pull request. Any optimization pull request should include a
+benchmark if there isn't one already, so I can confirm the performance
+improvement.
+
+## Documentation
+
+If you feel any documentation could be added or improved please
+[open a GitHub issue] or submit a pull request.
+
+## Code contributions
+
+Depending on the complexity of the change, it might be worth reading about
+`glam`'s [ARCHITECTURE].
+
+You can run some of `glam`'s test suite locally by running the
+`build_and_test_features.sh` script. It's worth running that before creating a
+PR.
+
+Also run `cargo fmt` and `cargo clippy` on any new code.
+
+[start a discussion]: https://github.com/bitshifter/glam-rs/discussions/new
+[open an issue]: https://GitHub.com/bitshifter/glam-rs/issues/new
+[ask a question]: https://github.com/bitshifter/glam-rs/discussions/new?category=q-a
+[suggest a new feature]: https://github.com/bitshifter/glam-rs/discussions/new?category=ideas
+[ARCHITECTURE]: ARCHITECTURE.md
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..8360666
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,135 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "glam"
+version = "0.20.3"
+authors = ["Cameron Hart <cameron.hart@gmail.com>"]
+description = "A simple and fast 3D math library for games and graphics"
+readme = "README.md"
+keywords = ["gamedev", "math", "matrix", "vector", "quaternion"]
+categories = ["game-engines", "no-std"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/bitshifter/glam-rs"
+
+[lib]
+bench = false
+
+[[bench]]
+name = "mat2"
+harness = false
+
+[[bench]]
+name = "mat3"
+harness = false
+
+[[bench]]
+name = "mat3a"
+harness = false
+
+[[bench]]
+name = "affine2"
+harness = false
+
+[[bench]]
+name = "affine3"
+harness = false
+
+[[bench]]
+name = "mat4"
+harness = false
+
+[[bench]]
+name = "quat"
+harness = false
+
+[[bench]]
+name = "transform"
+harness = false
+required-features = ["transform-types"]
+
+[[bench]]
+name = "vec2"
+harness = false
+
+[[bench]]
+name = "vec3"
+harness = false
+
+[[bench]]
+name = "vec3a"
+harness = false
+
+[[bench]]
+name = "vec4"
+harness = false
+[dependencies.approx]
+version = "0.5"
+optional = true
+default-features = false
+
+[dependencies.bytecheck]
+version = "0.6"
+optional = true
+default-features = false
+
+[dependencies.bytemuck]
+version = "1.5"
+optional = true
+default-features = false
+
+[dependencies.mint]
+version = "0.5.8"
+optional = true
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.14"
+optional = true
+default-features = false
+
+[dependencies.rand]
+version = "0.8"
+optional = true
+default-features = false
+
+[dependencies.rkyv]
+version = "0.7"
+optional = true
+
+[dependencies.serde]
+version = "1.0"
+features = ["derive"]
+optional = true
+[dev-dependencies.rand_xoshiro]
+version = "0.6"
+
+[dev-dependencies.serde_json]
+version = "1.0"
+
+[features]
+cuda = []
+debug-glam-assert = []
+default = ["std"]
+fast-math = []
+glam-assert = []
+libm = ["num-traits/libm"]
+scalar-math = []
+std = []
+transform-types = []
+[target."cfg(not(target_arch = \"wasm32\"))".dev-dependencies.criterion]
+version = "0.3"
+features = ["html_reports"]
+[target."cfg(target_arch = \"wasm32\")".dev-dependencies.wasm-bindgen-test]
+version = "0.3.0"
+[badges.maintenance]
+status = "actively-developed"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..2935e65
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,117 @@
+[package]
+name = "glam"
+version = "0.20.3" # remember to update html_root_url
+edition = "2018"
+authors = ["Cameron Hart <cameron.hart@gmail.com>"]
+description = "A simple and fast 3D math library for games and graphics"
+repository = "https://github.com/bitshifter/glam-rs"
+readme = "README.md"
+license = "MIT OR Apache-2.0"
+keywords = ["gamedev", "math", "matrix", "vector", "quaternion"]
+categories = ["game-engines", "no-std"]
+
+[badges]
+maintenance = { status = "actively-developed" }
+
+[features]
+default = ["std"]
+
+# enable support for the standard library
+std = []
+
+# enable additional glam checks if debug assertions are enabled
+debug-glam-assert = []
+# always enable additional glam checks
+glam-assert = []
+
+# this is primarily for testing the fallback implementation
+scalar-math = []
+
+# deprecated and will move to a separate crate
+transform-types = []
+
+# libm is required when building no_std
+libm = ["num-traits/libm"]
+
+# align types to match CUDA requirements
+cuda = []
+
+# Enables platform specific optimizations that might speed-up certain operations.
+# This will cause APIs to output different results depending on the platform used
+# and will likely break cross-platform determinism.
+# This should NOT be enabled by intermediate libraries, deferring the decision to
+# the end binary build instead.
+fast-math = []
+
+[dependencies]
+approx = { version = "0.5", optional = true, default-features = false }
+bytemuck = { version = "1.5", optional = true, default-features = false }
+mint = { version = "0.5.8", optional = true, default-features = false }
+num-traits = { version = "0.2.14", optional = true, default-features = false }
+rand = { version = "0.8", optional = true, default-features = false }
+serde = { version = "1.0", optional = true, features = ["derive"] }
+rkyv = { version = "0.7", optional = true }
+bytecheck = { version = "0.6", optional = true, default-features = false}
+
+[dev-dependencies]
+# rand_xoshiro is required for tests if rand is enabled
+rand_xoshiro = "0.6"
+serde_json = "1.0"
+
+[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
+criterion = { version = "0.3", features = ["html_reports"] }
+
+[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
+wasm-bindgen-test = "0.3.0"
+
+[lib]
+bench = false
+
+[[bench]]
+name = "mat2"
+harness = false
+
+[[bench]]
+name = "mat3"
+harness = false
+
+[[bench]]
+name = "mat3a"
+harness = false
+
+[[bench]]
+name = "affine2"
+harness = false
+
+[[bench]]
+name = "affine3"
+harness = false
+
+[[bench]]
+name = "mat4"
+harness = false
+
+[[bench]]
+name = "quat"
+harness = false
+
+[[bench]]
+name = "transform"
+harness = false
+required-features = ["transform-types"]
+
+[[bench]]
+name = "vec2"
+harness = false
+
+[[bench]]
+name = "vec3"
+harness = false
+
+[[bench]]
+name = "vec3a"
+harness = false
+
+[[bench]]
+name = "vec4"
+harness = false
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..301f1f0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2020 Cameron Hart
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..301f1f0
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2020 Cameron Hart
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..5a00bef
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "glam"
+description: "A simple and fast 3D math library for games and graphics"
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/glam"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/glam/glam-0.20.3.crate"
+ }
+ version: "0.20.3"
+ # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2022
+ month: 4
+ day: 11
+ }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..301f1f0
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2020 Cameron Hart
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..45dc4dd
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:master:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..555159d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,253 @@
+# glam
+
+[![Build Status]][github-ci] [![Coverage Status]][coveralls.io]
+[![Latest Version]][crates.io] [![docs]][docs.rs]
+[![Minimum Supported Rust Version]][Rust 1.52]
+
+A simple and fast 3D math library for games and graphics.
+
+## Development status
+
+`glam` is in beta stage. Base functionality has been implemented and the look
+and feel of the API has solidified.
+
+## Features
+
+* `f32` types
+ * vectors: `Vec2`, `Vec3`, `Vec3A` and `Vec4`
+ * square matrices: `Mat2`, `Mat3`, `Mat3A` and `Mat4`
+ * a quaternion type: `Quat`
+ * affine transformation types: `Affine2` and `Affine3A`
+* `f64` types
+ * vectors: `DVec2`, `DVec3` and `DVec4`
+ * square matrices: `DMat2`, `DMat3` and `DMat4`
+ * a quaternion type: `DQuat`
+ * affine transformation types: `DAffine2` and `DAffine3`
+* `i32` types
+ * vectors: `IVec2`, `IVec3` and `IVec4`
+* `u32` types
+ * vectors: `UVec2`, `UVec3` and `UVec4`
+* `bool` types
+ * vectors: `BVec2`, `BVec3` and `BVec4`
+
+### SIMD
+
+The `Vec3A`, `Vec4`, `Quat`, `Mat2`, `Mat3A`, `Mat4`, `Affine2` and `Affine3A`
+types use 128-bit wide SIMD vector types for storage on `x86`, `x86_64` and
+`wasm32` architectures. As a result, these types are all 16 byte aligned and
+depending on the size of the type or the type's members, they may contain
+internal padding. This results in some wasted space in the cases of `Vec3A`,
+`Mat3A`, `Affine2` and `Affine3A`. However, the use of SIMD generally results
+in better performance than scalar math.
+
+`glam` outperforms similar Rust libraries for common operations as tested by the
+[`mathbench`][mathbench] project.
+
+[mathbench]: https://github.com/bitshifter/mathbench-rs
+
+### Enabling SIMD
+
+SIMD is supported on `x86`, `x86_64` and `wasm32` targets.
+
+* `SSE2` is enabled by default on `x86_64` targets.
+* To enable `SSE2` on `x86` targets add `-C target-feature=+sse2` to
+ `RUSTFLAGS`.
+* To enable `simd128` on `wasm32` targets add `-C target-feature=+simd128` to
+ `RUSTFLAGS`.
+
+Note that SIMD on `wasm32` passes tests but has not been benchmarked,
+performance may or may not be better than scalar math.
+
+### `no_std` support
+
+`no_std` support can be enabled by compiling with `--no-default-features` to
+disable `std` support and `--features libm` for math functions that are only
+defined in `std`. For example:
+
+```toml
+[dependencies]
+glam = { version = "0.20.3", default-features = false, features = ["libm"] }
+```
+
+To support both `std` and `no_std` builds in a project, you can use the following
+in your `Cargo.toml`:
+
+```toml
+[features]
+default = ["std"]
+
+std = ["glam/std"]
+libm = ["glam/libm"]
+
+[dependencies]
+glam = { version = "0.20.3", default-features = false }
+```
+
+### Optional features
+
+* [`approx`] - traits and macros for approximate float comparisons
+* [`bytemuck`] - for casting into slices of bytes
+* [`libm`] - required to compile with `no_std`
+* [`mint`] - for interoperating with other 3D math libraries
+* [`num-traits`] - required to compile with `no_std`, will be included when enabling
+ the `libm` feature
+* [`rand`] - implementations of `Distribution` trait for all `glam` types.
+* [`serde`] - implementations of `Serialize` and `Deserialize` for all `glam`
+ types. Note that serialization should work between builds of `glam` with and
+ without SIMD enabled
+* [`rkyv`] - implementations of `Archive`, `Serialize` and `Deserialize` for all
+ `glam` types. Note that serialization is not interoperable with and without the
+ `scalar-math` feature. It should work between all other builds of `glam`.
+ Endian conversion is currently not supported
+* [`bytecheck`] - to perform archive validation when using the `rkyv` feature
+* [`fast-math`] - By default, glam attempts to provide bit-for-bit identical
+ results on all platforms. Using this feature will enable platform specific
+ optimizations that may not be identical to other platforms. **Intermediate
+ libraries should not use this feature and defer the decision to the final
+ binary build**.
+
+[`approx`]: https://docs.rs/approx
+[`bytemuck`]: https://docs.rs/bytemuck
+[`libm`]: https://github.com/rust-lang/libm
+[`mint`]: https://github.com/kvark/mint
+[`num-traits`]: https://github.com/rust-num/num-traits
+[`rand`]: https://github.com/rust-random/rand
+[`serde`]: https://serde.rs
+[`rkyv`]: https://github.com/rkyv/rkyv
+[`bytecheck`]: https://github.com/rkyv/bytecheck
+
+### Feature gates
+
+* `scalar-math` - compiles with SIMD support disabled
+* `debug-glam-assert` - adds assertions in debug builds which check the validity
+ of parameters passed to `glam` to help catch runtime errors
+* `glam-assert` - adds validation assertions to all builds
+* `cuda` - forces `glam` types to match expected [cuda alignment]
+
+[cuda alignment]: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#built-in-vector-types
+
+### Minimum Supported Rust Version (MSRV)
+
+The minimum supported version of Rust for `glam` is `1.52.1`.
+
+`wasm32` SIMD intrinsics require Rust `1.54.0`.
+
+## Conventions
+
+### Column vectors
+
+`glam` interprets vectors as column matrices (also known as "column vectors")
+meaning when transforming a vector with a matrix the matrix goes on the left,
+e.g. `v' = Mv`. DirectX uses row vectors, OpenGL uses column vectors. There
+are pros and cons to both.
+
+### Column-major order
+
+Matrices are stored in column major format. Each column vector is stored in
+contiguous memory.
+
+### Co-ordinate system
+
+`glam` is co-ordinate system agnostic and intends to support both right-handed
+and left-handed conventions.
+
+## Design Philosophy
+
+The design of this library is guided by a desire for simplicity and good
+performance.
+
+* No generics and minimal traits in the public API for simplicity of usage
+* All dependencies are optional (e.g. `mint`, `rand` and `serde`)
+* Follows the [Rust API Guidelines] where possible
+* Aiming for 100% test [coverage]
+* Common functionality is benchmarked using [Criterion.rs]
+
+[Rust API Guidelines]: https://rust-lang-nursery.github.io/api-guidelines/
+[coverage]: https://coveralls.io/github/bitshifter/glam-rs?branch=main
+[Criterion.rs]: https://bheisler.github.io/criterion.rs/book/index.html
+
+## Architecture
+
+See [ARCHITECTURE.md] for details on `glam`'s internals.
+
+[ARCHITECTURE.md]: ARCHITECTURE.md
+
+## Inspirations
+
+There were many inspirations for the interface and internals of glam from the
+Rust and C++ worlds. In particular:
+
+* [How to write a maths library in 2016] inspired the initial `Vec3A`
+ implementation
+* [Realtime Math] - header only C++11 with SSE and NEON SIMD intrinsic support
+* [DirectXMath] - header only SIMD C++ linear algebra library for use in games
+ and graphics apps
+* `glam` is a play on the name of the popular C++ library [GLM]
+
+[How to write a maths library in 2016]: http://www.codersnotes.com/notes/maths-lib-2016/
+[Realtime Math]: https://github.com/nfrechette/rtm
+[DirectXMath]: https://docs.microsoft.com/en-us/windows/desktop/dxmath/directxmath-portal
+[GLM]: https://glm.g-truc.net
+
+## License
+
+Licensed under either of
+
+* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE)
+ or http://www.apache.org/licenses/LICENSE-2.0)
+* MIT license ([LICENSE-MIT](LICENSE-MIT)
+ or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contribution
+
+Contributions in any form (issues, pull requests, etc.) to this project must
+adhere to Rust's [Code of Conduct].
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+
+Thank you to all of the `glam` [contributors]!
+
+[Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html
+[contributors]: https://github.com/bitshifter/glam-rs/graphs/contributors
+
+## Support
+
+If you are interested in contributing or have a request or suggestion
+[start a discussion] on GitHub. See [CONTRIBUTING.md] for more information for
+contributors.
+
+The [Game Development in Rust Discord] and [Bevy Engine Discord] servers are
+not official support channels but can be good places to ask for help with
+`glam`.
+
+[start a discussion]: https://github.com/bitshifter/glam-rs/discussions
+[CONTRIBUTING.md]: CONTRIBUTING.md
+[Game Development in Rust Discord]: https://discord.gg/yNtPTb2
+[Bevy Engine Discord]: https://discord.gg/gMUk5Ph
+
+## Attribution
+
+`glam` contains code ported from the following C++ libraries:
+
+* [DirectXMath] - MIT License - Copyright (c) 2011-2020 Microsoft Corp
+* [Realtime Math] - MIT License - Copyright (c) 2018 Nicholas Frechette
+* [GLM] - MIT License - Copyright (c) 2005 - G-Truc Creation
+
+See [ATTRIBUTION.md] for details.
+
+[ATTRIBUTION.md]: ATTRIBUTION.md
+
+[Build Status]: https://github.com/bitshifter/glam-rs/actions/workflows/ci.yml/badge.svg
+[github-ci]: https://github.com/bitshifter/glam-rs/actions/workflows/ci.yml
+[Coverage Status]: https://coveralls.io/repos/github/bitshifter/glam-rs/badge.svg?branch=main
+[coveralls.io]: https://coveralls.io/github/bitshifter/glam-rs?branch=main
+[Latest Version]: https://img.shields.io/crates/v/glam.svg
+[crates.io]: https://crates.io/crates/glam/
+[docs]: https://docs.rs/glam/badge.svg
+[docs.rs]: https://docs.rs/glam/
+[Minimum Supported Rust Version]: https://img.shields.io/badge/Rust-1.52.1-blue?color=fc8d62&logo=rust
+[Rust 1.52]: https://github.com/rust-lang/rust/blob/master/RELEASES.md#version-1521-2021-05-10
diff --git a/benches/affine2.rs b/benches/affine2.rs
new file mode 100644
index 0000000..9ea42a9
--- /dev/null
+++ b/benches/affine2.rs
@@ -0,0 +1,79 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Affine2;
+use std::ops::Mul;
+use support::*;
+
+pub fn random_srt_affine2(rng: &mut PCG32) -> Affine2 {
+ Affine2::from_scale_angle_translation(
+ random_nonzero_vec2(rng),
+ random_radians(rng),
+ random_vec2(rng),
+ )
+}
+
+bench_unop!(affine2_inverse, "affine2 inverse", op => inverse, from => random_srt_affine2);
+bench_binop!(
+ affine2_transform_point2,
+ "affine2 transform point2",
+ op => transform_point2,
+ from1 => random_srt_affine2,
+ from2 => random_vec2
+);
+
+bench_binop!(
+ affine2_transform_vector2,
+ "affine2 transform vector2",
+ op => transform_vector2,
+ from1 => random_srt_affine2,
+ from2 => random_vec2
+);
+bench_binop!(affine2_mul_affine2, "affine2 mul affine2", op => mul, from => random_srt_affine2);
+bench_binop!(affine2_mul_mat3, "affine2 mul mat3", op => mul, from1 => random_srt_affine2, from2 => random_srt_mat3);
+bench_binop!(mat3_mul_affine2, "mat3 mul affine2", op => mul, from1 => random_srt_mat3, from2 => random_srt_affine2);
+
+pub fn affine2_from_srt(c: &mut Criterion) {
+ use glam::Vec2;
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs = criterion::black_box(
+ (0..SIZE)
+ .map(|_| {
+ (
+ random_nonzero_vec2(&mut rng),
+ random_radians(&mut rng),
+ random_vec2(&mut rng),
+ )
+ })
+ .collect::<Vec<(Vec2, f32, Vec2)>>(),
+ );
+ let mut outputs = vec![Affine2::default(); SIZE];
+ let mut i = 0;
+ c.bench_function("affine2 from srt", |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ let data = inputs.get_unchecked(i);
+ *outputs.get_unchecked_mut(i) =
+ Affine2::from_scale_angle_translation(data.0, data.1, data.2)
+ }
+ })
+ });
+}
+
+criterion_group!(
+ benches,
+ affine2_inverse,
+ affine2_transform_point2,
+ affine2_transform_vector2,
+ affine2_mul_affine2,
+ affine2_mul_mat3,
+ mat3_mul_affine2,
+ affine2_from_srt,
+);
+
+criterion_main!(benches);
diff --git a/benches/affine3.rs b/benches/affine3.rs
new file mode 100644
index 0000000..1900684
--- /dev/null
+++ b/benches/affine3.rs
@@ -0,0 +1,117 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Affine3A;
+use std::ops::Mul;
+use support::*;
+
+pub fn random_srt_affine3a(rng: &mut PCG32) -> Affine3A {
+ Affine3A::from_scale_rotation_translation(
+ random_nonzero_vec3(rng),
+ random_quat(rng),
+ random_vec3(rng),
+ )
+}
+
+bench_unop!(affine3a_inverse, "affine3a inverse", op => inverse, from => random_srt_affine3a);
+
+bench_binop!(
+ affine3a_transform_point3,
+ "affine3a transform point3",
+ op => transform_point3,
+ from1 => random_srt_affine3a,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ affine3a_transform_vector3,
+ "affine3a transform vector3",
+ op => transform_vector3,
+ from1 => random_srt_affine3a,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ affine3a_transform_point3a,
+ "affine3a transform point3a",
+ op => transform_point3a,
+ from1 => random_srt_affine3a,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ affine3a_transform_vector3a,
+ "affine3a transform vector3a",
+ op => transform_vector3a,
+ from1 => random_srt_affine3a,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ affine3a_mul_affine3a,
+ "affine3a mul affine3a",
+ op => mul,
+ from => random_srt_affine3a
+);
+
+bench_binop!(affine3a_mul_mat4,
+ "affine3a mul mat4",
+ op => mul,
+ from1 => random_srt_affine3a,
+ from2 => random_srt_mat4
+);
+
+bench_binop!(
+ mat4_mul_affine3a,
+ "mat4 mul affine3a",
+ op => mul,
+ from1 => random_srt_mat4,
+ from2 => random_srt_affine3a
+);
+
+pub fn affine3a_from_srt(c: &mut Criterion) {
+ use glam::{Quat, Vec3};
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs = criterion::black_box(
+ (0..SIZE)
+ .map(|_| {
+ (
+ random_nonzero_vec3(&mut rng),
+ random_quat(&mut rng),
+ random_vec3(&mut rng),
+ )
+ })
+ .collect::<Vec<(Vec3, Quat, Vec3)>>(),
+ );
+ let mut outputs = vec![Affine3A::IDENTITY; SIZE];
+ let mut i = 0;
+ c.bench_function("affine3a from srt", |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ let data = inputs.get_unchecked(i);
+ *outputs.get_unchecked_mut(i) =
+ Affine3A::from_scale_rotation_translation(data.0, data.1, data.2)
+ }
+ })
+ });
+}
+
+criterion_group!(
+ benches,
+ affine3a_from_srt,
+ affine3a_inverse,
+ affine3a_mul_affine3a,
+ affine3a_mul_mat4,
+ affine3a_transform_point3,
+ affine3a_transform_point3a,
+ affine3a_transform_vector3,
+ affine3a_transform_vector3a,
+ mat4_mul_affine3a,
+);
+
+criterion_main!(benches);
diff --git a/benches/mat2.rs b/benches/mat2.rs
new file mode 100644
index 0000000..a66ab24
--- /dev/null
+++ b/benches/mat2.rs
@@ -0,0 +1,42 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use std::ops::Mul;
+use support::*;
+
+bench_binop!(
+ mat2_mul_vec2,
+ "mat2 mul vec2",
+ op => mul,
+ from1 => random_mat2,
+ from2 => random_vec2
+);
+
+bench_unop!(
+ mat2_transpose,
+ "mat2 transpose",
+ op => transpose,
+ from => random_mat2
+);
+bench_unop!(
+ mat2_determinant,
+ "mat2 determinant",
+ op => determinant,
+ from => random_mat2
+);
+bench_unop!(mat2_inverse, "mat2 inverse", op => inverse, from => random_mat2);
+bench_binop!(mat2_mul_mat2, "mat2 mul mat2", op => mul, from => random_mat2);
+
+criterion_group!(
+ benches,
+ mat2_transpose,
+ mat2_determinant,
+ mat2_inverse,
+ mat2_mul_vec2,
+ mat2_mul_mat2,
+);
+
+criterion_main!(benches);
diff --git a/benches/mat3.rs b/benches/mat3.rs
new file mode 100644
index 0000000..d0faad2
--- /dev/null
+++ b/benches/mat3.rs
@@ -0,0 +1,72 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Mat3;
+use std::ops::Mul;
+use support::*;
+
+bench_unop!(
+ mat3_transpose,
+ "mat3 transpose",
+ op => transpose,
+ from => random_mat3
+);
+bench_unop!(
+ mat3_determinant,
+ "mat3 determinant",
+ op => determinant,
+ from => random_mat3
+);
+bench_unop!(mat3_inverse, "mat3 inverse", op => inverse, from => random_mat3);
+bench_binop!(mat3_mul_mat3, "mat3 mul mat3", op => mul, from => random_mat3);
+bench_from_ypr!(mat3_from_ypr, "mat3 from ypr", ty => Mat3);
+
+bench_binop!(
+ mat3_mul_vec3,
+ "mat3 mul vec3",
+ op => mul,
+ from1 => random_mat3,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ mat3_mul_vec3a,
+ "mat3 mul vec3a",
+ op => mul,
+ from1 => random_mat3,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ mat3_transform_point2,
+ "mat3 transform point2",
+ op => transform_point2,
+ from1 => random_srt_mat3,
+ from2 => random_vec2
+);
+
+bench_binop!(
+ mat3_transform_vector2,
+ "mat3 transform vector2",
+ op => transform_vector2,
+ from1 => random_srt_mat3,
+ from2 => random_vec2
+);
+
+criterion_group!(
+ benches,
+ mat3_transpose,
+ mat3_determinant,
+ mat3_inverse,
+ mat3_mul_vec3,
+ mat3_mul_vec3a,
+ mat3_mul_mat3,
+ mat3_from_ypr,
+ mat3_transform_vector2,
+ mat3_transform_point2,
+);
+
+criterion_main!(benches);
diff --git a/benches/mat3a.rs b/benches/mat3a.rs
new file mode 100644
index 0000000..6e0bb7b
--- /dev/null
+++ b/benches/mat3a.rs
@@ -0,0 +1,72 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Mat3A;
+use std::ops::Mul;
+use support::*;
+
+bench_unop!(
+ mat3a_transpose,
+ "mat3a transpose",
+ op => transpose,
+ from => random_mat3a
+);
+bench_unop!(
+ mat3a_determinant,
+ "mat3a determinant",
+ op => determinant,
+ from => random_mat3a
+);
+bench_unop!(mat3a_inverse, "mat3a inverse", op => inverse, from => random_mat3a);
+bench_binop!(mat3a_mul_mat3a, "mat3a mul mat3a", op => mul, from => random_mat3a);
+bench_from_ypr!(mat3a_from_ypr, "mat3a from ypr", ty => Mat3A);
+
+bench_binop!(
+ mat3a_mul_vec3,
+ "mat3a mul vec3",
+ op => mul,
+ from1 => random_mat3a,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ mat3a_mul_vec3a,
+ "mat3a mul vec3a",
+ op => mul,
+ from1 => random_mat3a,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ mat3a_transform_point2,
+ "mat3a transform point2",
+ op => transform_point2,
+ from1 => random_srt_mat3a,
+ from2 => random_vec2
+);
+
+bench_binop!(
+ mat3a_transform_vector2,
+ "mat3a transform vector2",
+ op => transform_vector2,
+ from1 => random_srt_mat3a,
+ from2 => random_vec2
+);
+
+criterion_group!(
+ benches,
+ mat3a_transpose,
+ mat3a_determinant,
+ mat3a_inverse,
+ mat3a_mul_vec3,
+ mat3a_mul_vec3a,
+ mat3a_mul_mat3a,
+ mat3a_from_ypr,
+ mat3a_transform_vector2,
+ mat3a_transform_point2,
+);
+
+criterion_main!(benches);
diff --git a/benches/mat4.rs b/benches/mat4.rs
new file mode 100644
index 0000000..f5eb5e3
--- /dev/null
+++ b/benches/mat4.rs
@@ -0,0 +1,129 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Mat4;
+use std::ops::Mul;
+use support::*;
+
+bench_unop!(
+ mat4_transpose,
+ "mat4 transpose",
+ op => transpose,
+ from => random_srt_mat4
+);
+
+bench_unop!(
+ mat4_determinant,
+ "mat4 determinant",
+ op => determinant,
+ from => random_srt_mat4
+);
+
+bench_unop!(
+ mat4_inverse,
+ "mat4 inverse",
+ op => inverse,
+ from => random_srt_mat4
+);
+
+bench_binop!(
+ mat4_mul_vec4,
+ "mat4 mul vec4",
+ op => mul,
+ from1 => random_srt_mat4,
+ from2 => random_vec4
+);
+
+bench_binop!(
+ mat4_transform_point3,
+ "mat4 transform point3",
+ op => transform_point3,
+ from1 => random_srt_mat4,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ mat4_transform_vector3,
+ "mat4 transform vector3",
+ op => transform_vector3,
+ from1 => random_srt_mat4,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ mat4_transform_point3a,
+ "mat4 transform point3a",
+ op => transform_point3a,
+ from1 => random_srt_mat4,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ mat4_transform_vector3a,
+ "mat4 transform vector3a",
+ op => transform_vector3a,
+ from1 => random_srt_mat4,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ mat4_mul_mat4,
+ "mat4 mul mat4",
+ op => mul,
+ from => random_srt_mat4
+);
+
+bench_from_ypr!(
+ mat4_from_ypr,
+ "mat4 from ypr",
+ ty => Mat4
+);
+
+pub fn mat4_from_srt(c: &mut Criterion) {
+ use glam::{Quat, Vec3};
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs = criterion::black_box(
+ (0..SIZE)
+ .map(|_| {
+ (
+ random_nonzero_vec3(&mut rng),
+ random_quat(&mut rng),
+ random_vec3(&mut rng),
+ )
+ })
+ .collect::<Vec<(Vec3, Quat, Vec3)>>(),
+ );
+ let mut outputs = vec![Mat4::default(); SIZE];
+ let mut i = 0;
+ c.bench_function("mat4 from srt", |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ let data = inputs.get_unchecked(i);
+ *outputs.get_unchecked_mut(i) =
+ Mat4::from_scale_rotation_translation(data.0, data.1, data.2)
+ }
+ })
+ });
+}
+
+criterion_group!(
+ benches,
+ mat4_determinant,
+ mat4_from_srt,
+ mat4_from_ypr,
+ mat4_inverse,
+ mat4_mul_mat4,
+ mat4_mul_vec4,
+ mat4_transform_point3,
+ mat4_transform_point3a,
+ mat4_transform_vector3,
+ mat4_transform_vector3a,
+ mat4_transpose,
+);
+
+criterion_main!(benches);
diff --git a/benches/quat.rs b/benches/quat.rs
new file mode 100644
index 0000000..507c1c3
--- /dev/null
+++ b/benches/quat.rs
@@ -0,0 +1,80 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Quat;
+use std::ops::Mul;
+use support::*;
+
+bench_unop!(
+ quat_conjugate,
+ "quat conjugate",
+ op => conjugate,
+ from => random_quat
+);
+
+bench_binop!(
+ quat_mul_vec3,
+ "quat mul vec3",
+ op => mul,
+ from1 => random_quat,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ quat_mul_vec3a,
+ "quat mul vec3a",
+ op => mul,
+ from1 => random_quat,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ quat_mul_quat,
+ "quat mul quat",
+ op => mul,
+ from => random_quat
+);
+
+bench_binop!(
+ quat_dot,
+ "quat dot",
+ op => dot,
+ from => random_quat
+);
+
+bench_trinop!(
+ quat_lerp,
+ "quat lerp",
+ op => lerp,
+ from1 => random_quat,
+ from2 => random_quat,
+ from3 => random_f32
+);
+
+bench_trinop!(
+ quat_slerp,
+ "quat slerp",
+ op => slerp,
+ from1 => random_quat,
+ from2 => random_quat,
+ from3 => random_f32
+);
+
+bench_from_ypr!(quat_from_ypr, "quat from ypr", ty => Quat);
+
+criterion_group!(
+ benches,
+ quat_conjugate,
+ quat_dot,
+ quat_lerp,
+ quat_slerp,
+ quat_mul_quat,
+ quat_mul_vec3,
+ quat_mul_vec3a,
+ quat_from_ypr
+);
+
+criterion_main!(benches);
diff --git a/benches/support/macros.rs b/benches/support/macros.rs
new file mode 100644
index 0000000..0d64c5e
--- /dev/null
+++ b/benches/support/macros.rs
@@ -0,0 +1,210 @@
+#[macro_export]
+macro_rules! bench_func {
+ ($name: ident, $desc: expr, op => $func: ident, from => $from: expr) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs =
+ criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
+ // pre-fill output vector with some random value
+ let mut outputs = vec![$func($from(&mut rng)); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ *outputs.get_unchecked_mut(i) = $func(*inputs.get_unchecked(i));
+ }
+ })
+ });
+ criterion::black_box(outputs);
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! bench_unop {
+ ($name: ident, $desc: expr, op => $unop: ident, from => $from: expr) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs =
+ criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
+ // pre-fill output vector with some random value
+ let mut outputs = vec![$from(&mut rng).$unop(); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ *outputs.get_unchecked_mut(i) = inputs.get_unchecked(i).$unop();
+ }
+ })
+ });
+ criterion::black_box(outputs);
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! bench_binop {
+ ($name: ident, $desc: expr, op => $binop: ident, from1 => $from1:expr, from2 => $from2:expr) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs1 =
+ criterion::black_box((0..SIZE).map(|_| $from1(&mut rng)).collect::<Vec<_>>());
+ let inputs2 =
+ criterion::black_box((0..SIZE).map(|_| $from2(&mut rng)).collect::<Vec<_>>());
+ // pre-fill output vector with some random value
+ let mut outputs = vec![$from1(&mut rng).$binop($from2(&mut rng)); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ *outputs.get_unchecked_mut(i) = inputs1.get_unchecked(i).$binop(*inputs2.get_unchecked(i));
+ }
+ })
+ });
+ criterion::black_box(outputs);
+ }
+ };
+ ($name: ident, $desc: expr, op => $binop: ident, from => $from: expr) => {
+ bench_binop!($name, $desc, op => $binop, from1 => $from, from2 => $from);
+ };
+}
+
+#[macro_export]
+macro_rules! bench_trinop {
+ ($name: ident, $desc: expr, op => $trinop: ident, from1 => $from1:expr, from2 => $from2:expr, from3 => $from3:expr) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs1 =
+ criterion::black_box((0..SIZE).map(|_| $from1(&mut rng)).collect::<Vec<_>>());
+ let inputs2 =
+ criterion::black_box((0..SIZE).map(|_| $from2(&mut rng)).collect::<Vec<_>>());
+ let inputs3 =
+ criterion::black_box((0..SIZE).map(|_| $from3(&mut rng)).collect::<Vec<_>>());
+ // pre-fill output vector with some random value
+ let mut outputs =
+ vec![$from1(&mut rng).$trinop($from2(&mut rng), $from3(&mut rng)); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ *outputs.get_unchecked_mut(i) = inputs1
+ .get_unchecked(i)
+ .$trinop(*inputs2.get_unchecked(i), *inputs3.get_unchecked(i));
+ }
+ })
+ });
+ criterion::black_box(outputs);
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! bench_select {
+ ($name:ident, $desc:expr, ty => $ty: ident, op => $op: ident, from => $from:expr) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs1 =
+ criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
+ let inputs2 =
+ criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
+ let masks = vec![$from(&mut rng).$op($from(&mut rng)); SIZE];
+ // pre-fill output vector with some random value
+ let mut outputs = vec![$from(&mut rng); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ *outputs.get_unchecked_mut(i) = $ty::select(
+ *masks.get_unchecked(i),
+ *inputs1.get_unchecked(i),
+ *inputs2.get_unchecked(i),
+ );
+ }
+ })
+ });
+ criterion::black_box(outputs);
+ }
+ };
+}
+#[macro_export]
+macro_rules! bench_from_ypr {
+ ($name: ident, $desc: expr, ty => $ty:ty) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const SIZE: usize = 1 << 13;
+ let mut rng = support::PCG32::default();
+ let inputs = criterion::black_box(
+ (0..SIZE)
+ .map(|_| {
+ (
+ random_radians(&mut rng),
+ random_radians(&mut rng),
+ random_radians(&mut rng),
+ )
+ })
+ .collect::<Vec<_>>(),
+ );
+ let mut outputs = vec![<$ty>::default(); SIZE];
+ let mut i = 0;
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ i = (i + 1) & (SIZE - 1);
+ unsafe {
+ let data = inputs.get_unchecked(i);
+ *outputs.get_unchecked_mut(i) =
+ <$ty>::from_euler(glam::EulerRot::YXZ, data.0, data.1, data.2)
+ }
+ })
+ });
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! euler {
+ ($name: ident, $desc: expr, ty => $t: ty, storage => $storage: ty, zero => $zero: expr, rand => $rand: ident) => {
+ pub(crate) fn $name(c: &mut Criterion) {
+ const UPDATE_RATE: f32 = 1.0 / 60.0;
+ const NUM_OBJECTS: usize = 10000;
+
+ struct TestData {
+ acc: Vec<$storage>,
+ vel: Vec<$storage>,
+ pos: Vec<$storage>,
+ }
+
+ let mut rng = support::PCG32::default();
+ let mut data = TestData {
+ acc: vec![$rand(&mut rng); NUM_OBJECTS],
+ vel: vec![$zero; NUM_OBJECTS],
+ pos: vec![$zero; NUM_OBJECTS],
+ };
+ let dt = <$t>::splat(UPDATE_RATE);
+
+ c.bench_function($desc, |b| {
+ b.iter(|| {
+ for ((position, acceleration), velocity) in
+ data.pos.iter_mut().zip(&data.acc).zip(&mut data.vel)
+ {
+ let local_acc: $t = (*acceleration).into();
+ let mut local_pos: $t = (*position).into();
+ let mut local_vel: $t = (*velocity).into();
+ local_vel += local_acc * dt;
+ local_pos += local_vel * dt;
+ *velocity = local_vel.into();
+ *position = local_pos.into();
+ }
+ })
+ });
+ }
+ };
+}
diff --git a/benches/support/mod.rs b/benches/support/mod.rs
new file mode 100644
index 0000000..3fe5dd7
--- /dev/null
+++ b/benches/support/mod.rs
@@ -0,0 +1,129 @@
+#![allow(dead_code)]
+use core::f32;
+use glam::{Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+
+pub struct PCG32 {
+ state: u64,
+ inc: u64,
+}
+
+impl PCG32 {
+ pub fn seed(initstate: u64, initseq: u64) -> Self {
+ let mut rng = PCG32 {
+ state: 0,
+ inc: (initseq << 1) | 1,
+ };
+ rng.next_u32();
+ rng.state = rng.state.wrapping_add(initstate);
+ rng.next_u32();
+ rng
+ }
+
+ pub fn default() -> Self {
+ PCG32::seed(0x853c49e6748fea9b, 0xda3e39cb94b95bdb)
+ }
+
+ pub fn next_u32(&mut self) -> u32 {
+ let oldstate = self.state;
+ self.state = oldstate
+ .wrapping_mul(6364136223846793005)
+ .wrapping_add(self.inc | 1);
+ let xorshifted = ((oldstate >> 18) ^ oldstate) >> 27;
+ let rot = oldstate >> 59;
+ ((xorshifted >> rot) | (xorshifted << (rot.wrapping_neg() & 31))) as u32
+ }
+
+ pub fn next_f32(&mut self) -> f32 {
+ (self.next_u32() & 0xffffff) as f32 / 16777216.0
+ }
+}
+
+pub fn random_vec2(rng: &mut PCG32) -> Vec2 {
+ Vec2::new(rng.next_f32(), rng.next_f32())
+}
+
+pub fn random_vec3(rng: &mut PCG32) -> Vec3 {
+ Vec3::new(rng.next_f32(), rng.next_f32(), rng.next_f32())
+}
+
+pub fn random_vec3a(rng: &mut PCG32) -> Vec3A {
+ Vec3A::new(rng.next_f32(), rng.next_f32(), rng.next_f32())
+}
+
+pub fn random_vec4(rng: &mut PCG32) -> Vec4 {
+ Vec4::new(
+ rng.next_f32(),
+ rng.next_f32(),
+ rng.next_f32(),
+ rng.next_f32(),
+ )
+}
+
+pub fn random_nonzero_vec2(rng: &mut PCG32) -> Vec2 {
+ loop {
+ let v = random_vec2(rng);
+ if v.length_squared() > 0.01 {
+ return v;
+ }
+ }
+}
+
+pub fn random_nonzero_vec3(rng: &mut PCG32) -> Vec3 {
+ loop {
+ let v = random_vec3(rng);
+ if v.length_squared() > 0.01 {
+ return v;
+ }
+ }
+}
+
+pub fn random_f32(rng: &mut PCG32) -> f32 {
+ rng.next_f32()
+}
+
+pub fn random_radians(rng: &mut PCG32) -> f32 {
+ -f32::consts::PI + rng.next_f32() * 2.0 * f32::consts::PI
+}
+
+pub fn random_quat(rng: &mut PCG32) -> Quat {
+ let yaw = random_radians(rng);
+ let pitch = random_radians(rng);
+ let roll = random_radians(rng);
+ Quat::from_euler(glam::EulerRot::YXZ, yaw, pitch, roll)
+}
+
+pub fn random_mat2(rng: &mut PCG32) -> Mat2 {
+ Mat2::from_cols(random_vec2(rng), random_vec2(rng))
+}
+
+pub fn random_mat3(rng: &mut PCG32) -> Mat3 {
+ Mat3::from_cols(random_vec3(rng), random_vec3(rng), random_vec3(rng))
+}
+
+pub fn random_srt_mat3(rng: &mut PCG32) -> Mat3 {
+ Mat3::from_scale_angle_translation(
+ random_nonzero_vec2(rng),
+ random_radians(rng),
+ random_vec2(rng),
+ )
+}
+
+pub fn random_mat3a(rng: &mut PCG32) -> Mat3A {
+ Mat3A::from_cols(random_vec3a(rng), random_vec3a(rng), random_vec3a(rng))
+}
+
+pub fn random_srt_mat3a(rng: &mut PCG32) -> Mat3A {
+ Mat3A::from_scale_angle_translation(
+ random_nonzero_vec2(rng),
+ random_radians(rng),
+ random_vec2(rng),
+ )
+}
+
+pub fn random_srt_mat4(rng: &mut PCG32) -> Mat4 {
+ Mat4::from_scale_rotation_translation(
+ random_nonzero_vec3(rng),
+ random_quat(rng),
+ random_vec3(rng),
+ )
+}
diff --git a/benches/transform.rs b/benches/transform.rs
new file mode 100644
index 0000000..2510397
--- /dev/null
+++ b/benches/transform.rs
@@ -0,0 +1,132 @@
+#![allow(deprecated)]
+
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::{TransformRT, TransformSRT};
+use std::ops::Mul;
+use support::*;
+
+fn random_transformsrt(rng: &mut PCG32) -> TransformSRT {
+ TransformSRT::from_scale_rotation_translation(
+ random_nonzero_vec3(rng),
+ random_quat(rng),
+ random_vec3(rng),
+ )
+}
+
+fn random_transformrt(rng: &mut PCG32) -> TransformRT {
+ TransformRT::from_rotation_translation(random_quat(rng), random_vec3(rng))
+}
+
+bench_unop!(
+ transformrt_inverse,
+ "transform_rt inverse",
+ op => inverse,
+ from => random_transformrt
+);
+
+bench_unop!(
+ transformsrt_inverse,
+ "transform_srt inverse",
+ op => inverse,
+ from => random_transformsrt
+);
+
+bench_binop!(
+ transformrt_transform_point3,
+ "transform_rt transform point3",
+ op => transform_point3,
+ from1 => random_transformrt,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ transformrt_transform_point3a,
+ "transform_rt transform point3a",
+ op => transform_point3a,
+ from1 => random_transformrt,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ transformrt_transform_vector3,
+ "transform_rt transform vector3",
+ op => transform_vector3,
+ from1 => random_transformrt,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ transformrt_transform_vector3a,
+ "transform_rt transform vector3a",
+ op => transform_vector3a,
+ from1 => random_transformrt,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ transformsrt_transform_point3,
+ "transform_srt transform point3",
+ op => transform_point3,
+ from1 => random_transformsrt,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ transformsrt_transform_point3a,
+ "transform_srt transform point3a",
+ op => transform_point3a,
+ from1 => random_transformsrt,
+ from2 => random_vec3a
+);
+
+bench_binop!(
+ transformsrt_transform_vector3,
+ "transform_srt transform vector3",
+ op => transform_vector3,
+ from1 => random_transformsrt,
+ from2 => random_vec3
+);
+
+bench_binop!(
+ transformsrt_transform_vector3a,
+ "transform_srt transform vector3a",
+ op => transform_vector3a,
+ from1 => random_transformsrt,
+ from2 => random_vec3a
+);
+bench_binop!(
+ transformsrt_mul_transformsrt,
+ "transform_srt mul transform_srt",
+ op => mul,
+ from => random_transformsrt
+);
+
+bench_binop!(
+ transformrt_mul_transformrt,
+ "transform_rt mul transform_rt",
+ op => mul,
+ from => random_transformrt
+);
+
+criterion_group!(
+ benches,
+ transformrt_inverse,
+ transformrt_mul_transformrt,
+ transformrt_transform_point3,
+ transformrt_transform_point3a,
+ transformrt_transform_vector3,
+ transformrt_transform_vector3a,
+ transformsrt_inverse,
+ transformsrt_mul_transformsrt,
+ transformsrt_transform_point3,
+ transformsrt_transform_point3a,
+ transformsrt_transform_vector3,
+ transformsrt_transform_vector3a,
+);
+
+criterion_main!(benches);
diff --git a/benches/vec2.rs b/benches/vec2.rs
new file mode 100644
index 0000000..a334e5d
--- /dev/null
+++ b/benches/vec2.rs
@@ -0,0 +1,51 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Vec2;
+use std::ops::Mul;
+use support::*;
+
+euler!(
+ vec2_euler,
+ "vec2 euler",
+ ty => Vec2,
+ storage => Vec2,
+ zero => Vec2::ZERO,
+ rand => random_vec2);
+
+bench_binop!(
+ vec2_mul_vec2,
+ "vec2 mul vec2",
+ op => mul,
+ from1 => random_vec2,
+ from2 => random_vec2
+);
+
+bench_binop!(
+ vec2_angle_between,
+ "vec2 angle_between",
+ op => angle_between,
+ from1 => random_vec2,
+ from2 => random_vec2
+);
+
+bench_select!(
+ vec2_select,
+ "vec2 select",
+ ty => Vec2,
+ op => cmple,
+ from => random_vec2
+);
+
+criterion_group!(
+ benches,
+ vec2_mul_vec2,
+ vec2_euler,
+ vec2_select,
+ vec2_angle_between
+);
+
+criterion_main!(benches);
diff --git a/benches/vec3.rs b/benches/vec3.rs
new file mode 100644
index 0000000..a8d8b6a
--- /dev/null
+++ b/benches/vec3.rs
@@ -0,0 +1,169 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Vec3;
+use std::ops::Mul;
+use support::*;
+
+bench_binop!(
+ vec3_mul_vec3,
+ "vec3 mul vec3",
+ op => mul,
+ from1 => random_vec3,
+ from2 => random_vec3
+);
+
+#[inline]
+fn vec3_to_rgb_op(v: Vec3) -> u32 {
+ let (red, green, blue) = (v.min(Vec3::ONE).max(Vec3::ZERO) * 255.0).into();
+ ((red as u32) << 16 | (green as u32) << 8 | (blue as u32)).into()
+}
+
+#[inline]
+fn vec3_fields(v: Vec3) -> [f32; 3] {
+ [v.x, v.y, v.z]
+}
+
+#[inline]
+fn vec3_into_array(v: Vec3) -> [f32; 3] {
+ v.into()
+}
+
+#[inline]
+fn vec3_into_tuple(v: Vec3) -> (f32, f32, f32) {
+ v.into()
+}
+
+bench_func!(
+vec3_to_rgb,
+"vec3 to rgb",
+op => vec3_to_rgb_op,
+from => random_vec3
+);
+
+bench_func!(
+vec3_to_array_fields,
+"vec3 into array fields",
+op => vec3_fields,
+from => random_vec3
+);
+
+bench_func!(
+vec3_to_array_into,
+"vec3 into array fast",
+op => vec3_into_array,
+from => random_vec3
+);
+
+bench_func!(
+vec3_to_tuple_into,
+"vec3 into tuple fast",
+op => vec3_into_tuple,
+from => random_vec3
+);
+
+// ---
+
+#[inline]
+fn vec3_normalize(v: Vec3) -> Vec3 {
+ v.normalize()
+}
+
+bench_func!(
+ vec3_normalize_bench,
+ "vec3 normalize",
+ op => vec3_normalize,
+ from => random_vec3
+);
+
+#[inline]
+fn vec3_normalize_or_zero(v: Vec3) -> Vec3 {
+ v.normalize_or_zero()
+}
+
+bench_func!(
+ vec3_normalize_or_zero_bench,
+ "vec3 normalize_or_zero",
+ op => vec3_normalize_or_zero,
+ from => random_vec3
+);
+
+// ---
+
+#[inline(always)]
+fn vec3_any_orthogonal_vector(v: Vec3) -> Vec3 {
+ v.any_orthogonal_vector()
+}
+
+bench_func!(
+ vec3_any_orthogonal_vector_bench,
+ "vec3 any_orthogonal_vector",
+ op => vec3_any_orthogonal_vector,
+ from => random_vec3
+);
+
+#[inline(always)]
+fn vec3_any_orthonormal_vector(v: Vec3) -> Vec3 {
+ v.any_orthonormal_vector()
+}
+
+bench_func!(
+ vec3_any_orthonormal_vector_bench,
+ "vec3 any_orthonormal_vector",
+ op => vec3_any_orthonormal_vector,
+ from => random_vec3
+);
+
+#[inline(always)]
+fn vec3_any_orthonormal_pair(v: Vec3) -> (Vec3, Vec3) {
+ v.any_orthonormal_pair()
+}
+
+bench_func!(
+ vec3_any_orthonormal_pair_bench,
+ "vec3 any_orthonormal_pair",
+ op => vec3_any_orthonormal_pair,
+ from => random_vec3
+);
+
+// ---
+
+euler!(vec3_euler, "vec3 euler", ty => Vec3, storage => Vec3, zero => Vec3::ZERO, rand => random_vec3);
+
+bench_binop!(
+ vec3_angle_between,
+ "vec3 angle_between",
+ op => angle_between,
+ from1 => random_vec3,
+ from2 => random_vec3
+);
+
+bench_select!(
+ vec3_select,
+ "vec3 select",
+ ty => Vec3,
+ op => cmple,
+ from => random_vec3
+);
+
+criterion_group!(
+ benches,
+ vec3_mul_vec3,
+ vec3_angle_between,
+ vec3_normalize_bench,
+ vec3_normalize_or_zero_bench,
+ vec3_any_orthogonal_vector_bench,
+ vec3_any_orthonormal_vector_bench,
+ vec3_any_orthonormal_pair_bench,
+ vec3_euler,
+ vec3_select,
+ vec3_to_array_fields,
+ vec3_to_array_into,
+ vec3_to_rgb,
+ vec3_to_tuple_into,
+);
+
+criterion_main!(benches);
diff --git a/benches/vec3a.rs b/benches/vec3a.rs
new file mode 100644
index 0000000..c0c5e25
--- /dev/null
+++ b/benches/vec3a.rs
@@ -0,0 +1,111 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::{Vec3, Vec3A};
+use std::ops::Mul;
+use support::*;
+
+bench_binop!(
+ vec3a_mul_vec3a,
+ "vec3a mul vec3a",
+ op => mul,
+ from1 => random_vec3a,
+ from2 => random_vec3a
+);
+
+#[inline]
+fn vec3a_to_rgb_op(v: Vec3A) -> u32 {
+ let (red, green, blue) = (v.min(Vec3A::ONE).max(Vec3A::ZERO) * 255.0).into();
+ (red as u32) << 16 | (green as u32) << 8 | (blue as u32)
+}
+
+#[inline]
+fn vec3a_deref(v: Vec3A) -> [f32; 3] {
+ [v.x, v.y, v.z]
+}
+
+#[inline]
+fn vec3a_into_array(v: Vec3A) -> [f32; 3] {
+ v.into()
+}
+
+#[inline]
+fn vec3a_into_tuple(v: Vec3A) -> (f32, f32, f32) {
+ v.into()
+}
+
+#[inline]
+fn vec3a_into_vec3(v: Vec3A) -> Vec3 {
+ v.into()
+}
+
+bench_func!(
+vec3a_to_vec3,
+"vec3a into vec3",
+op => vec3a_into_vec3,
+from => random_vec3a
+);
+
+bench_func!(
+vec3a_to_rgb,
+"vec3a to rgb",
+op => vec3a_to_rgb_op,
+from => random_vec3a
+);
+
+bench_func!(
+vec3a_to_array_deref,
+"vec3a into array deref",
+op => vec3a_deref,
+from => random_vec3a
+);
+
+bench_func!(
+vec3a_to_array_into,
+"vec3a into array fast",
+op => vec3a_into_array,
+from => random_vec3a
+);
+
+bench_func!(
+vec3a_to_tuple_into,
+"vec3a into tuple fast",
+op => vec3a_into_tuple,
+from => random_vec3a
+);
+
+euler!(vec3a_euler, "vec3a euler", ty => Vec3A, storage => Vec3A, zero => Vec3A::ZERO, rand => random_vec3a);
+
+bench_binop!(
+ vec3a_angle_between,
+ "vec3a angle_between",
+ op => angle_between,
+ from1 => random_vec3a,
+ from2 => random_vec3a
+);
+
+bench_select!(
+ vec3a_select,
+ "vec3a select",
+ ty => Vec3A,
+ op => cmple,
+ from => random_vec3a
+);
+
+criterion_group!(
+ benches,
+ vec3a_mul_vec3a,
+ vec3a_angle_between,
+ vec3a_euler,
+ vec3a_select,
+ vec3a_to_array_deref,
+ vec3a_to_array_into,
+ vec3a_to_rgb,
+ vec3a_to_tuple_into,
+ vec3a_to_vec3,
+);
+
+criterion_main!(benches);
diff --git a/benches/vec4.rs b/benches/vec4.rs
new file mode 100644
index 0000000..ec926d8
--- /dev/null
+++ b/benches/vec4.rs
@@ -0,0 +1,29 @@
+#[path = "support/macros.rs"]
+#[macro_use]
+mod macros;
+mod support;
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use glam::Vec4;
+use std::ops::Mul;
+use support::random_vec4;
+
+bench_binop!(
+ vec4_mul_vec4,
+ "vec4 mul vec4",
+ op => mul,
+ from1 => random_vec4,
+ from2 => random_vec4
+);
+
+bench_select!(
+ vec4_select,
+ "vec4 select",
+ ty => Vec4,
+ op => cmple,
+ from => random_vec4
+);
+
+criterion_group!(benches, vec4_mul_vec4, vec4_select);
+
+criterion_main!(benches);
diff --git a/build_all_msrv.sh b/build_all_msrv.sh
new file mode 100755
index 0000000..eea2672
--- /dev/null
+++ b/build_all_msrv.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+set -e
+
+CARGO='rustup run 1.52.1 cargo'
+$CARGO test --features "bytemuck mint rand serde debug-glam-assert transform-types" && \
+$CARGO test --features "scalar-math bytemuck mint rand serde debug-glam-assert transform-types" && \
+$CARGO test --no-default-features --features "libm scalar-math bytemuck mint rand serde debug-glam-assert transform-types" && \
+$CARGO bench --no-run
diff --git a/build_and_test_features.sh b/build_and_test_features.sh
new file mode 100755
index 0000000..8ceb4b4
--- /dev/null
+++ b/build_and_test_features.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -e
+
+# Set of features to build & test.
+FEATURE_SETS=(
+ # std
+ "std"
+ "std approx bytemuck mint rand serde debug-glam-assert transform-types"
+ "std scalar-math approx bytemuck mint rand serde debug-glam-assert transform-types"
+ "std cuda"
+ "std scalar-math cuda"
+ # no_std
+ "libm"
+ "libm scalar-math approx bytemuck mint rand serde debug-glam-assert transform-types"
+)
+
+rustc --version
+
+for features in "${FEATURE_SETS[@]}"
+do
+ :
+ cargo build --tests --no-default-features --features="$features"
+ echo cargo test --no-default-features --features=\"$features\"
+ cargo test --no-default-features --features="$features"
+done
+
+pushd test_no_std && cargo check
diff --git a/build_and_test_wasm32_chrome.sh b/build_and_test_wasm32_chrome.sh
new file mode 100755
index 0000000..810ee55
--- /dev/null
+++ b/build_and_test_wasm32_chrome.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+set -e
+
+RUSTFLAGS="-Ctarget-feature=+simd128" wasm-pack test --headless --chrome
+wasm-pack test --headless --chrome
diff --git a/build_and_test_wasm32_firefox.sh b/build_and_test_wasm32_firefox.sh
new file mode 100755
index 0000000..9194ea6
--- /dev/null
+++ b/build_and_test_wasm32_firefox.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+set -e
+
+RUSTFLAGS="-Ctarget-feature=+simd128" wasm-pack test --headless --firefox
+wasm-pack test --headless --firefox
diff --git a/clippy.toml b/clippy.toml
new file mode 100644
index 0000000..829dd1c
--- /dev/null
+++ b/clippy.toml
@@ -0,0 +1 @@
+msrv = "1.51"
diff --git a/deny.toml b/deny.toml
new file mode 100644
index 0000000..a9cd9cb
--- /dev/null
+++ b/deny.toml
@@ -0,0 +1,18 @@
+[bans]
+multiple-versions = "deny"
+deny = []
+skip-tree = [
+    # ignore the criterion dev-dependency, whose internal dependencies often duplicate
+ { name = "criterion" },
+]
+
+[licenses]
+unlicensed = "deny"
+allow = [
+ "Apache-2.0",
+ "BSD-2-Clause",
+ "BSD-3-Clause",
+ "ISC",
+ "MIT",
+ "MPL-2.0",
+]
diff --git a/src/affine2.rs b/src/affine2.rs
new file mode 100644
index 0000000..f3b9e3b
--- /dev/null
+++ b/src/affine2.rs
@@ -0,0 +1,519 @@
+use crate::core::storage::Columns3;
+use crate::{DMat2, DMat3, DVec2, Mat2, Mat3, Mat3A, Vec2, Vec3A};
+use core::ops::{Add, Deref, DerefMut, Mul, Sub};
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+macro_rules! define_affine2_struct {
+ ($affine2:ident, $matrix:ident, $column:ident) => {
+ /// A 2D affine transform, which can represent translation, rotation, scaling and shear.
+ #[derive(Copy, Clone)]
+ #[repr(C)]
+ pub struct $affine2 {
+ pub matrix2: $matrix,
+ pub translation: $column,
+ }
+ };
+}
+
+macro_rules! impl_affine2_methods {
+ ($t:ty, $mat2:ident, $mat3:ident, $vec2:ident, $affine2:ident, $matrix:ident, $column:ident) => {
+ impl $affine2 {
+ /// The degenerate zero transform.
+ ///
+ /// This transforms any finite vector and point to zero.
+ /// The zero transform is non-invertible.
+ pub const ZERO: Self = Self {
+ matrix2: $matrix::ZERO,
+ translation: $column::ZERO,
+ };
+
+ /// The identity transform.
+ ///
+ /// Multiplying a vector with this returns the same vector.
+ pub const IDENTITY: Self = Self {
+ matrix2: $matrix::IDENTITY,
+ translation: $column::ZERO,
+ };
+
+            /// All NaNs.
+ pub const NAN: Self = Self {
+ matrix2: $matrix::NAN,
+ translation: $column::NAN,
+ };
+
+ /// Creates an affine transform from three column vectors.
+ #[inline(always)]
+ pub fn from_cols(x_axis: $column, y_axis: $column, z_axis: $column) -> Self {
+ Self {
+ matrix2: $matrix::from_cols(x_axis, y_axis),
+ translation: z_axis,
+ }
+ }
+
+ /// Creates an affine transform from a `[S; 6]` array stored in column major order.
+ /// If your data is stored in row major you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array(m: &[$t; 6]) -> Self {
+ Self {
+ matrix2: $matrix::from_cols_slice(&m[0..4]),
+ translation: $column::from_slice(&m[4..6]),
+ }
+ }
+
+ /// Creates a `[S; 6]` array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array(&self) -> [$t; 6] {
+ let x = &self.matrix2.x_axis;
+ let y = &self.matrix2.y_axis;
+ let z = &self.translation;
+ [x.x, x.y, y.x, y.y, z.x, z.y]
+ }
+
+ /// Creates an affine transform from a `[[S; 2]; 3]` 2D array stored in column major order.
+ /// If your data is in row major order you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array_2d(m: &[[$t; 2]; 3]) -> Self {
+ Self {
+ matrix2: $matrix::from_cols(m[0].into(), m[1].into()),
+ translation: m[2].into(),
+ }
+ }
+
+ /// Creates a `[[S; 2]; 3]` 2D array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array_2d(&self) -> [[$t; 2]; 3] {
+ [
+ self.matrix2.x_axis.into(),
+ self.matrix2.y_axis.into(),
+ self.translation.into(),
+ ]
+ }
+
+ /// Creates an affine transform from the first 6 values in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 6 elements long.
+ #[inline(always)]
+ pub fn from_cols_slice(slice: &[$t]) -> Self {
+ Self {
+ matrix2: $matrix::from_cols_slice(&slice[0..4]),
+ translation: $column::from_slice(&slice[4..6]),
+ }
+ }
+
+            /// Writes the columns of `self` to the first 6 elements in `slice`.
+            ///
+            /// # Panics
+            ///
+            /// Panics if `slice` is less than 6 elements long.
+ #[inline(always)]
+ pub fn write_cols_to_slice(self, slice: &mut [$t]) {
+ self.matrix2.write_cols_to_slice(&mut slice[0..4]);
+ self.translation.write_to_slice(&mut slice[4..6]);
+ }
+
+ /// Creates an affine transform that changes scale.
+ /// Note that if any scale is zero the transform will be non-invertible.
+ #[inline(always)]
+ pub fn from_scale(scale: $vec2) -> Self {
+ Self {
+ matrix2: $matrix::from_diagonal(scale),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform from the given rotation `angle`.
+ #[inline(always)]
+ pub fn from_angle(angle: $t) -> Self {
+ Self {
+ matrix2: $matrix::from_angle(angle),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transformation from the given 2D `translation`.
+ #[inline(always)]
+ pub fn from_translation(translation: $vec2) -> Self {
+ Self {
+ matrix2: $matrix::IDENTITY,
+ translation,
+ }
+ }
+
+ /// Creates an affine transform from a 2x2 matrix (expressing scale, shear and
+ /// rotation)
+ #[inline(always)]
+ pub fn from_mat2(matrix2: $mat2) -> Self {
+ Self {
+ matrix2,
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform from a 2x2 matrix (expressing scale, shear and rotation)
+ /// and a translation vector.
+ ///
+ /// Equivalent to `Affine2::from_translation(translation) * Affine2::from_mat2(mat2)`
+ #[inline(always)]
+ pub fn from_mat2_translation(matrix2: $mat2, translation: $vec2) -> Self {
+ Self {
+ matrix2,
+ translation,
+ }
+ }
+
+ /// Creates an affine transform from the given 2D `scale`, rotation `angle` (in
+ /// radians) and `translation`.
+ ///
+ /// Equivalent to `Affine2::from_translation(translation) *
+ /// Affine2::from_angle(angle) * Affine2::from_scale(scale)`
+ #[inline]
+ pub fn from_scale_angle_translation(
+ scale: $vec2,
+ angle: $t,
+ translation: $vec2,
+ ) -> Self {
+ let rotation = $matrix::from_angle(angle);
+ Self {
+ matrix2: $matrix::from_cols(
+ rotation.x_axis * scale.x,
+ rotation.y_axis * scale.y,
+ ),
+ translation,
+ }
+ }
+
+ /// Creates an affine transform from the given 2D rotation `angle` (in radians) and
+ /// `translation`.
+ ///
+ /// Equivalent to `Affine2::from_translation(translation) * Affine2::from_angle(angle)`
+ #[inline(always)]
+ pub fn from_angle_translation(angle: $t, translation: $vec2) -> Self {
+ Self {
+ matrix2: $matrix::from_angle(angle),
+ translation,
+ }
+ }
+
+            /// The given `Mat3` must be an affine transform.
+ #[inline]
+ pub fn from_mat3(m: $mat3) -> Self {
+ Self {
+ matrix2: $matrix::from_cols($vec2(m.x_axis.0.into()), $vec2(m.y_axis.0.into())),
+ translation: $vec2(m.z_axis.0.into()),
+ }
+ }
+
+ /// Transforms the given 2D point, applying shear, scale, rotation and translation.
+ #[inline(always)]
+ pub fn transform_point2(&self, other: $vec2) -> $vec2 {
+ self.matrix2 * other + self.translation
+ }
+
+ /// Transforms the given 2D vector, applying shear, scale and rotation (but NOT
+ /// translation).
+ ///
+ /// To also apply translation, use [`Self::transform_point2`] instead.
+ #[inline(always)]
+ pub fn transform_vector2(&self, other: $vec2) -> $vec2 {
+ self.matrix2 * other
+ }
+
+ /// Returns `true` if, and only if, all elements are finite.
+ ///
+ /// If any element is either `NaN`, positive or negative infinity, this will return
+ /// `false`.
+ #[inline]
+ pub fn is_finite(&self) -> bool {
+ self.matrix2.is_finite() && self.translation.is_finite()
+ }
+
+ /// Returns `true` if any elements are `NaN`.
+ #[inline]
+ pub fn is_nan(&self) -> bool {
+ self.matrix2.is_nan() || self.translation.is_nan()
+ }
+
+ /// Returns true if the absolute difference of all elements between `self` and `other`
+ /// is less than or equal to `max_abs_diff`.
+ ///
+            /// This can be used to compare if two 2x3 matrices contain similar elements. It works
+            /// best when comparing with a known value. The `max_abs_diff` that should be used
+            /// depends on the values being compared against.
+ ///
+ /// For more see
+ /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+ #[inline]
+ pub fn abs_diff_eq(&self, other: Self, max_abs_diff: $t) -> bool {
+ self.matrix2.abs_diff_eq(&other.matrix2, max_abs_diff)
+ && self
+ .translation
+ .abs_diff_eq(other.translation, max_abs_diff)
+ }
+
+ /// Return the inverse of this transform.
+ ///
+ /// Note that if the transform is not invertible the result will be invalid.
+ #[must_use]
+ #[inline]
+ pub fn inverse(&self) -> Self {
+ let matrix2 = self.matrix2.inverse();
+ // transform negative translation by the 2x2 inverse:
+ let translation = -(matrix2 * self.translation);
+
+ Self {
+ matrix2,
+ translation,
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_affine2_traits {
+ ($t:ty, $mat2:ident, $mat3:ident, $vec2:ident, $affine2:ident, $matrix:ident, $column:ident, $deref:ident) => {
+ impl Default for $affine2 {
+ #[inline(always)]
+ fn default() -> Self {
+ Self::IDENTITY
+ }
+ }
+
+ impl Deref for $affine2 {
+ type Target = $deref;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*(self as *const Self as *const Self::Target) }
+ }
+ }
+
+ impl DerefMut for $affine2 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *(self as *mut Self as *mut Self::Target) }
+ }
+ }
+
+ impl PartialEq for $affine2 {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.matrix2.eq(&other.matrix2) && self.translation.eq(&other.translation)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl core::fmt::Debug for $affine2 {
+ fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ fmt.debug_struct(stringify!($affine2))
+ .field("matrix2", &self.matrix2)
+ .field("translation", &self.translation)
+ .finish()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl core::fmt::Display for $affine2 {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(f, "[{}, {}, {}]", self.x_axis, self.y_axis, self.z_axis)
+ }
+ }
+
+ impl From<$affine2> for $mat3 {
+ #[inline]
+ fn from(m: $affine2) -> $mat3 {
+ Self::from_cols(
+ m.matrix2.x_axis.extend(0.0),
+ m.matrix2.y_axis.extend(0.0),
+ m.translation.extend(1.0),
+ )
+ }
+ }
+
+ impl Mul for $affine2 {
+ type Output = $affine2;
+
+ #[inline(always)]
+ fn mul(self, other: $affine2) -> Self::Output {
+ Self {
+ matrix2: self.matrix2 * other.matrix2,
+ translation: self.matrix2 * other.translation + self.translation,
+ }
+ }
+ }
+
+ impl Mul<$affine2> for $t {
+ type Output = $affine2;
+ #[inline(always)]
+ fn mul(self, other: $affine2) -> Self::Output {
+ $affine2 {
+ matrix2: self * other.matrix2,
+ translation: self * other.translation,
+ }
+ }
+ }
+
+ impl Mul<$t> for $affine2 {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: $t) -> Self::Output {
+ Self {
+ matrix2: self.matrix2 * other,
+ translation: self.translation * other,
+ }
+ }
+ }
+
+ impl Add<$affine2> for $affine2 {
+ type Output = Self;
+ #[inline(always)]
+ fn add(self, other: Self) -> Self::Output {
+ Self {
+ matrix2: self.matrix2 + other.matrix2,
+ translation: self.translation + other.translation,
+ }
+ }
+ }
+
+ impl Sub<$affine2> for $affine2 {
+ type Output = Self;
+ #[inline(always)]
+ fn sub(self, other: Self) -> Self::Output {
+ Self {
+ matrix2: self.matrix2 - other.matrix2,
+ translation: self.translation - other.translation,
+ }
+ }
+ }
+
+ impl Mul<$mat3> for $affine2 {
+ type Output = $mat3;
+
+ #[inline(always)]
+ fn mul(self, other: $mat3) -> Self::Output {
+ $mat3::from(self) * other
+ }
+ }
+
+ impl Mul<$affine2> for $mat3 {
+ type Output = $mat3;
+
+ #[inline(always)]
+ fn mul(self, other: $affine2) -> Self::Output {
+ self * $mat3::from(other)
+ }
+ }
+
+ impl<'a> core::iter::Product<&'a Self> for $affine2 {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::IDENTITY, |a, &b| a * b)
+ }
+ }
+ };
+}
+
+type TransformF32 = Mat2;
+type TranslateF32 = Vec2;
+type DerefTargetF32 = Columns3<crate::Vec2>;
+
+define_affine2_struct!(Affine2, TransformF32, TranslateF32);
+impl_affine2_methods!(f32, Mat2, Mat3, Vec2, Affine2, TransformF32, TranslateF32);
+impl_affine2_traits!(
+ f32,
+ Mat2,
+ Mat3,
+ Vec2,
+ Affine2,
+ TransformF32,
+ TranslateF32,
+ DerefTargetF32
+);
+
+impl From<Affine2> for Mat3A {
+ #[inline]
+ fn from(m: Affine2) -> Mat3A {
+ Self::from_cols(
+ Vec3A::from((m.matrix2.x_axis, 0.0)),
+ Vec3A::from((m.matrix2.y_axis, 0.0)),
+ Vec3A::from((m.translation, 1.0)),
+ )
+ }
+}
+
+impl Mul<Mat3A> for Affine2 {
+ type Output = Mat3A;
+
+ #[inline(always)]
+ fn mul(self, other: Mat3A) -> Self::Output {
+ Mat3A::from(self) * other
+ }
+}
+
+impl Mul<Affine2> for Mat3A {
+ type Output = Mat3A;
+
+ #[inline(always)]
+ fn mul(self, other: Affine2) -> Self::Output {
+ self * Mat3A::from(other)
+ }
+}
+
+type TransformF64 = DMat2;
+type TranslateF64 = DVec2;
+type DerefTargetF64 = Columns3<DVec2>;
+
+define_affine2_struct!(DAffine2, TransformF64, TranslateF64);
+impl_affine2_methods!(
+ f64,
+ DMat2,
+ DMat3,
+ DVec2,
+ DAffine2,
+ TransformF64,
+ TranslateF64
+);
+impl_affine2_traits!(
+ f64,
+ DMat2,
+ DMat3,
+ DVec2,
+ DAffine2,
+ TransformF64,
+ TranslateF64,
+ DerefTargetF64
+);
+
+#[cfg(all(
+ not(feature = "cuda"),
+ any(feature = "scalar-math", target_arch = "spirv")
+))]
+mod const_test_affine2 {
+ const_assert_eq!(
+ core::mem::align_of::<super::Vec2>(),
+ core::mem::align_of::<super::Affine2>()
+ );
+ const_assert_eq!(24, core::mem::size_of::<super::Affine2>());
+}
+
+#[cfg(not(any(feature = "scalar-math", target_arch = "spirv")))]
+mod const_test_affine2 {
+ const_assert_eq!(16, core::mem::align_of::<super::Affine2>());
+ const_assert_eq!(32, core::mem::size_of::<super::Affine2>());
+}
+
+mod const_test_daffine2 {
+ const_assert_eq!(
+ core::mem::align_of::<super::DVec2>(),
+ core::mem::align_of::<super::DAffine2>()
+ );
+ const_assert_eq!(48, core::mem::size_of::<super::DAffine2>());
+}
diff --git a/src/affine3.rs b/src/affine3.rs
new file mode 100644
index 0000000..afde949
--- /dev/null
+++ b/src/affine3.rs
@@ -0,0 +1,631 @@
+use crate::core::storage::Columns4;
+use crate::{DMat3, DMat4, DQuat, DVec3, Mat3, Mat3A, Mat4, Quat, Vec3, Vec3A};
+use core::ops::{Add, Deref, DerefMut, Mul, Sub};
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+macro_rules! define_affine3_struct {
+ ($affine3:ident, $matrix:ident, $column:ident) => {
+ /// A 3D affine transform, which can represent translation, rotation, scaling and shear.
+ ///
+ /// The type is composed of a 3x3 matrix containing a linear transformation (e.g. scale,
+ /// rotation, shear, reflection) and a 3D vector translation.
+ #[derive(Copy, Clone)]
+ #[repr(C)]
+ pub struct $affine3 {
+ pub matrix3: $matrix,
+ pub translation: $column,
+ }
+ };
+}
+
+macro_rules! impl_affine3_methods {
+ ($t:ty, $mat3:ident, $mat4:ident, $quat:ident, $vec3:ident, $affine3:ident, $matrix:ident, $column:ident) => {
+ impl $affine3 {
+ /// The degenerate zero transform.
+ ///
+ /// This transforms any finite vector and point to zero.
+ /// The zero transform is non-invertible.
+ pub const ZERO: Self = Self {
+ matrix3: $matrix::ZERO,
+ translation: $column::ZERO,
+ };
+
+ /// The identity transform.
+ ///
+ /// Multiplying a vector with this returns the same vector.
+ pub const IDENTITY: Self = Self {
+ matrix3: $matrix::IDENTITY,
+ translation: $column::ZERO,
+ };
+
+ /// All NAN.
+ pub const NAN: Self = Self {
+ matrix3: $matrix::NAN,
+ translation: $column::NAN,
+ };
+
+ /// Creates an affine transform from four column vectors.
+ #[inline(always)]
+ pub fn from_cols(
+ x_axis: $column,
+ y_axis: $column,
+ z_axis: $column,
+ w_axis: $column,
+ ) -> Self {
+ Self {
+ matrix3: $matrix::from_cols(x_axis, y_axis, z_axis),
+ translation: w_axis,
+ }
+ }
+
+ /// Creates an affine transform from a `[S; 12]` array stored in column major order.
+ /// If your data is stored in row major you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array(m: &[$t; 12]) -> Self {
+ Self {
+ matrix3: $matrix::from_cols_slice(&m[0..9]),
+ translation: $column::from_slice(&m[9..12]),
+ }
+ }
+
+ /// Creates a `[S; 12]` array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array(&self) -> [$t; 12] {
+ let x = &self.matrix3.x_axis;
+ let y = &self.matrix3.y_axis;
+ let z = &self.matrix3.z_axis;
+ let w = &self.translation;
+ [x.x, x.y, x.z, y.x, y.y, y.z, z.x, z.y, z.z, w.x, w.y, w.z]
+ }
+
+ /// Creates an affine transform from a `[[S; 3]; 4]` 2D array stored in column major order.
+ /// If your data is in row major order you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array_2d(m: &[[$t; 3]; 4]) -> Self {
+ Self {
+ matrix3: $matrix::from_cols(m[0].into(), m[1].into(), m[2].into()),
+ translation: m[3].into(),
+ }
+ }
+
+ /// Creates a `[[S; 3]; 4]` 2D array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array_2d(&self) -> [[$t; 3]; 4] {
+ [
+ self.matrix3.x_axis.into(),
+ self.matrix3.y_axis.into(),
+ self.matrix3.z_axis.into(),
+ self.translation.into(),
+ ]
+ }
+
+ /// Creates an affine transform from the first 12 values in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 12 elements long.
+ #[inline(always)]
+ pub fn from_cols_slice(slice: &[$t]) -> Self {
+ Self {
+ matrix3: $matrix::from_cols_slice(&slice[0..9]),
+ translation: $column::from_slice(&slice[9..12]),
+ }
+ }
+
+ /// Writes the columns of `self` to the first 12 elements in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 12 elements long.
+ #[inline(always)]
+ pub fn write_cols_to_slice(self, slice: &mut [$t]) {
+ self.matrix3.write_cols_to_slice(&mut slice[0..9]);
+ self.translation.write_to_slice(&mut slice[9..12]);
+ }
+
+ /// Creates an affine transform that changes scale.
+ /// Note that if any scale is zero the transform will be non-invertible.
+ #[inline(always)]
+ pub fn from_scale(scale: $vec3) -> Self {
+ Self {
+ matrix3: $matrix::from_diagonal(scale),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform from the given `rotation` quaternion.
+ #[inline(always)]
+ pub fn from_quat(rotation: $quat) -> Self {
+ Self {
+ matrix3: $matrix::from_quat(rotation),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform containing a 3D rotation around a normalized
+ /// rotation `axis` of `angle` (in radians).
+ #[inline(always)]
+ pub fn from_axis_angle(axis: $vec3, angle: $t) -> Self {
+ Self {
+ matrix3: $matrix::from_axis_angle(axis, angle),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform containing a 3D rotation around the x axis of
+ /// `angle` (in radians).
+ #[inline(always)]
+ pub fn from_rotation_x(angle: $t) -> Self {
+ Self {
+ matrix3: $matrix::from_rotation_x(angle),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform containing a 3D rotation around the y axis of
+ /// `angle` (in radians).
+ #[inline]
+ pub fn from_rotation_y(angle: $t) -> Self {
+ Self {
+ matrix3: $matrix::from_rotation_y(angle),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform containing a 3D rotation around the z axis of
+ /// `angle` (in radians).
+ #[inline]
+ pub fn from_rotation_z(angle: $t) -> Self {
+ Self {
+ matrix3: $matrix::from_rotation_z(angle),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transformation from the given 3D `translation`.
+ #[inline(always)]
+ pub fn from_translation(translation: $vec3) -> Self {
+ Self {
+ matrix3: $matrix::IDENTITY,
+ translation: translation.into(),
+ }
+ }
+
+ /// Creates an affine transform from a 3x3 matrix (expressing scale, shear and
+ /// rotation)
+ #[inline(always)]
+ pub fn from_mat3(mat3: $mat3) -> Self {
+ Self {
+ matrix3: mat3.into(),
+ translation: $column::ZERO,
+ }
+ }
+
+ /// Creates an affine transform from a 3x3 matrix (expressing scale, shear and rotation)
+ /// and a translation vector.
+ ///
+ /// Equivalent to `Affine3::from_translation(translation) * Affine3::from_mat3(mat3)`
+ #[inline(always)]
+ pub fn from_mat3_translation(mat3: $mat3, translation: $vec3) -> Self {
+ Self {
+ matrix3: mat3.into(),
+ translation: translation.into(),
+ }
+ }
+
+ /// Creates an affine transform from the given 3D `scale`, `rotation` and
+ /// `translation`.
+ ///
+ /// Equivalent to `Affine3::from_translation(translation) *
+ /// Affine3::from_quat(rotation) * Affine3::from_scale(scale)`
+ #[inline(always)]
+ pub fn from_scale_rotation_translation(
+ scale: $vec3,
+ rotation: $quat,
+ translation: $vec3,
+ ) -> Self {
+ let rotation = $matrix::from_quat(rotation);
+ Self {
+ matrix3: $matrix::from_cols(
+ rotation.x_axis * scale.x,
+ rotation.y_axis * scale.y,
+ rotation.z_axis * scale.z,
+ ),
+ translation: translation.into(),
+ }
+ }
+
+ /// Creates an affine transform from the given 3D `rotation` and `translation`.
+ ///
+ /// Equivalent to `Affine3::from_translation(translation) * Affine3::from_quat(rotation)`
+ #[inline(always)]
+ pub fn from_rotation_translation(rotation: $quat, translation: $vec3) -> Self {
+ Self {
+ matrix3: $matrix::from_quat(rotation.into()),
+ translation: translation.into(),
+ }
+ }
+
+ /// The given `Mat4` must be an affine transform,
+ /// i.e. contain no perspective transform.
+ #[inline]
+ pub fn from_mat4(m: $mat4) -> Self {
+ Self {
+ matrix3: $matrix::from_cols(
+ $column(m.x_axis.0.into()),
+ $column(m.y_axis.0.into()),
+ $column(m.z_axis.0.into()),
+ ),
+ translation: $column(m.w_axis.0.into()),
+ }
+ }
+
+ /// Extracts `scale`, `rotation` and `translation` from `self`.
+ ///
+ /// The transform is expected to be non-degenerate and without shearing, or the output
+ /// will be invalid.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if the determinant `self.matrix3` is zero or if the resulting scale
+ /// vector contains any zero elements when `glam_assert` is enabled.
+ #[inline(always)]
+ pub fn to_scale_rotation_translation(&self) -> ($vec3, $quat, $vec3) {
+ // TODO: migrate to core module
+ let det = self.matrix3.determinant();
+ glam_assert!(det != 0.0);
+
+ let scale = $vec3::new(
+ self.matrix3.x_axis.length() * det.signum(),
+ self.matrix3.y_axis.length(),
+ self.matrix3.z_axis.length(),
+ );
+
+ glam_assert!(scale.cmpne($vec3::ZERO).all());
+
+ let inv_scale = scale.recip();
+
+ let rotation = $quat::from_mat3(&$mat3::from_cols(
+ (self.matrix3.x_axis * inv_scale.x).into(),
+ (self.matrix3.y_axis * inv_scale.y).into(),
+ (self.matrix3.z_axis * inv_scale.z).into(),
+ ));
+
+ (scale, rotation, self.translation.into())
+ }
+
+ #[inline]
+ fn look_to_lh(eye: $vec3, dir: $vec3, up: $vec3) -> Self {
+ let f = dir.normalize();
+ let s = up.cross(f).normalize();
+ let u = f.cross(s);
+ Self {
+ matrix3: $matrix::from_cols(
+ $vec3::new(s.x, u.x, f.x).into(),
+ $vec3::new(s.y, u.y, f.y).into(),
+ $vec3::new(s.z, u.z, f.z).into(),
+ ),
+ translation: $column::new(-s.dot(eye), -u.dot(eye), -f.dot(eye)),
+ }
+ }
+
+ /// Creates a left-handed view transform using a camera position, an up direction, and
+ /// a focal point.
+ ///
+ /// For a view coordinate system with `+X=right`, `+Y=up` and `+Z=forward`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `up` is not normalized when `glam_assert` is enabled.
+ #[inline]
+ pub fn look_at_lh(eye: $vec3, center: $vec3, up: $vec3) -> Self {
+ glam_assert!(up.is_normalized());
+ Self::look_to_lh(eye, center - eye, up)
+ }
+
+ /// Creates a right-handed view transform using a camera position, an up direction, and
+ /// a focal point.
+ ///
+ /// For a view coordinate system with `+X=right`, `+Y=up` and `+Z=back`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `up` is not normalized when `glam_assert` is enabled.
+ #[inline]
+ pub fn look_at_rh(eye: $vec3, center: $vec3, up: $vec3) -> Self {
+ glam_assert!(up.is_normalized());
+ Self::look_to_lh(eye, eye - center, up)
+ }
+
+            /// Transforms the given 3D point, applying shear, scale, rotation and translation.
+ #[inline(always)]
+ pub fn transform_point3(&self, other: $vec3) -> $vec3 {
+ ((self.matrix3.x_axis * other.x)
+ + (self.matrix3.y_axis * other.y)
+ + (self.matrix3.z_axis * other.z)
+ + self.translation)
+ .into()
+ }
+
+ /// Transforms the given 3D vector, applying shear, scale and rotation (but NOT
+ /// translation).
+ ///
+ /// To also apply translation, use [`Self::transform_point3`] instead.
+ #[inline(always)]
+ pub fn transform_vector3(&self, other: $vec3) -> $vec3 {
+ ((self.matrix3.x_axis * other.x)
+ + (self.matrix3.y_axis * other.y)
+ + (self.matrix3.z_axis * other.z))
+ .into()
+ }
+
+ /// Returns `true` if, and only if, all elements are finite.
+ ///
+ /// If any element is either `NaN`, positive or negative infinity, this will return
+ /// `false`.
+ #[inline]
+ pub fn is_finite(&self) -> bool {
+ self.matrix3.is_finite() && self.translation.is_finite()
+ }
+
+ /// Returns `true` if any elements are `NaN`.
+ #[inline]
+ pub fn is_nan(&self) -> bool {
+ self.matrix3.is_nan() || self.translation.is_nan()
+ }
+
+ /// Returns true if the absolute difference of all elements between `self` and `other`
+ /// is less than or equal to `max_abs_diff`.
+ ///
+ /// This can be used to compare if two 3x4 matrices contain similar elements. It works
+            /// best when comparing with a known value. The `max_abs_diff` that should be used
+ /// depends on the values being compared against.
+ ///
+ /// For more see
+ /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+ #[inline]
+ pub fn abs_diff_eq(&self, other: Self, max_abs_diff: $t) -> bool {
+ self.matrix3.abs_diff_eq(other.matrix3, max_abs_diff)
+ && self
+ .translation
+ .abs_diff_eq(other.translation, max_abs_diff)
+ }
+
+            /// Returns the inverse of this transform.
+ ///
+ /// Note that if the transform is not invertible the result will be invalid.
+ #[must_use]
+ #[inline]
+ pub fn inverse(&self) -> Self {
+ let matrix3 = self.matrix3.inverse();
+ // transform negative translation by the 3x3 inverse:
+ let translation = -(matrix3 * self.translation);
+
+ Self {
+ matrix3,
+ translation,
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_affine3_traits {
+ ($t:ty, $mat3:ident, $mat4:ident, $vec3:ident, $vec4:ident, $affine3:ident, $matrix:ident, $column:ident, $deref:ident) => {
+ impl Default for $affine3 {
+ #[inline(always)]
+ fn default() -> Self {
+ Self::IDENTITY
+ }
+ }
+
+ impl Deref for $affine3 {
+ type Target = $deref;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*(self as *const Self as *const Self::Target) }
+ }
+ }
+
+ impl DerefMut for $affine3 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *(self as *mut Self as *mut Self::Target) }
+ }
+ }
+
+ impl PartialEq for $affine3 {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.matrix3.eq(&other.matrix3) && self.translation.eq(&other.translation)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl core::fmt::Debug for $affine3 {
+ fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ fmt.debug_struct(stringify!($affine3))
+ .field("matrix3", &self.matrix3)
+ .field("translation", &self.translation)
+ .finish()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl core::fmt::Display for $affine3 {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(
+ f,
+ "[{}, {}, {}, {}]",
+ self.x_axis, self.y_axis, self.z_axis, self.w_axis
+ )
+ }
+ }
+
+ impl From<$affine3> for $mat4 {
+ #[inline]
+ fn from(m: $affine3) -> $mat4 {
+ $mat4::from_cols(
+ m.matrix3.x_axis.extend(0.0),
+ m.matrix3.y_axis.extend(0.0),
+ m.matrix3.z_axis.extend(0.0),
+ m.translation.extend(1.0),
+ )
+ }
+ }
+
+ impl Mul for $affine3 {
+ type Output = $affine3;
+
+ #[inline(always)]
+ fn mul(self, other: $affine3) -> Self::Output {
+ Self {
+ matrix3: self.matrix3 * other.matrix3,
+ translation: self.matrix3 * other.translation + self.translation,
+ }
+ }
+ }
+
+ impl Mul<$affine3> for $t {
+ type Output = $affine3;
+ #[inline(always)]
+ fn mul(self, other: $affine3) -> Self::Output {
+ $affine3 {
+ matrix3: self * other.matrix3,
+ translation: self * other.translation,
+ }
+ }
+ }
+
+ impl Mul<$t> for $affine3 {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: $t) -> Self::Output {
+ Self {
+ matrix3: self.matrix3 * other,
+ translation: self.translation * other,
+ }
+ }
+ }
+
+ impl Add<$affine3> for $affine3 {
+ type Output = Self;
+ #[inline(always)]
+ fn add(self, other: Self) -> Self::Output {
+ Self {
+ matrix3: self.matrix3 + other.matrix3,
+ translation: self.translation + other.translation,
+ }
+ }
+ }
+
+ impl Sub<$affine3> for $affine3 {
+ type Output = Self;
+ #[inline(always)]
+ fn sub(self, other: Self) -> Self::Output {
+ Self {
+ matrix3: self.matrix3 - other.matrix3,
+ translation: self.translation - other.translation,
+ }
+ }
+ }
+
+ impl Mul<$mat4> for $affine3 {
+ type Output = $mat4;
+
+ #[inline(always)]
+ fn mul(self, rhs: $mat4) -> Self::Output {
+ $mat4::from(self) * rhs
+ }
+ }
+
+ impl Mul<$affine3> for $mat4 {
+ type Output = $mat4;
+
+ #[inline(always)]
+ fn mul(self, rhs: $affine3) -> Self::Output {
+ self * $mat4::from(rhs)
+ }
+ }
+
+ impl<'a> core::iter::Product<&'a Self> for $affine3 {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::IDENTITY, |a, &b| a * b)
+ }
+ }
+ };
+}
+
+type DerefTargetF32 = Columns4<crate::Vec3A>;
+
+define_affine3_struct!(Affine3A, Mat3A, Vec3A);
+impl_affine3_methods!(f32, Mat3, Mat4, Quat, Vec3, Affine3A, Mat3A, Vec3A);
+impl_affine3_traits!(
+ f32,
+ Mat3,
+ Mat4,
+ Vec3,
+ Vec4,
+ Affine3A,
+ Mat3A,
+ Vec3A,
+ DerefTargetF32
+);
+
+impl Affine3A {
+ /// Transforms the given `Vec3A`, applying shear, scale, rotation and translation.
+ #[inline(always)]
+ pub fn transform_point3a(&self, other: Vec3A) -> Vec3A {
+ self.matrix3 * other + self.translation
+ }
+
+ /// Transforms the given `Vec3A`, applying shear, scale and rotation (but NOT
+ /// translation).
+ ///
+    /// To also apply translation, use [`Self::transform_point3a`] instead.
+ #[inline(always)]
+ pub fn transform_vector3a(&self, other: Vec3A) -> Vec3A {
+ self.matrix3 * other
+ }
+}
+
+type DerefTargetF64 = Columns4<DVec3>;
+
+define_affine3_struct!(DAffine3, DMat3, DVec3);
+impl_affine3_methods!(f64, DMat3, DMat4, DQuat, DVec3, DAffine3, DMat3, DVec3);
+impl_affine3_traits!(
+ f64,
+ DMat3,
+ DMat4,
+ DVec3,
+ DVec4,
+ DAffine3,
+ DMat3,
+ DVec3,
+ DerefTargetF64
+);
+
+mod const_test_affine3a {
+ const_assert_eq!(
+ core::mem::align_of::<super::Vec3A>(),
+ core::mem::align_of::<super::Affine3A>()
+ );
+ const_assert_eq!(64, core::mem::size_of::<super::Affine3A>());
+}
+
+mod const_test_daffine3 {
+ const_assert_eq!(
+ core::mem::align_of::<super::DVec3>(),
+ core::mem::align_of::<super::DAffine3>()
+ );
+ const_assert_eq!(96, core::mem::size_of::<super::DAffine3>());
+}
diff --git a/src/cast.rs b/src/cast.rs
new file mode 100644
index 0000000..7742260
--- /dev/null
+++ b/src/cast.rs
@@ -0,0 +1,167 @@
+use crate::{DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4};
+use crate::{IVec2, IVec3, IVec4};
+use crate::{Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+use crate::{UVec2, UVec3, UVec4};
+#[cfg(target_feature = "simd128")]
+use core::arch::wasm32::v128;
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+#[repr(C)]
+pub union Vec4Cast {
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ pub m128: __m128,
+ #[cfg(target_feature = "simd128")]
+ pub v128: v128,
+ pub fx4: [f32; 4],
+ pub fx2x2: [[f32; 2]; 2],
+ pub v4: Vec4,
+ pub v3a: Vec3A,
+ pub q: Quat,
+}
+
+#[repr(C)]
+pub union Vec3Cast {
+ pub fx3: [f32; 3],
+ pub v3: Vec3,
+}
+
+#[repr(C)]
+pub union Vec2Cast {
+ pub fx2: [f32; 2],
+ pub v2: Vec2,
+}
+
+#[repr(C)]
+pub union F32x9Cast {
+ pub fx3x3: [[f32; 3]; 3],
+ pub fx9: [f32; 9],
+}
+
+#[repr(C)]
+pub union F32x16Cast {
+ pub fx4x4: [[f32; 4]; 4],
+ pub fx16: [f32; 16],
+}
+
+#[repr(C)]
+pub union Mat4Cast {
+ pub v4x4: [Vec4; 4],
+ pub m4: Mat4,
+}
+
+#[repr(C)]
+pub union Mat3Cast {
+ pub v3x3: [Vec3; 3],
+ pub m3: Mat3,
+}
+
+#[repr(C)]
+pub union Mat3ACast {
+ pub v3x3: [Vec3A; 3],
+ pub m3: Mat3A,
+}
+
+#[repr(C)]
+pub union Mat2Cast {
+ pub v2x2: [Vec2; 2],
+ pub m2: Mat2,
+}
+
+#[repr(C)]
+pub union DVec4Cast {
+ pub fx4: [f64; 4],
+ pub fx2x2: [[f64; 2]; 2],
+ pub v4: DVec4,
+ pub q: DQuat,
+}
+
+#[repr(C)]
+pub union DVec3Cast {
+ pub fx3: [f64; 3],
+ pub v3: DVec3,
+}
+
+#[repr(C)]
+pub union DVec2Cast {
+ pub fx2: [f64; 2],
+ pub v2: DVec2,
+}
+
+#[repr(C)]
+pub union F64x9Cast {
+ pub fx3x3: [[f64; 3]; 3],
+ pub fx9: [f64; 9],
+}
+
+#[repr(C)]
+pub union F64x16Cast {
+ pub fx4x4: [[f64; 4]; 4],
+ pub fx16: [f64; 16],
+}
+
+#[repr(C)]
+pub union DMat4Cast {
+ pub v4x4: [DVec4; 4],
+ pub m4: DMat4,
+}
+
+#[repr(C)]
+pub union DMat3Cast {
+ pub v3x3: [DVec3; 3],
+ pub m3: DMat3,
+}
+
+#[repr(C)]
+pub union DMat2Cast {
+ pub v2x2: [DVec2; 2],
+ pub m2: DMat2,
+}
+
+#[repr(C)]
+pub union IVec4Cast {
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ pub m128: __m128i,
+ #[cfg(target_feature = "simd128")]
+ pub v128: v128,
+ pub ix4: [i32; 4],
+ pub ix2x2: [[i32; 2]; 2],
+ pub v4: IVec4,
+}
+
+#[repr(C)]
+pub union IVec3Cast {
+ pub ix3: [i32; 3],
+ pub v3: IVec3,
+}
+
+#[repr(C)]
+pub union IVec2Cast {
+ pub ix2: [i32; 2],
+ pub v2: IVec2,
+}
+
+#[repr(C)]
+pub union UVec4Cast {
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ pub m128: __m128,
+ #[cfg(target_feature = "simd128")]
+ pub v128: v128,
+ pub ux4: [u32; 4],
+ pub ux2x2: [[u32; 2]; 2],
+ pub v4: UVec4,
+}
+
+#[repr(C)]
+pub union UVec3Cast {
+ pub ux3: [u32; 3],
+ pub v3: UVec3,
+}
+
+#[repr(C)]
+pub union UVec2Cast {
+ pub ux2: [u32; 2],
+ pub v2: UVec2,
+}
diff --git a/src/core/mod.rs b/src/core/mod.rs
new file mode 100644
index 0000000..8eba6e4
--- /dev/null
+++ b/src/core/mod.rs
@@ -0,0 +1,18 @@
+// the core module provides traits for implementing vector, quaternion and matrix operations,
+// storage structs for scalar vector, quaternion and matrix data and implementations of the traits
+// for those structs and for supported SIMD types such as SSE2's `__m128`.
+//
+// The higher level glam library types have an inner type which either uses one of these storage
+// structs, or `__m128` and the actual implementation is provided by the core module.
+//
+// This architecture allows the public API to not require generics or traits, while still
+// supporting a number of Rust primitive types and SIMD architectures such as SSE2.
+//
+pub mod storage;
+pub mod traits;
+
+mod scalar;
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+mod sse2;
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+mod wasm32;
diff --git a/src/core/scalar/mask.rs b/src/core/scalar/mask.rs
new file mode 100644
index 0000000..7745826
--- /dev/null
+++ b/src/core/scalar/mask.rs
@@ -0,0 +1,452 @@
+use crate::core::{
+ storage::{XY, XYZ, XYZW},
+ traits::{scalar::*, vector::*},
+};
+
+impl MaskConst for u32 {
+ const MASK: [u32; 2] = [0, 0xff_ff_ff_ff];
+}
+
+impl MaskConst for u64 {
+ const MASK: [u64; 2] = [0, 0xff_ff_ff_ff_ff_ff_ff_ff];
+}
+
+// u32 (currently unused)
+
+/*
+impl MaskVectorConst for XY<u32> {
+ const FALSE: Self = Self { x: 0, y: 0 };
+}
+
+impl MaskVectorConst for XYZ<u32> {
+ const FALSE: Self = Self { x: 0, y: 0, z: 0 };
+}
+
+impl MaskVectorConst for XYZW<u32> {
+ const FALSE: Self = Self {
+ x: 0,
+ y: 0,
+ z: 0,
+ w: 0,
+ };
+}
+
+impl MaskVector for XY<u32> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x & other.x,
+ y: self.y & other.y,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x | other.x,
+ y: self.y | other.y,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ }
+ }
+}
+
+impl MaskVector for XYZ<u32> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x & other.x,
+ y: self.y & other.y,
+ z: self.z & other.z,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x | other.x,
+ y: self.y | other.y,
+ z: self.z | other.z,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ }
+ }
+}
+
+impl MaskVector for XYZW<u32> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x & other.x,
+ y: self.y & other.y,
+ z: self.z & other.z,
+ w: self.w & other.w,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x | other.x,
+ y: self.y | other.y,
+ z: self.z | other.z,
+ w: self.w | other.w,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ w: !self.w,
+ }
+ }
+}
+
+impl MaskVector2 for XY<u32> {
+ #[inline(always)]
+ fn new(x: bool, y: bool) -> Self {
+ Self {
+ x: MaskConst::MASK[x as usize],
+ y: MaskConst::MASK[y as usize],
+ }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x as u32 & 0x1) | (self.y as u32 & 0x1) << 1
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ ((self.x | self.y) & 0x1) != 0
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ ((self.x & self.y) & 0x1) != 0
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 2] {
+ [self.x != 0, self.y != 0]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 2] {
+ [self.x, self.y]
+ }
+}
+
+impl MaskVector3 for XYZ<u32> {
+ #[inline(always)]
+ fn new(x: bool, y: bool, z: bool) -> Self {
+ // A SSE2 mask can be any bit pattern but for the `Vec3Mask` implementation of select
+ // we expect either 0 or 0xff_ff_ff_ff. This should be a safe assumption as this type
+ // can only be created via this function or by `Vec3` methods.
+ Self {
+ x: MaskConst::MASK[x as usize],
+ y: MaskConst::MASK[y as usize],
+ z: MaskConst::MASK[z as usize],
+ }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x & 0x1) | (self.y & 0x1) << 1 | (self.z & 0x1) << 2
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ ((self.x | self.y | self.z) & 0x1) != 0
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ ((self.x & self.y & self.z) & 0x1) != 0
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 3] {
+ [self.x != 0, self.y != 0, self.z != 0]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 3] {
+ [self.x, self.y, self.z]
+ }
+}
+
+impl MaskVector4 for XYZW<u32> {
+ #[inline(always)]
+ fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
+ // A SSE2 mask can be any bit pattern but for the `Vec4Mask` implementation of select
+ // we expect either 0 or 0xff_ff_ff_ff. This should be a safe assumption as this type
+ // can only be created via this function or by `Vec4` methods.
+ Self {
+ x: MaskConst::MASK[x as usize],
+ y: MaskConst::MASK[y as usize],
+ z: MaskConst::MASK[z as usize],
+ w: MaskConst::MASK[w as usize],
+ }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x & 0x1) | (self.y & 0x1) << 1 | (self.z & 0x1) << 2 | (self.w & 0x1) << 3
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ ((self.x | self.y | self.z | self.w) & 0x1) != 0
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ ((self.x & self.y & self.z & self.w) & 0x1) != 0
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 4] {
+ [self.x != 0, self.y != 0, self.z != 0, self.w != 0]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 4] {
+ [self.x, self.y, self.z, self.w]
+ }
+}
+*/
+
+// bool
+
+impl MaskVectorConst for XY<bool> {
+ const FALSE: Self = Self { x: false, y: false };
+}
+
+impl MaskVectorConst for XYZ<bool> {
+ const FALSE: Self = Self {
+ x: false,
+ y: false,
+ z: false,
+ };
+}
+
+impl MaskVectorConst for XYZW<bool> {
+ const FALSE: Self = Self {
+ x: false,
+ y: false,
+ z: false,
+ w: false,
+ };
+}
+
+impl MaskVector for XY<bool> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x && other.x,
+ y: self.y && other.y,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x || other.x,
+ y: self.y || other.y,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ }
+ }
+}
+
+impl MaskVector for XYZ<bool> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x && other.x,
+ y: self.y && other.y,
+ z: self.z && other.z,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x || other.x,
+ y: self.y || other.y,
+ z: self.z || other.z,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ }
+ }
+}
+
+impl MaskVector for XYZW<bool> {
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self {
+ x: self.x && other.x,
+ y: self.y && other.y,
+ z: self.z && other.z,
+ w: self.w && other.w,
+ }
+ }
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self {
+ x: self.x || other.x,
+ y: self.y || other.y,
+ z: self.z || other.z,
+ w: self.w || other.w,
+ }
+ }
+
+ #[inline]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ w: !self.w,
+ }
+ }
+}
+
+impl MaskVector2 for XY<bool> {
+ #[inline(always)]
+ fn new(x: bool, y: bool) -> Self {
+ Self { x, y }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x as u32) | (self.y as u32) << 1
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ self.x || self.y
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ self.x && self.y
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 2] {
+ [self.x, self.y]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 2] {
+ [
+ MaskConst::MASK[self.x as usize],
+ MaskConst::MASK[self.y as usize],
+ ]
+ }
+}
+
+impl MaskVector3 for XYZ<bool> {
+ #[inline(always)]
+ fn new(x: bool, y: bool, z: bool) -> Self {
+ Self { x, y, z }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x as u32) | (self.y as u32) << 1 | (self.z as u32) << 2
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ self.x || self.y || self.z
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ self.x && self.y && self.z
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 3] {
+ [self.x, self.y, self.z]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 3] {
+ [
+ MaskConst::MASK[self.x as usize],
+ MaskConst::MASK[self.y as usize],
+ MaskConst::MASK[self.z as usize],
+ ]
+ }
+}
+
+impl MaskVector4 for XYZW<bool> {
+ #[inline(always)]
+ fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
+ Self { x, y, z, w }
+ }
+
+ #[inline]
+ fn bitmask(self) -> u32 {
+ (self.x as u32) | (self.y as u32) << 1 | (self.z as u32) << 2 | (self.w as u32) << 3
+ }
+
+ #[inline]
+ fn any(self) -> bool {
+ self.x || self.y || self.z || self.w
+ }
+
+ #[inline]
+ fn all(self) -> bool {
+ self.x && self.y && self.z && self.w
+ }
+
+ #[inline]
+ fn into_bool_array(self) -> [bool; 4] {
+ [self.x, self.y, self.z, self.w]
+ }
+
+ #[inline]
+ fn into_u32_array(self) -> [u32; 4] {
+ [
+ MaskConst::MASK[self.x as usize],
+ MaskConst::MASK[self.y as usize],
+ MaskConst::MASK[self.z as usize],
+ MaskConst::MASK[self.w as usize],
+ ]
+ }
+}
diff --git a/src/core/scalar/matrix.rs b/src/core/scalar/matrix.rs
new file mode 100644
index 0000000..34b0211
--- /dev/null
+++ b/src/core/scalar/matrix.rs
@@ -0,0 +1,285 @@
+use crate::core::{
+ storage::{Columns2, Columns3, Columns4, XY, XYZ, XYZF32A16, XYZW},
+ traits::{
+ matrix::{
+ FloatMatrix2x2, FloatMatrix3x3, FloatMatrix4x4, Matrix, Matrix2x2, Matrix3x3,
+ Matrix4x4, MatrixConst,
+ },
+ projection::ProjectionMatrix,
+ scalar::{FloatEx, NanConstEx, NumEx},
+ vector::*,
+ },
+};
+
+// Scalar 2x2 column-major matrix stored as two `XY` columns.
+impl<T: NumEx> MatrixConst for Columns2<XY<T>> {
+ const ZERO: Self = Self {
+ x_axis: XY::ZERO,
+ y_axis: XY::ZERO,
+ };
+ const IDENTITY: Self = Self {
+ x_axis: XY::X,
+ y_axis: XY::Y,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for Columns2<XY<T>> {
+ const NAN: Self = Self {
+ x_axis: XY::NAN,
+ y_axis: XY::NAN,
+ };
+}
+
+// Marker impl; behaviour comes from the trait's default methods.
+impl<T: NumEx> Matrix<T> for Columns2<XY<T>> {}
+
+impl<T: NumEx> Matrix2x2<T, XY<T>> for Columns2<XY<T>> {
+ #[inline(always)]
+ fn from_cols(x_axis: XY<T>, y_axis: XY<T>) -> Self {
+ Self { x_axis, y_axis }
+ }
+
+ #[inline(always)]
+ fn x_axis(&self) -> &XY<T> {
+ &self.x_axis
+ }
+
+ #[inline(always)]
+ fn y_axis(&self) -> &XY<T> {
+ &self.y_axis
+ }
+}
+
+// Float-specific 2x2 operations are all trait defaults for the scalar backend.
+impl<T: FloatEx> FloatMatrix2x2<T, XY<T>> for Columns2<XY<T>> {}
+
+// Scalar 3x3 column-major matrix stored as three `XYZ` columns.
+impl<T: NumEx> MatrixConst for Columns3<XYZ<T>> {
+ const ZERO: Self = Self {
+ x_axis: XYZ::ZERO,
+ y_axis: XYZ::ZERO,
+ z_axis: XYZ::ZERO,
+ };
+ const IDENTITY: Self = Self {
+ x_axis: XYZ::X,
+ y_axis: XYZ::Y,
+ z_axis: XYZ::Z,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for Columns3<XYZ<T>> {
+ const NAN: Self = Self {
+ x_axis: XYZ::NAN,
+ y_axis: XYZ::NAN,
+ z_axis: XYZ::NAN,
+ };
+}
+
+impl<T: NumEx> Matrix<T> for Columns3<XYZ<T>> {}
+
+impl<T: NumEx> Matrix3x3<T, XYZ<T>> for Columns3<XYZ<T>> {
+ #[inline(always)]
+ fn from_cols(x_axis: XYZ<T>, y_axis: XYZ<T>, z_axis: XYZ<T>) -> Self {
+ Self {
+ x_axis,
+ y_axis,
+ z_axis,
+ }
+ }
+
+ #[inline(always)]
+ fn x_axis(&self) -> &XYZ<T> {
+ &self.x_axis
+ }
+
+ #[inline(always)]
+ fn y_axis(&self) -> &XYZ<T> {
+ &self.y_axis
+ }
+
+ #[inline(always)]
+ fn z_axis(&self) -> &XYZ<T> {
+ &self.z_axis
+ }
+
+ #[inline]
+ fn mul_vector(&self, other: XYZ<T>) -> XYZ<T> {
+ // Matrix * column-vector as a sum of scaled columns:
+ // x_axis * other.x + y_axis * other.y + z_axis * other.z.
+ // default implementation uses splat_x etc, which might not be optimal. Need to check.
+ let mut res = self.x_axis.mul_scalar(other.x);
+ res = self.y_axis.mul_scalar(other.y).add(res);
+ res = self.z_axis.mul_scalar(other.z).add(res);
+ res
+ }
+}
+
+impl<T: FloatEx> FloatMatrix3x3<T, XYZ<T>> for Columns3<XYZ<T>> {
+ // Treats the 3x3 matrix as a 2D affine transform: upper-left 2x2 rotates/scales,
+ // z_axis.xy translates (points only).
+ #[inline]
+ fn transform_point2(&self, other: XY<T>) -> XY<T> {
+ // TODO: This is untested, probably slower than the high level code that uses a SIMD mat2
+ Columns2::from_cols(self.x_axis.into(), self.y_axis.into())
+ .mul_vector(other)
+ .add(self.z_axis.into())
+ }
+
+ #[inline]
+ fn transform_vector2(&self, other: XY<T>) -> XY<T> {
+ // TODO: This is untested, probably slower than the high level code that uses a SIMD mat2
+ Columns2::from_cols(self.x_axis.into(), self.y_axis.into()).mul_vector(other)
+ }
+}
+
+// 3x3 matrix over the 16-byte-aligned f32 storage type (SIMD-friendly layout).
+impl MatrixConst for Columns3<XYZF32A16> {
+ const ZERO: Self = Self {
+ x_axis: XYZF32A16::ZERO,
+ y_axis: XYZF32A16::ZERO,
+ z_axis: XYZF32A16::ZERO,
+ };
+ const IDENTITY: Self = Self {
+ x_axis: XYZF32A16::X,
+ y_axis: XYZF32A16::Y,
+ z_axis: XYZF32A16::Z,
+ };
+}
+
+impl NanConstEx for Columns3<XYZF32A16> {
+ const NAN: Self = Self {
+ x_axis: XYZF32A16::NAN,
+ y_axis: XYZF32A16::NAN,
+ z_axis: XYZF32A16::NAN,
+ };
+}
+
+impl Matrix<f32> for Columns3<XYZF32A16> {}
+
+impl Matrix3x3<f32, XYZF32A16> for Columns3<XYZF32A16> {
+ #[inline(always)]
+ fn from_cols(x_axis: XYZF32A16, y_axis: XYZF32A16, z_axis: XYZF32A16) -> Self {
+ Self {
+ x_axis,
+ y_axis,
+ z_axis,
+ }
+ }
+
+ #[inline(always)]
+ fn x_axis(&self) -> &XYZF32A16 {
+ &self.x_axis
+ }
+
+ #[inline(always)]
+ fn y_axis(&self) -> &XYZF32A16 {
+ &self.y_axis
+ }
+
+ #[inline(always)]
+ fn z_axis(&self) -> &XYZF32A16 {
+ &self.z_axis
+ }
+}
+
+impl FloatMatrix3x3<f32, XYZF32A16> for Columns3<XYZF32A16> {
+ // 2D affine interpretation, see the XYZ<T> impl above; `into_xy` drops the z lane.
+ #[inline]
+ fn transform_point2(&self, other: XY<f32>) -> XY<f32> {
+ // TODO: This is untested, probably slower than the high level code that uses a SIMD mat2
+ Columns2::from_cols(self.x_axis.into_xy(), self.y_axis.into_xy())
+ .mul_vector(other)
+ .add(self.z_axis.into_xy())
+ }
+
+ #[inline]
+ fn transform_vector2(&self, other: XY<f32>) -> XY<f32> {
+ // TODO: This is untested, probably slower than the high level code that uses a SIMD mat2
+ Columns2::from_cols(self.x_axis.into_xy(), self.y_axis.into_xy()).mul_vector(other)
+ }
+}
+
+// Scalar 4x4 column-major matrix stored as four `XYZW` columns.
+impl<T: NumEx> MatrixConst for Columns4<XYZW<T>> {
+ const ZERO: Self = Self {
+ x_axis: XYZW::ZERO,
+ y_axis: XYZW::ZERO,
+ z_axis: XYZW::ZERO,
+ w_axis: XYZW::ZERO,
+ };
+ const IDENTITY: Self = Self {
+ x_axis: XYZW::X,
+ y_axis: XYZW::Y,
+ z_axis: XYZW::Z,
+ w_axis: XYZW::W,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for Columns4<XYZW<T>> {
+ const NAN: Self = Self {
+ x_axis: XYZW::NAN,
+ y_axis: XYZW::NAN,
+ z_axis: XYZW::NAN,
+ w_axis: XYZW::NAN,
+ };
+}
+
+impl<T: NumEx> Matrix<T> for Columns4<XYZW<T>> {}
+
+impl<T: NumEx> Matrix4x4<T, XYZW<T>> for Columns4<XYZW<T>> {
+ #[rustfmt::skip]
+ #[inline(always)]
+ fn from_cols(x_axis: XYZW<T>, y_axis: XYZW<T>, z_axis: XYZW<T>, w_axis: XYZW<T>) -> Self {
+ Self { x_axis, y_axis, z_axis, w_axis }
+ }
+
+ #[inline(always)]
+ fn x_axis(&self) -> &XYZW<T> {
+ &self.x_axis
+ }
+
+ #[inline(always)]
+ fn y_axis(&self) -> &XYZW<T> {
+ &self.y_axis
+ }
+
+ #[inline(always)]
+ fn z_axis(&self) -> &XYZW<T> {
+ &self.z_axis
+ }
+
+ #[inline(always)]
+ fn w_axis(&self) -> &XYZW<T> {
+ &self.w_axis
+ }
+}
+
+impl<T: FloatEx> FloatMatrix4x4<T, XYZW<T>> for Columns4<XYZW<T>> {
+ // Scalar fallback: the "SIMD" 3-vector is just the plain XYZ storage, so the
+ // float4-as-vector3 entry points simply forward to the regular transforms.
+ type SIMDVector3 = XYZ<T>;
+
+ #[inline(always)]
+ fn transform_float4_as_point3(&self, other: XYZ<T>) -> XYZ<T> {
+ self.transform_point3(other)
+ }
+
+ #[inline(always)]
+ fn transform_float4_as_vector3(&self, other: XYZ<T>) -> XYZ<T> {
+ self.transform_vector3(other)
+ }
+
+ #[inline(always)]
+ fn project_float4_as_point3(&self, other: XYZ<T>) -> XYZ<T> {
+ self.project_point3(other)
+ }
+}
+
+// Projection construction uses the trait's default methods.
+impl<T: FloatEx> ProjectionMatrix<T, XYZW<T>> for Columns4<XYZW<T>> {}
+
+// Lossless column-wise conversions between the packed and the 16-byte-aligned
+// 3x3 f32 matrix storage types.
+impl From<Columns3<XYZ<f32>>> for Columns3<XYZF32A16> {
+ fn from(v: Columns3<XYZ<f32>>) -> Columns3<XYZF32A16> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
+
+impl From<Columns3<XYZF32A16>> for Columns3<XYZ<f32>> {
+ fn from(v: Columns3<XYZF32A16>) -> Columns3<XYZ<f32>> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
diff --git a/src/core/scalar/mod.rs b/src/core/scalar/mod.rs
new file mode 100644
index 0000000..88050c0
--- /dev/null
+++ b/src/core/scalar/mod.rs
@@ -0,0 +1,4 @@
+pub mod mask;
+pub mod matrix;
+pub mod quaternion;
+pub mod vector;
diff --git a/src/core/scalar/quaternion.rs b/src/core/scalar/quaternion.rs
new file mode 100644
index 0000000..a483ed1
--- /dev/null
+++ b/src/core/scalar/quaternion.rs
@@ -0,0 +1,87 @@
+use crate::core::{
+ storage::{XYZ, XYZW},
+ traits::{quaternion::Quaternion, scalar::*, vector::*},
+};
+
+// Scalar quaternion implementation; x, y, z are the vector part, w the scalar part.
+impl<T: FloatEx> Quaternion<T> for XYZW<T> {
+ // fallback
+ type SIMDVector3 = XYZ<T>;
+
+ #[inline(always)]
+ fn conjugate(self) -> Self {
+ // Negate the vector part; for unit quaternions this is the inverse rotation.
+ Self::new(-self.x, -self.y, -self.z, self.w)
+ }
+
+ // Normalized linear interpolation (nlerp). `bias` flips `end` when the inputs
+ // are on opposite hemispheres so interpolation takes the shortest path.
+ #[inline]
+ fn lerp(self, end: Self, s: T) -> Self {
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(end));
+
+ let start = self;
+ // no-op rebinding, kept for symmetry with `start`
+ let end = end;
+ let dot = start.dot(end);
+ let bias = if dot >= T::ZERO { T::ONE } else { T::NEG_ONE };
+ // start + (end * bias - start) * s, renormalized.
+ let interpolated = start.add(end.mul_scalar(bias).sub(start).mul_scalar(s));
+ interpolated.normalize()
+ }
+
+ // Spherical linear interpolation; falls back to nlerp when the inputs are
+ // nearly parallel, where slerp becomes numerically unstable.
+ #[inline]
+ fn slerp(self, end: Self, s: T) -> Self {
+ // http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/
+
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(end));
+
+ let dot = self.dot(end);
+
+ if dot > T::from_f32(0.9995) {
+ // assumes lerp returns a normalized quaternion
+ self.lerp(end, s)
+ } else {
+ // assumes acos_approx clamps the input to [-1.0, 1.0]
+ let theta = dot.acos_approx();
+ let scale1 = (theta * (T::ONE - s)).sin();
+ let scale2 = (theta * s).sin();
+ let theta_sin = theta.sin();
+
+ // (self * scale1 + end * scale2) / sin(theta)
+ self.mul_scalar(scale1)
+ .add(end.mul_scalar(scale2))
+ .mul_scalar(theta_sin.recip())
+ }
+ }
+
+ // Rotate a 3D vector by this (unit) quaternion without building a matrix,
+ // using the expansion v' = v*(w^2 - |b|^2) + b*(v.b)*2 + (b x v)*w*2
+ // where b is the vector part.
+ #[inline]
+ fn mul_vector3(self, other: XYZ<T>) -> XYZ<T> {
+ glam_assert!(FloatVector4::is_normalized(self));
+ let w = self.w;
+ let b = XYZ {
+ x: self.x,
+ y: self.y,
+ z: self.z,
+ };
+ let b2 = b.dot(b);
+ other
+ .mul_scalar(w * w - b2)
+ .add(b.mul_scalar(other.dot(b) * T::TWO))
+ .add(b.cross(other).mul_scalar(w * T::TWO))
+ }
+
+ // Hamilton product: the result rotates by `other` first, then `self`.
+ #[inline]
+ fn mul_quaternion(self, other: Self) -> Self {
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(other));
+ let (x0, y0, z0, w0) = self.into_tuple();
+ let (x1, y1, z1, w1) = other.into_tuple();
+ Self::new(
+ w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1,
+ w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1,
+ w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1,
+ w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1,
+ )
+ }
+
+ // Scalar backend: the SIMD 3-vector is plain XYZ, so just forward.
+ #[inline(always)]
+ fn mul_float4_as_vector3(self, other: XYZ<T>) -> XYZ<T> {
+ self.mul_vector3(other)
+ }
+}
diff --git a/src/core/scalar/vector.rs b/src/core/scalar/vector.rs
new file mode 100644
index 0000000..4712502
--- /dev/null
+++ b/src/core/scalar/vector.rs
@@ -0,0 +1,1465 @@
+use crate::core::{
+ storage::{XY, XYZ, XYZF32A16, XYZW},
+ traits::{scalar::*, vector::*},
+};
+
+// Compile-time vector constants (zero, one, unit axes, NaN) for the scalar
+// 2-, 3- and 4-lane storage types. The fully-qualified `<T as …>` paths
+// disambiguate the trait consts from any inherent ones.
+impl<T: NumEx> VectorConst for XY<T> {
+ const ZERO: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ };
+ const ONE: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ONE,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for XY<T> {
+ const NAN: Self = Self {
+ x: <T as NanConstEx>::NAN,
+ y: <T as NanConstEx>::NAN,
+ };
+}
+
+// Unit axes for 2D.
+impl<T: NumEx> Vector2Const for XY<T> {
+ const X: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ZERO,
+ };
+ const Y: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ONE,
+ };
+}
+
+impl<T: NumEx> VectorConst for XYZ<T> {
+ const ZERO: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ZERO,
+ };
+ const ONE: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ONE,
+ z: <T as NumConstEx>::ONE,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for XYZ<T> {
+ const NAN: Self = Self {
+ x: <T as NanConstEx>::NAN,
+ y: <T as NanConstEx>::NAN,
+ z: <T as NanConstEx>::NAN,
+ };
+}
+
+// Unit axes for 3D.
+impl<T: NumEx> Vector3Const for XYZ<T> {
+ const X: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ZERO,
+ };
+ const Y: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ONE,
+ z: <T as NumConstEx>::ZERO,
+ };
+ const Z: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ONE,
+ };
+}
+
+impl<T: NumEx> VectorConst for XYZW<T> {
+ const ZERO: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ZERO,
+ w: <T as NumConstEx>::ZERO,
+ };
+ const ONE: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ONE,
+ z: <T as NumConstEx>::ONE,
+ w: <T as NumConstEx>::ONE,
+ };
+}
+
+impl<T: NanConstEx> NanConstEx for XYZW<T> {
+ const NAN: Self = Self {
+ x: <T as NanConstEx>::NAN,
+ y: <T as NanConstEx>::NAN,
+ z: <T as NanConstEx>::NAN,
+ w: <T as NanConstEx>::NAN,
+ };
+}
+
+// Unit axes for 4D.
+impl<T: NumEx> Vector4Const for XYZW<T> {
+ const X: Self = Self {
+ x: <T as NumConstEx>::ONE,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ZERO,
+ w: <T as NumConstEx>::ZERO,
+ };
+ const Y: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ONE,
+ z: <T as NumConstEx>::ZERO,
+ w: <T as NumConstEx>::ZERO,
+ };
+ const Z: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ONE,
+ w: <T as NumConstEx>::ZERO,
+ };
+ const W: Self = Self {
+ x: <T as NumConstEx>::ZERO,
+ y: <T as NumConstEx>::ZERO,
+ z: <T as NumConstEx>::ZERO,
+ w: <T as NumConstEx>::ONE,
+ };
+}
+
+// Lane-wise scalar implementation of the generic `Vector` operations for 2D.
+// Every method applies the scalar operation independently to each component.
+impl<T: NumEx> Vector<T> for XY<T> {
+ type Mask = XY<bool>;
+
+ #[inline]
+ fn splat(s: T) -> Self {
+ Self { x: s, y: s }
+ }
+
+ // Per-lane select: a true mask lane picks from `if_true`, false from `if_false`.
+ #[inline]
+ fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
+ Self {
+ x: if mask.x { if_true.x } else { if_false.x },
+ y: if mask.y { if_true.y } else { if_false.y },
+ }
+ }
+
+ // Lane-wise comparisons producing a boolean mask.
+ #[inline]
+ fn cmpeq(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.eq(&other.x),
+ y: self.y.eq(&other.y),
+ }
+ }
+
+ #[inline]
+ fn cmpne(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ne(&other.x),
+ y: self.y.ne(&other.y),
+ }
+ }
+
+ #[inline]
+ fn cmpge(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ge(&other.x),
+ y: self.y.ge(&other.y),
+ }
+ }
+
+ #[inline]
+ fn cmpgt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.gt(&other.x),
+ y: self.y.gt(&other.y),
+ }
+ }
+
+ #[inline]
+ fn cmple(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.le(&other.x),
+ y: self.y.le(&other.y),
+ }
+ }
+
+ #[inline]
+ fn cmplt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.lt(&other.x),
+ y: self.y.lt(&other.y),
+ }
+ }
+
+ // Lane-wise arithmetic against another vector.
+ #[inline]
+ fn add(self, other: Self) -> Self {
+ Self {
+ x: self.x + other.x,
+ y: self.y + other.y,
+ }
+ }
+
+ #[inline]
+ fn div(self, other: Self) -> Self {
+ Self {
+ x: self.x / other.x,
+ y: self.y / other.y,
+ }
+ }
+
+ #[inline]
+ fn mul(self, other: Self) -> Self {
+ Self {
+ x: self.x * other.x,
+ y: self.y * other.y,
+ }
+ }
+
+ #[inline]
+ fn sub(self, other: Self) -> Self {
+ Self {
+ x: self.x - other.x,
+ y: self.y - other.y,
+ }
+ }
+
+ // Lane-wise arithmetic against a broadcast scalar.
+ #[inline]
+ fn add_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x + other,
+ y: self.y + other,
+ }
+ }
+
+ #[inline]
+ fn sub_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x - other,
+ y: self.y - other,
+ }
+ }
+
+ #[inline]
+ fn mul_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x * other,
+ y: self.y * other,
+ }
+ }
+
+ #[inline]
+ fn div_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x / other,
+ y: self.y / other,
+ }
+ }
+
+ #[inline]
+ fn rem(self, other: Self) -> Self {
+ Self {
+ x: self.x % other.x,
+ y: self.y % other.y,
+ }
+ }
+
+ #[inline]
+ fn rem_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x % other,
+ y: self.y % other,
+ }
+ }
+
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ Self {
+ x: self.x.min(other.x),
+ y: self.y.min(other.y),
+ }
+ }
+
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ Self {
+ x: self.x.max(other.x),
+ y: self.y.max(other.y),
+ }
+ }
+}
+
+// Lane-wise scalar implementation of the generic `Vector` operations for 3D.
+// Every method applies the scalar operation independently to each component.
+impl<T: NumEx> Vector<T> for XYZ<T> {
+ type Mask = XYZ<bool>;
+
+ #[inline]
+ fn splat(s: T) -> Self {
+ Self { x: s, y: s, z: s }
+ }
+
+ // Per-lane select: a true mask lane picks from `if_true`, false from `if_false`.
+ #[inline]
+ fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
+ Self {
+ x: if mask.x { if_true.x } else { if_false.x },
+ y: if mask.y { if_true.y } else { if_false.y },
+ z: if mask.z { if_true.z } else { if_false.z },
+ }
+ }
+
+ // Lane-wise comparisons producing a boolean mask.
+ #[inline]
+ fn cmpeq(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.eq(&other.x),
+ y: self.y.eq(&other.y),
+ z: self.z.eq(&other.z),
+ }
+ }
+
+ #[inline]
+ fn cmpne(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ne(&other.x),
+ y: self.y.ne(&other.y),
+ z: self.z.ne(&other.z),
+ }
+ }
+
+ #[inline]
+ fn cmpge(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ge(&other.x),
+ y: self.y.ge(&other.y),
+ z: self.z.ge(&other.z),
+ }
+ }
+
+ #[inline]
+ fn cmpgt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.gt(&other.x),
+ y: self.y.gt(&other.y),
+ z: self.z.gt(&other.z),
+ }
+ }
+
+ #[inline]
+ fn cmple(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.le(&other.x),
+ y: self.y.le(&other.y),
+ z: self.z.le(&other.z),
+ }
+ }
+
+ #[inline]
+ fn cmplt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.lt(&other.x),
+ y: self.y.lt(&other.y),
+ z: self.z.lt(&other.z),
+ }
+ }
+
+ // Lane-wise arithmetic against another vector.
+ #[inline]
+ fn add(self, other: Self) -> Self {
+ Self {
+ x: self.x + other.x,
+ y: self.y + other.y,
+ z: self.z + other.z,
+ }
+ }
+
+ #[inline]
+ fn div(self, other: Self) -> Self {
+ Self {
+ x: self.x / other.x,
+ y: self.y / other.y,
+ z: self.z / other.z,
+ }
+ }
+
+ #[inline]
+ fn mul(self, other: Self) -> Self {
+ Self {
+ x: self.x * other.x,
+ y: self.y * other.y,
+ z: self.z * other.z,
+ }
+ }
+
+ #[inline]
+ fn sub(self, other: Self) -> Self {
+ Self {
+ x: self.x - other.x,
+ y: self.y - other.y,
+ z: self.z - other.z,
+ }
+ }
+
+ // Lane-wise arithmetic against a broadcast scalar.
+ // `#[inline]` added for consistency with every sibling method in this impl
+ // and with the XY<T>/XYZW<T> implementations.
+ #[inline]
+ fn add_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x + other,
+ y: self.y + other,
+ z: self.z + other,
+ }
+ }
+
+ #[inline]
+ fn sub_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x - other,
+ y: self.y - other,
+ z: self.z - other,
+ }
+ }
+
+ #[inline]
+ fn mul_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x * other,
+ y: self.y * other,
+ z: self.z * other,
+ }
+ }
+
+ #[inline]
+ fn div_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x / other,
+ y: self.y / other,
+ z: self.z / other,
+ }
+ }
+
+ #[inline]
+ fn rem(self, other: Self) -> Self {
+ Self {
+ x: self.x % other.x,
+ y: self.y % other.y,
+ z: self.z % other.z,
+ }
+ }
+
+ #[inline]
+ fn rem_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x % other,
+ y: self.y % other,
+ z: self.z % other,
+ }
+ }
+
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ Self {
+ x: self.x.min(other.x),
+ y: self.y.min(other.y),
+ z: self.z.min(other.z),
+ }
+ }
+
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ Self {
+ x: self.x.max(other.x),
+ y: self.y.max(other.y),
+ z: self.z.max(other.z),
+ }
+ }
+}
+
+// Lane-wise scalar implementation of the generic `Vector` operations for 4D.
+// Every method applies the scalar operation independently to each component.
+impl<T: NumEx> Vector<T> for XYZW<T> {
+ type Mask = XYZW<bool>;
+
+ #[inline]
+ fn splat(s: T) -> Self {
+ Self {
+ x: s,
+ y: s,
+ z: s,
+ w: s,
+ }
+ }
+
+ // Per-lane select: a true mask lane picks from `if_true`, false from `if_false`.
+ #[inline]
+ fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
+ Self {
+ x: if mask.x { if_true.x } else { if_false.x },
+ y: if mask.y { if_true.y } else { if_false.y },
+ z: if mask.z { if_true.z } else { if_false.z },
+ w: if mask.w { if_true.w } else { if_false.w },
+ }
+ }
+
+ // Lane-wise comparisons producing a boolean mask.
+ #[inline]
+ fn cmpeq(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.eq(&other.x),
+ y: self.y.eq(&other.y),
+ z: self.z.eq(&other.z),
+ w: self.w.eq(&other.w),
+ }
+ }
+
+ #[inline]
+ fn cmpne(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ne(&other.x),
+ y: self.y.ne(&other.y),
+ z: self.z.ne(&other.z),
+ w: self.w.ne(&other.w),
+ }
+ }
+
+ #[inline]
+ fn cmpge(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.ge(&other.x),
+ y: self.y.ge(&other.y),
+ z: self.z.ge(&other.z),
+ w: self.w.ge(&other.w),
+ }
+ }
+
+ #[inline]
+ fn cmpgt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.gt(&other.x),
+ y: self.y.gt(&other.y),
+ z: self.z.gt(&other.z),
+ w: self.w.gt(&other.w),
+ }
+ }
+
+ #[inline]
+ fn cmple(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.le(&other.x),
+ y: self.y.le(&other.y),
+ z: self.z.le(&other.z),
+ w: self.w.le(&other.w),
+ }
+ }
+
+ #[inline]
+ fn cmplt(self, other: Self) -> Self::Mask {
+ Self::Mask {
+ x: self.x.lt(&other.x),
+ y: self.y.lt(&other.y),
+ z: self.z.lt(&other.z),
+ w: self.w.lt(&other.w),
+ }
+ }
+
+ // Lane-wise arithmetic against another vector.
+ #[inline]
+ fn add(self, other: Self) -> Self {
+ Self {
+ x: self.x + other.x,
+ y: self.y + other.y,
+ z: self.z + other.z,
+ w: self.w + other.w,
+ }
+ }
+
+ #[inline]
+ fn div(self, other: Self) -> Self {
+ Self {
+ x: self.x / other.x,
+ y: self.y / other.y,
+ z: self.z / other.z,
+ w: self.w / other.w,
+ }
+ }
+
+ #[inline]
+ fn mul(self, other: Self) -> Self {
+ Self {
+ x: self.x * other.x,
+ y: self.y * other.y,
+ z: self.z * other.z,
+ w: self.w * other.w,
+ }
+ }
+
+ #[inline]
+ fn sub(self, other: Self) -> Self {
+ Self {
+ x: self.x - other.x,
+ y: self.y - other.y,
+ z: self.z - other.z,
+ w: self.w - other.w,
+ }
+ }
+
+ // Lane-wise arithmetic against a broadcast scalar.
+ // `#[inline]` added for consistency with every sibling method in this impl
+ // and with the XY<T>/XYZ<T> implementations.
+ #[inline]
+ fn add_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x + other,
+ y: self.y + other,
+ z: self.z + other,
+ w: self.w + other,
+ }
+ }
+
+ #[inline]
+ fn sub_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x - other,
+ y: self.y - other,
+ z: self.z - other,
+ w: self.w - other,
+ }
+ }
+
+ #[inline]
+ fn mul_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x * other,
+ y: self.y * other,
+ z: self.z * other,
+ w: self.w * other,
+ }
+ }
+
+ #[inline]
+ fn div_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x / other,
+ y: self.y / other,
+ z: self.z / other,
+ w: self.w / other,
+ }
+ }
+
+ #[inline]
+ fn rem(self, other: Self) -> Self {
+ Self {
+ x: self.x % other.x,
+ y: self.y % other.y,
+ z: self.z % other.z,
+ w: self.w % other.w,
+ }
+ }
+
+ #[inline]
+ fn rem_scalar(self, other: T) -> Self {
+ Self {
+ x: self.x % other,
+ y: self.y % other,
+ z: self.z % other,
+ w: self.w % other,
+ }
+ }
+
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ Self {
+ x: self.x.min(other.x),
+ y: self.y.min(other.y),
+ z: self.z.min(other.z),
+ w: self.w.min(other.w),
+ }
+ }
+
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ Self {
+ x: self.x.max(other.x),
+ y: self.y.max(other.y),
+ z: self.z.max(other.z),
+ w: self.w.max(other.w),
+ }
+ }
+}
+
+// 2D-specific accessors and reductions.
+impl<T: NumEx> Vector2<T> for XY<T> {
+ #[inline(always)]
+ fn new(x: T, y: T) -> Self {
+ Self { x, y }
+ }
+
+ #[inline(always)]
+ fn x(self) -> T {
+ self.x
+ }
+
+ #[inline(always)]
+ fn y(self) -> T {
+ self.y
+ }
+
+ // XY<T> is already the canonical layout, so the reference casts are identity.
+ #[inline(always)]
+ fn as_ref_xy(&self) -> &XY<T> {
+ self
+ }
+
+ #[inline(always)]
+ fn as_mut_xy(&mut self) -> &mut XY<T> {
+ self
+ }
+
+ #[inline]
+ fn min_element(self) -> T {
+ self.x.min(self.y)
+ }
+
+ #[inline]
+ fn max_element(self) -> T {
+ self.x.max(self.y)
+ }
+
+ // Component-wise clamp of self into [min, max].
+ #[inline]
+ fn clamp(self, min: Self, max: Self) -> Self {
+ glam_assert!(min.x <= max.x);
+ glam_assert!(min.y <= max.y);
+ // we intentionally do not use `f32::clamp` because we don't
+ // want panics unless `glam-assert` feature is on.
+ Self {
+ x: self.x.max(min.x).min(max.x),
+ y: self.y.max(min.y).min(max.y),
+ }
+ }
+}
+
+// 3D-specific accessors and reductions.
+impl<T: NumEx> Vector3<T> for XYZ<T> {
+ #[inline(always)]
+ fn new(x: T, y: T, z: T) -> Self {
+ Self { x, y, z }
+ }
+
+ #[inline(always)]
+ fn x(self) -> T {
+ self.x
+ }
+
+ #[inline(always)]
+ fn y(self) -> T {
+ self.y
+ }
+
+ #[inline(always)]
+ fn z(self) -> T {
+ self.z
+ }
+
+ // XYZ<T> is already the canonical layout, so the reference casts are identity.
+ #[inline(always)]
+ fn as_ref_xyz(&self) -> &XYZ<T> {
+ self
+ }
+
+ #[inline(always)]
+ fn as_mut_xyz(&mut self) -> &mut XYZ<T> {
+ self
+ }
+
+ #[inline]
+ fn min_element(self) -> T {
+ self.x.min(self.y.min(self.z))
+ }
+
+ #[inline]
+ fn max_element(self) -> T {
+ self.x.max(self.y.max(self.z))
+ }
+
+ // Component-wise clamp of self into [min, max].
+ #[inline]
+ fn clamp(self, min: Self, max: Self) -> Self {
+ glam_assert!(min.x <= max.x);
+ glam_assert!(min.y <= max.y);
+ glam_assert!(min.z <= max.z);
+ // we intentionally do not use `f32::clamp` because we don't
+ // want panics unless `glam-assert` feature is on.
+ Self::new(
+ self.x.max(min.x).min(max.x),
+ self.y.max(min.y).min(max.y),
+ self.z.max(min.z).min(max.z),
+ )
+ }
+}
+
+// 4D-specific accessors and reductions.
+impl<T: NumEx> Vector4<T> for XYZW<T> {
+ #[inline(always)]
+ fn new(x: T, y: T, z: T, w: T) -> Self {
+ Self { x, y, z, w }
+ }
+
+ #[inline(always)]
+ fn x(self) -> T {
+ self.x
+ }
+
+ #[inline(always)]
+ fn y(self) -> T {
+ self.y
+ }
+
+ #[inline(always)]
+ fn z(self) -> T {
+ self.z
+ }
+
+ #[inline(always)]
+ fn w(self) -> T {
+ self.w
+ }
+
+ // XYZW<T> is already the canonical layout, so the reference casts are identity.
+ #[inline(always)]
+ fn as_ref_xyzw(&self) -> &XYZW<T> {
+ self
+ }
+
+ #[inline(always)]
+ fn as_mut_xyzw(&mut self) -> &mut XYZW<T> {
+ self
+ }
+
+ #[inline]
+ fn min_element(self) -> T {
+ self.x.min(self.y.min(self.z.min(self.w)))
+ }
+
+ #[inline]
+ fn max_element(self) -> T {
+ // Bug fix: this previously computed `self.z.min(self.w)` in the innermost
+ // reduction, returning the wrong element whenever `w` was the largest
+ // component. All three reductions must use `max`.
+ self.x.max(self.y.max(self.z.max(self.w)))
+ }
+
+ // Component-wise clamp of self into [min, max].
+ #[inline]
+ fn clamp(self, min: Self, max: Self) -> Self {
+ glam_assert!(min.x <= max.x);
+ glam_assert!(min.y <= max.y);
+ glam_assert!(min.z <= max.z);
+ glam_assert!(min.w <= max.w);
+ // we intentionally do not use `f32::clamp` because we don't
+ // want panics unless `glam-assert` feature is on.
+ Self {
+ x: self.x.max(min.x).min(max.x),
+ y: self.y.max(min.y).min(max.y),
+ z: self.z.max(min.z).min(max.z),
+ w: self.w.max(min.w).min(max.w),
+ }
+ }
+}
+
+// Negation for signed element types, plus marker impls whose behaviour comes
+// entirely from trait default methods.
+impl<T: SignedEx> SignedVector<T> for XY<T> {
+ #[inline]
+ fn neg(self) -> Self {
+ Self {
+ x: self.x.neg(),
+ y: self.y.neg(),
+ }
+ }
+}
+
+impl<T: SignedEx> SignedVector2<T> for XY<T> {}
+
+impl<T: SignedEx> SignedVector<T> for XYZ<T> {
+ #[inline]
+ fn neg(self) -> Self {
+ Self {
+ x: self.x.neg(),
+ y: self.y.neg(),
+ z: self.z.neg(),
+ }
+ }
+}
+
+impl<T: SignedEx> SignedVector<T> for XYZW<T> {
+ #[inline]
+ fn neg(self) -> Self {
+ Self {
+ x: self.x.neg(),
+ y: self.y.neg(),
+ z: self.z.neg(),
+ w: self.w.neg(),
+ }
+ }
+}
+
+impl<T: SignedEx> SignedVector3<T> for XYZ<T> {}
+impl<T: SignedEx> SignedVector4<T> for XYZW<T> {}
+
+impl<T: FloatEx> FloatVector2<T> for XY<T> {}
+impl<T: FloatEx> FloatVector3<T> for XYZ<T> {}
+impl<T: FloatEx> FloatVector4<T> for XYZW<T> {}
+
+// Narrowing conversions that truncate trailing components (z and/or w are dropped).
+impl<T> From<XYZ<T>> for XY<T> {
+ #[inline(always)]
+ fn from(v: XYZ<T>) -> Self {
+ Self { x: v.x, y: v.y }
+ }
+}
+
+impl<T> From<XYZW<T>> for XY<T> {
+ #[inline(always)]
+ fn from(v: XYZW<T>) -> Self {
+ Self { x: v.x, y: v.y }
+ }
+}
+
+impl<T> From<XYZW<T>> for XYZ<T> {
+ #[inline(always)]
+ fn from(v: XYZW<T>) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ }
+ }
+}
+
+// Constants for the 16-byte-aligned f32 3-vector storage type.
+impl VectorConst for XYZF32A16 {
+ const ZERO: Self = Self {
+ x: 0.0,
+ y: 0.0,
+ z: 0.0,
+ };
+ const ONE: Self = Self {
+ x: 1.0,
+ y: 1.0,
+ z: 1.0,
+ };
+}
+
+impl NanConstEx for XYZF32A16 {
+ const NAN: Self = Self {
+ x: f32::NAN,
+ y: f32::NAN,
+ z: f32::NAN,
+ };
+}
+
+// Unit axes.
+impl Vector3Const for XYZF32A16 {
+ const X: Self = Self {
+ x: 1.0,
+ y: 0.0,
+ z: 0.0,
+ };
+ const Y: Self = Self {
+ x: 0.0,
+ y: 1.0,
+ z: 0.0,
+ };
+ const Z: Self = Self {
+ x: 0.0,
+ y: 0.0,
+ z: 1.0,
+ };
+}
+
+// Vector operations for the aligned storage type, implemented by round-tripping
+// through the packed XYZ<f32> impl via the From conversions (behaviour is
+// therefore identical to XYZ<f32>; only the alignment differs).
+impl Vector<f32> for XYZF32A16 {
+ type Mask = XYZ<bool>;
+
+ #[inline]
+ fn splat(s: f32) -> Self {
+ Self { x: s, y: s, z: s }
+ }
+
+ #[inline]
+ fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
+ XYZ::select(mask, if_true.into(), if_false.into()).into()
+ }
+
+ #[inline]
+ fn cmpeq(self, other: Self) -> Self::Mask {
+ XYZ::cmpeq(self.into(), other.into())
+ }
+
+ #[inline]
+ fn cmpne(self, other: Self) -> Self::Mask {
+ XYZ::cmpne(self.into(), other.into())
+ }
+
+ #[inline]
+ fn cmpge(self, other: Self) -> Self::Mask {
+ XYZ::cmpge(self.into(), other.into())
+ }
+
+ #[inline]
+ fn cmpgt(self, other: Self) -> Self::Mask {
+ XYZ::cmpgt(self.into(), other.into())
+ }
+
+ #[inline]
+ fn cmple(self, other: Self) -> Self::Mask {
+ XYZ::cmple(self.into(), other.into())
+ }
+
+ #[inline]
+ fn cmplt(self, other: Self) -> Self::Mask {
+ XYZ::cmplt(self.into(), other.into())
+ }
+
+ #[inline]
+ fn add(self, other: Self) -> Self {
+ XYZ::add(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn div(self, other: Self) -> Self {
+ XYZ::div(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn mul(self, other: Self) -> Self {
+ XYZ::mul(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn rem(self, other: Self) -> Self {
+ XYZ::rem(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn sub(self, other: Self) -> Self {
+ XYZ::sub(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn add_scalar(self, other: f32) -> Self {
+ XYZ::add_scalar(self.into(), other).into()
+ }
+
+ #[inline]
+ fn sub_scalar(self, other: f32) -> Self {
+ XYZ::sub_scalar(self.into(), other).into()
+ }
+
+ #[inline]
+ fn mul_scalar(self, other: f32) -> Self {
+ XYZ::mul_scalar(self.into(), other).into()
+ }
+
+ #[inline]
+ fn div_scalar(self, other: f32) -> Self {
+ XYZ::div_scalar(self.into(), other).into()
+ }
+
+ #[inline]
+ fn rem_scalar(self, other: f32) -> Self {
+ XYZ::rem_scalar(self.into(), other).into()
+ }
+
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ XYZ::min(self.into(), other.into()).into()
+ }
+
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ XYZ::max(self.into(), other.into()).into()
+ }
+}
+
+// 3D accessors for the aligned storage type; reductions and clamp delegate to
+// the packed XYZ<f32> implementation.
+impl Vector3<f32> for XYZF32A16 {
+ #[inline(always)]
+ fn new(x: f32, y: f32, z: f32) -> Self {
+ XYZF32A16 { x, y, z }
+ }
+
+ #[inline(always)]
+ fn x(self) -> f32 {
+ self.x
+ }
+
+ #[inline(always)]
+ fn y(self) -> f32 {
+ self.y
+ }
+
+ #[inline(always)]
+ fn z(self) -> f32 {
+ self.z
+ }
+
+ #[inline(always)]
+ fn as_ref_xyz(&self) -> &XYZ<f32> {
+ // SAFETY: relies on XYZF32A16 and XYZ<f32> sharing the same field layout
+ // (three consecutive f32s), XYZF32A16 only adding stricter alignment —
+ // presumably both are repr(C); TODO confirm at the type definitions.
+ unsafe { &*(self as *const Self).cast() }
+ }
+
+ #[inline(always)]
+ fn as_mut_xyz(&mut self) -> &mut XYZ<f32> {
+ // SAFETY: same layout argument as `as_ref_xyz` above.
+ unsafe { &mut *(self as *mut Self).cast() }
+ }
+
+ #[inline(always)]
+ fn min_element(self) -> f32 {
+ XYZ::min_element(self.into())
+ }
+
+ #[inline(always)]
+ fn max_element(self) -> f32 {
+ XYZ::max_element(self.into())
+ }
+
+ #[inline(always)]
+ fn clamp(self, min: Self, max: Self) -> Self {
+ XYZ::clamp(self.into(), min.into(), max.into()).into()
+ }
+}
+
+impl SignedVector<f32> for XYZF32A16 {
+ #[inline(always)]
+ fn neg(self) -> Self {
+ XYZ::neg(self.into()).into()
+ }
+}
+
+impl SignedVector3<f32> for XYZF32A16 {}
+impl FloatVector3<f32> for XYZF32A16 {}
+
+// 2D bitwise and shifting
+
+// Lane-wise shift and bitwise operators for integer 2-vectors; a "scalar" op
+// broadcasts one right-hand value to every lane, a "vector" op pairs lanes.
+impl<T, Rhs> ScalarShiftOps<Rhs> for XY<T>
+where
+ T: IntegerShiftOps<Rhs>,
+ Rhs: Copy,
+{
+ #[inline(always)]
+ fn scalar_shl(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x << rhs,
+ y: self.y << rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_shr(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x >> rhs,
+ y: self.y >> rhs,
+ }
+ }
+}
+
+impl<T> ScalarBitOps<T> for XY<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn scalar_bitand(self, rhs: T) -> Self {
+ Self {
+ x: self.x & rhs,
+ y: self.y & rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitor(self, rhs: T) -> Self {
+ Self {
+ x: self.x | rhs,
+ y: self.y | rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitxor(self, rhs: T) -> Self {
+ Self {
+ x: self.x ^ rhs,
+ y: self.y ^ rhs,
+ }
+ }
+}
+
+impl<T, Rhs> VectorShiftOps<XY<Rhs>> for XY<T>
+where
+ T: Copy + IntegerShiftOps<Rhs>,
+{
+ #[inline(always)]
+ fn vector_shl(self, rhs: XY<Rhs>) -> Self {
+ Self {
+ x: self.x << rhs.x,
+ y: self.y << rhs.y,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_shr(self, rhs: XY<Rhs>) -> Self {
+ Self {
+ x: self.x >> rhs.x,
+ y: self.y >> rhs.y,
+ }
+ }
+}
+
+impl<T> VectorBitOps<XY<T>> for XY<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitand(self, rhs: Self) -> Self {
+ Self {
+ x: self.x & rhs.x,
+ y: self.y & rhs.y,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x | rhs.x,
+ y: self.y | rhs.y,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitxor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x ^ rhs.x,
+ y: self.y ^ rhs.y,
+ }
+ }
+}
+
+// 3D bitwise and shifting
+
+// Lane-wise shift and bitwise operators for integer 3-vectors; mirrors the
+// 2D impls above with an extra z lane.
+impl<T, Rhs> ScalarShiftOps<Rhs> for XYZ<T>
+where
+ T: IntegerShiftOps<Rhs>,
+ Rhs: Copy,
+{
+ #[inline(always)]
+ fn scalar_shl(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x << rhs,
+ y: self.y << rhs,
+ z: self.z << rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_shr(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x >> rhs,
+ y: self.y >> rhs,
+ z: self.z >> rhs,
+ }
+ }
+}
+
+impl<T> ScalarBitOps<T> for XYZ<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn scalar_bitand(self, rhs: T) -> Self {
+ Self {
+ x: self.x & rhs,
+ y: self.y & rhs,
+ z: self.z & rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitor(self, rhs: T) -> Self {
+ Self {
+ x: self.x | rhs,
+ y: self.y | rhs,
+ z: self.z | rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitxor(self, rhs: T) -> Self {
+ Self {
+ x: self.x ^ rhs,
+ y: self.y ^ rhs,
+ z: self.z ^ rhs,
+ }
+ }
+}
+
+impl<T, Rhs> VectorShiftOps<XYZ<Rhs>> for XYZ<T>
+where
+ T: Copy + IntegerShiftOps<Rhs>,
+{
+ #[inline(always)]
+ fn vector_shl(self, rhs: XYZ<Rhs>) -> Self {
+ Self {
+ x: self.x << rhs.x,
+ y: self.y << rhs.y,
+ z: self.z << rhs.z,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_shr(self, rhs: XYZ<Rhs>) -> Self {
+ Self {
+ x: self.x >> rhs.x,
+ y: self.y >> rhs.y,
+ z: self.z >> rhs.z,
+ }
+ }
+}
+
+impl<T> VectorBitOps<XYZ<T>> for XYZ<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitand(self, rhs: Self) -> Self {
+ Self {
+ x: self.x & rhs.x,
+ y: self.y & rhs.y,
+ z: self.z & rhs.z,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x | rhs.x,
+ y: self.y | rhs.y,
+ z: self.z | rhs.z,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitxor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x ^ rhs.x,
+ y: self.y ^ rhs.y,
+ z: self.z ^ rhs.z,
+ }
+ }
+}
+
+// 4D bitwise and shifting
+
+// Lane-wise shift and bitwise operators for integer 4-vectors; mirrors the
+// 2D/3D impls above with an extra w lane.
+impl<T, Rhs> ScalarShiftOps<Rhs> for XYZW<T>
+where
+ T: IntegerShiftOps<Rhs>,
+ Rhs: Copy,
+{
+ #[inline(always)]
+ fn scalar_shl(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x << rhs,
+ y: self.y << rhs,
+ z: self.z << rhs,
+ w: self.w << rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_shr(self, rhs: Rhs) -> Self {
+ Self {
+ x: self.x >> rhs,
+ y: self.y >> rhs,
+ z: self.z >> rhs,
+ w: self.w >> rhs,
+ }
+ }
+}
+
+impl<T> ScalarBitOps<T> for XYZW<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn scalar_bitand(self, rhs: T) -> Self {
+ Self {
+ x: self.x & rhs,
+ y: self.y & rhs,
+ z: self.z & rhs,
+ w: self.w & rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitor(self, rhs: T) -> Self {
+ Self {
+ x: self.x | rhs,
+ y: self.y | rhs,
+ z: self.z | rhs,
+ w: self.w | rhs,
+ }
+ }
+
+ #[inline(always)]
+ fn scalar_bitxor(self, rhs: T) -> Self {
+ Self {
+ x: self.x ^ rhs,
+ y: self.y ^ rhs,
+ z: self.z ^ rhs,
+ w: self.w ^ rhs,
+ }
+ }
+}
+
+impl<T, Rhs> VectorShiftOps<XYZW<Rhs>> for XYZW<T>
+where
+ T: Copy + IntegerShiftOps<Rhs>,
+{
+ #[inline(always)]
+ fn vector_shl(self, rhs: XYZW<Rhs>) -> Self {
+ Self {
+ x: self.x << rhs.x,
+ y: self.y << rhs.y,
+ z: self.z << rhs.z,
+ w: self.w << rhs.w,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_shr(self, rhs: XYZW<Rhs>) -> Self {
+ Self {
+ x: self.x >> rhs.x,
+ y: self.y >> rhs.y,
+ z: self.z >> rhs.z,
+ w: self.w >> rhs.w,
+ }
+ }
+}
+
+impl<T> VectorBitOps<XYZW<T>> for XYZW<T>
+where
+ T: Copy + IntegerBitOps,
+{
+ #[inline(always)]
+ fn not(self) -> Self {
+ Self {
+ x: !self.x,
+ y: !self.y,
+ z: !self.z,
+ w: !self.w,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitand(self, rhs: Self) -> Self {
+ Self {
+ x: self.x & rhs.x,
+ y: self.y & rhs.y,
+ z: self.z & rhs.z,
+ w: self.w & rhs.w,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x | rhs.x,
+ y: self.y | rhs.y,
+ z: self.z | rhs.z,
+ w: self.w | rhs.w,
+ }
+ }
+
+ #[inline(always)]
+ fn vector_bitxor(self, rhs: Self) -> Self {
+ Self {
+ x: self.x ^ rhs.x,
+ y: self.y ^ rhs.y,
+ z: self.z ^ rhs.z,
+ w: self.w ^ rhs.w,
+ }
+ }
+}
diff --git a/src/core/sse2/float.rs b/src/core/sse2/float.rs
new file mode 100644
index 0000000..9c3ac8e
--- /dev/null
+++ b/src/core/sse2/float.rs
@@ -0,0 +1,280 @@
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
/// Builds a `__m128` constant from an array of four `u32` bit patterns by
/// punning through `crate::cast::UVec4Cast` in a const context.
// NOTE(review): soundness relies on the union's `ux4` and `m128` variants
// sharing size and alignment — confirm against src/cast.rs.
macro_rules! const_u32x4 {
    ($ux4:expr) => {
        unsafe { $crate::cast::UVec4Cast { ux4: $ux4 }.m128 }
    };
}
+
// Bit masks and polynomial coefficients shared by the rounding and trig
// helpers below.

/// Every bit set except the per-lane sign bit (0x7fff_ffff).
const PS_INV_SIGN_MASK: __m128 = const_u32x4!([!0x8000_0000; 4]);
/// Only the per-lane sign bit (0x8000_0000).
const PS_SIGN_MASK: __m128 = const_u32x4!([0x8000_0000; 4]);
/// 2^23: the smallest f32 magnitude at which no fractional bits remain.
const PS_NO_FRACTION: __m128 = const_f32x4!([8388608.0; 4]);
/// -0.0 in every lane (same bit pattern as PS_SIGN_MASK).
const PS_NEGATIVE_ZERO: __m128 = const_u32x4!([0x8000_0000; 4]);
const PS_PI: __m128 = const_f32x4!([core::f32::consts::PI; 4]);
const PS_HALF_PI: __m128 = const_f32x4!([core::f32::consts::FRAC_PI_2; 4]);
// Minimax polynomial coefficients used by `m128_sin` (11-degree approximation).
const PS_SIN_COEFFICIENTS0: __m128 =
    const_f32x4!([-0.16666667, 0.008_333_331, -0.00019840874, 2.752_556_2e-6]);
const PS_SIN_COEFFICIENTS1: __m128 = const_f32x4!([
    -2.388_985_9e-8,
    -0.16665852, /*Est1*/
    0.008_313_95, /*Est2*/
    -0.000_185_246_7 /*Est3*/
]);
const PS_ONE: __m128 = const_f32x4!([1.0; 4]);
const PS_TWO_PI: __m128 = const_f32x4!([core::f32::consts::TAU; 4]);
/// 1 / (2*PI), used by `m128_mod_angles` for range reduction.
const PS_RECIPROCAL_TWO_PI: __m128 = const_f32x4!([0.159_154_94; 4]);
+
/// Clears the sign bit of every lane, yielding the lane-wise absolute value.
#[inline]
pub(crate) unsafe fn m128_abs(v: __m128) -> __m128 {
    // abs(x) == x & !sign_bit; `_mm_andnot_ps(a, b)` computes `!a & b`, and
    // -0.0 is exactly the sign-bit pattern (0x8000_0000).
    _mm_andnot_ps(_mm_set1_ps(-0.0), v)
}
+
/// Rounds each lane to the nearest integer; ties follow the current FP
/// rounding mode (round-to-nearest-even by default). Lanes with
/// |x| > 2^23 (already integral), NaN and infinity pass through unchanged.
#[inline]
pub(crate) unsafe fn m128_round(v: __m128) -> __m128 {
    // Based on https://github.com/microsoft/DirectXMath `XMVectorRound`
    let sign = _mm_and_ps(v, PS_SIGN_MASK);
    // Adding then subtracting 2^23 (carrying v's sign) makes the FPU discard
    // the fractional bits via its rounding mode.
    let s_magic = _mm_or_ps(PS_NO_FRACTION, sign);
    let r1 = _mm_add_ps(v, s_magic);
    let r1 = _mm_sub_ps(r1, s_magic);
    let r2 = _mm_and_ps(v, PS_INV_SIGN_MASK);
    // |v| <= 2^23 selects the rounded result, otherwise the original value.
    let mask = _mm_cmple_ps(r2, PS_NO_FRACTION);
    let r2 = _mm_andnot_ps(mask, v);
    let r1 = _mm_and_ps(r1, mask);
    // The two selections are bit-disjoint, so xor acts as a select/or.
    _mm_xor_ps(r1, r2)
}
+
/// Rounds each lane toward negative infinity. NaN, infinity and values with
/// magnitude >= 2^23 are returned unchanged.
#[inline]
pub(crate) unsafe fn m128_floor(v: __m128) -> __m128 {
    // Based on https://github.com/microsoft/DirectXMath `XMVectorFloor`
    // To handle NAN, INF and numbers greater than 8388608, use masking
    let test = _mm_and_si128(_mm_castps_si128(v), _mm_castps_si128(PS_INV_SIGN_MASK));
    let test = _mm_cmplt_epi32(test, _mm_castps_si128(PS_NO_FRACTION));
    // Truncate
    let vint = _mm_cvttps_epi32(v);
    let result = _mm_cvtepi32_ps(vint);
    // Truncation rounded toward zero; negative non-integers came out too big.
    let larger = _mm_cmpgt_ps(result, v);
    // 0 -> 0, 0xffffffff -> -1.0f
    let larger = _mm_cvtepi32_ps(_mm_castps_si128(larger));
    let result = _mm_add_ps(result, larger);
    // All numbers less than 8388608 will use the round to int
    let result = _mm_and_ps(result, _mm_castsi128_ps(test));
    // All others, use the ORIGINAL value
    let test = _mm_andnot_si128(test, _mm_castps_si128(v));
    _mm_or_ps(result, _mm_castsi128_ps(test))
}
+
/// Rounds each lane toward positive infinity. NaN, infinity and values with
/// magnitude >= 2^23 are returned unchanged.
#[inline]
pub(crate) unsafe fn m128_ceil(v: __m128) -> __m128 {
    // Based on https://github.com/microsoft/DirectXMath `XMVectorCeil`
    // To handle NAN, INF and numbers greater than 8388608, use masking
    let test = _mm_and_si128(_mm_castps_si128(v), _mm_castps_si128(PS_INV_SIGN_MASK));
    let test = _mm_cmplt_epi32(test, _mm_castps_si128(PS_NO_FRACTION));
    // Truncate
    let vint = _mm_cvttps_epi32(v);
    let result = _mm_cvtepi32_ps(vint);
    // Truncation rounded toward zero; positive non-integers came out too small.
    let smaller = _mm_cmplt_ps(result, v);
    // 0 -> 0, 0xffffffff -> -1.0f
    let smaller = _mm_cvtepi32_ps(_mm_castps_si128(smaller));
    let result = _mm_sub_ps(result, smaller);
    // All numbers less than 8388608 will use the round to int
    let result = _mm_and_ps(result, _mm_castsi128_ps(test));
    // All others, use the ORIGINAL value
    let test = _mm_andnot_si128(test, _mm_castps_si128(v));
    _mm_or_ps(result, _mm_castsi128_ps(test))
}
+
/// Computes `a * b + c` per lane. A fused multiply-add is used only when the
/// `fast-math` feature AND the `fma` target feature are both enabled, so
/// default builds stay bit-identical across platforms.
#[inline(always)]
pub(crate) unsafe fn m128_mul_add(a: __m128, b: __m128, c: __m128) -> __m128 {
    // Only enable fused multiply-adds here if "fast-math" is enabled and the
    // platform supports it. Otherwise this may break cross-platform determinism.
    #[cfg(all(feature = "fast-math", target_feature = "fma"))]
    {
        _mm_fmadd_ps(a, b, c)
    }

    // Exact complement of the cfg above, so exactly one branch compiles.
    #[cfg(any(not(feature = "fast-math"), not(target_feature = "fma")))]
    {
        _mm_add_ps(_mm_mul_ps(a, b), c)
    }
}
+
/// Computes `c - a * b` per lane (a "negated multiply-subtract").
#[inline(always)]
pub(crate) unsafe fn m128_neg_mul_sub(a: __m128, b: __m128, c: __m128) -> __m128 {
    let product = _mm_mul_ps(a, b);
    _mm_sub_ps(c, product)
}
+
/// Returns a vector whose components are the corresponding components of Angles modulo 2PI.
/// The result lies in `[-PI, PI]`, since the reduction rounds to the nearest
/// multiple of 2*PI instead of truncating.
#[inline]
pub(crate) unsafe fn m128_mod_angles(angles: __m128) -> __m128 {
    // Based on https://github.com/microsoft/DirectXMath `XMVectorModAngles`
    // angles - round(angles / (2*PI)) * (2*PI)
    let v = _mm_mul_ps(angles, PS_RECIPROCAL_TWO_PI);
    let v = m128_round(v);
    m128_neg_mul_sub(PS_TWO_PI, v, angles)
}
+
/// Computes the sine of the angle in each lane of `v`. Values outside
/// the bounds of PI may produce an increasing error as the input angle
/// drifts from `[-PI, PI]`.
#[inline]
pub(crate) unsafe fn m128_sin(v: __m128) -> __m128 {
    // Based on https://github.com/microsoft/DirectXMath `XMVectorSin`

    // 11-degree minimax approximation

    // Force the value within the bounds of pi
    let mut x = m128_mod_angles(v);

    // Map in [-pi/2,pi/2] with sin(y) = sin(x).
    let sign = _mm_and_ps(x, PS_NEGATIVE_ZERO);
    // pi when x >= 0, -pi when x < 0
    let c = _mm_or_ps(PS_PI, sign);
    // |x|
    let absx = _mm_andnot_ps(sign, x);
    let rflx = _mm_sub_ps(c, x);
    // Use x directly inside [-pi/2, pi/2], otherwise reflect about +/-pi.
    let comp = _mm_cmple_ps(absx, PS_HALF_PI);
    let select0 = _mm_and_ps(comp, x);
    let select1 = _mm_andnot_ps(comp, rflx);
    x = _mm_or_ps(select0, select1);

    let x2 = _mm_mul_ps(x, x);

    // Compute polynomial approximation: Horner evaluation in x^2 (the sine
    // polynomial is odd, so the final multiply by x restores the odd powers).
    const SC1: __m128 = PS_SIN_COEFFICIENTS1;
    let v_constants_b = _mm_shuffle_ps(SC1, SC1, 0b00_00_00_00);

    const SC0: __m128 = PS_SIN_COEFFICIENTS0;
    let mut v_constants = _mm_shuffle_ps(SC0, SC0, 0b11_11_11_11);
    let mut result = m128_mul_add(v_constants_b, x2, v_constants);

    v_constants = _mm_shuffle_ps(SC0, SC0, 0b10_10_10_10);
    result = m128_mul_add(result, x2, v_constants);

    v_constants = _mm_shuffle_ps(SC0, SC0, 0b01_01_01_01);
    result = m128_mul_add(result, x2, v_constants);

    v_constants = _mm_shuffle_ps(SC0, SC0, 0b00_00_00_00);
    result = m128_mul_add(result, x2, v_constants);

    result = m128_mul_add(result, x2, PS_ONE);
    result = _mm_mul_ps(result, x);

    result
}
+
+// Based on http://gruntthepeon.free.fr/ssemath/sse_mathfun.h
+// #[cfg(target_feature = "sse2")]
+// unsafe fn sin_cos_sse2(x: __m128) -> (__m128, __m128) {
+// let mut sign_bit_sin = x;
+// // take the absolute value
+// let mut x = _mm_and_ps(x, PS_INV_SIGN_MASK.m128);
+// // extract the sign bit (upper one)
+// sign_bit_sin = _mm_and_ps(sign_bit_sin, PS_SIGN_MASK.m128);
+
+// // scale by 4/Pi
+// let mut y = _mm_mul_ps(x, PS_CEPHES_FOPI.m128);
+
+// // store the integer part of y in emm2
+// let mut emm2 = _mm_cvttps_epi32(y);
+
+// // j=(j+1) & (~1) (see the cephes sources)
+// emm2 = _mm_add_epi32(emm2, PI32_1.m128i);
+// emm2 = _mm_and_si128(emm2, PI32_INV_1.m128i);
+// y = _mm_cvtepi32_ps(emm2);
+
+// let mut emm4 = emm2;
+
+// /* get the swap sign flag for the sine */
+// let mut emm0 = _mm_and_si128(emm2, PI32_4.m128i);
+// emm0 = _mm_slli_epi32(emm0, 29);
+// let swap_sign_bit_sin = _mm_castsi128_ps(emm0);
+
+// /* get the polynom selection mask for the sine*/
+// emm2 = _mm_and_si128(emm2, PI32_2.m128i);
+// emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+// let poly_mask = _mm_castsi128_ps(emm2);
+
+// /* The magic pass: "Extended precision modular arithmetic"
+// x = ((x - y * DP1) - y * DP2) - y * DP3; */
+// let mut xmm1 = PS_MINUS_CEPHES_DP1.m128;
+// let mut xmm2 = PS_MINUS_CEPHES_DP2.m128;
+// let mut xmm3 = PS_MINUS_CEPHES_DP3.m128;
+// xmm1 = _mm_mul_ps(y, xmm1);
+// xmm2 = _mm_mul_ps(y, xmm2);
+// xmm3 = _mm_mul_ps(y, xmm3);
+// x = _mm_add_ps(x, xmm1);
+// x = _mm_add_ps(x, xmm2);
+// x = _mm_add_ps(x, xmm3);
+
+// emm4 = _mm_sub_epi32(emm4, PI32_2.m128i);
+// emm4 = _mm_andnot_si128(emm4, PI32_4.m128i);
+// emm4 = _mm_slli_epi32(emm4, 29);
+// let sign_bit_cos = _mm_castsi128_ps(emm4);
+
+// sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);
+
+// // Evaluate the first polynom (0 <= x <= Pi/4)
+// let z = _mm_mul_ps(x, x);
+// y = PS_COSCOF_P0.m128;
+
+// y = _mm_mul_ps(y, z);
+// y = _mm_add_ps(y, PS_COSCOF_P1.m128);
+// y = _mm_mul_ps(y, z);
+// y = _mm_add_ps(y, PS_COSCOF_P2.m128);
+// y = _mm_mul_ps(y, z);
+// y = _mm_mul_ps(y, z);
+// let tmp = _mm_mul_ps(z, PS_0_5.m128);
+// y = _mm_sub_ps(y, tmp);
+// y = _mm_add_ps(y, PS_1_0.m128);
+
+// // Evaluate the second polynom (Pi/4 <= x <= 0)
+// let mut y2 = PS_SINCOF_P0.m128;
+// y2 = _mm_mul_ps(y2, z);
+// y2 = _mm_add_ps(y2, PS_SINCOF_P1.m128);
+// y2 = _mm_mul_ps(y2, z);
+// y2 = _mm_add_ps(y2, PS_SINCOF_P2.m128);
+// y2 = _mm_mul_ps(y2, z);
+// y2 = _mm_mul_ps(y2, x);
+// y2 = _mm_add_ps(y2, x);
+
+// // select the correct result from the two polynoms
+// xmm3 = poly_mask;
+// let ysin2 = _mm_and_ps(xmm3, y2);
+// let ysin1 = _mm_andnot_ps(xmm3, y);
+// y2 = _mm_sub_ps(y2, ysin2);
+// y = _mm_sub_ps(y, ysin1);
+
+// xmm1 = _mm_add_ps(ysin1, ysin2);
+// xmm2 = _mm_add_ps(y, y2);
+
+// // update the sign
+// (
+// _mm_xor_ps(xmm1, sign_bit_sin),
+// _mm_xor_ps(xmm2, sign_bit_cos),
+// )
+// }
+
/// Sweeps `[-PI, PI]` in steps of PI/8192 (16384 samples) and checks every
/// lane of `m128_sin` against `f32::sin` within 1e-6.
#[test]
fn test_sse2_m128_sin() {
    use crate::core::traits::vector::*;
    use core::f32::consts::PI;

    fn test_sse2_m128_sin_angle(a: f32) {
        // All four lanes hold the same angle, so one splat comparison covers them.
        let v = unsafe { m128_sin(_mm_set_ps1(a)) };
        let v = v.as_ref_xyzw();
        let a_sin = a.sin();
        // dbg!((a, a_sin, v));
        assert!(v.abs_diff_eq(Vector::splat(a_sin), 1e-6));
    }

    let mut a = -PI;
    let end = PI;
    let step = PI / 8192.0;

    while a <= end {
        test_sse2_m128_sin_angle(a);
        a += step;
    }
}
diff --git a/src/core/sse2/matrix.rs b/src/core/sse2/matrix.rs
new file mode 100644
index 0000000..5850928
--- /dev/null
+++ b/src/core/sse2/matrix.rs
@@ -0,0 +1,558 @@
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+use core::mem::MaybeUninit;
+
+use crate::core::{
+ storage::{Align16, Columns2, Columns3, Columns4, XY, XYZ},
+ traits::{
+ matrix::{
+ FloatMatrix2x2, FloatMatrix3x3, FloatMatrix4x4, Matrix, Matrix2x2, Matrix3x3,
+ Matrix4x4, MatrixConst,
+ },
+ projection::ProjectionMatrix,
+ scalar::NanConstEx,
+ vector::{FloatVector4, Vector, Vector4, Vector4Const},
+ },
+};
+
// __m128 as a Matrix2x2: the four elements are packed column-major as
// [m00, m01, m10, m11] in a single register (see `Matrix2x2::new` below).
impl MatrixConst for __m128 {
    const ZERO: __m128 = const_f32x4!([0.0, 0.0, 0.0, 0.0]);
    const IDENTITY: __m128 = const_f32x4!([1.0, 0.0, 0.0, 1.0]);
}
+
+impl Matrix<f32> for __m128 {}
+
impl Matrix2x2<f32, XY<f32>> for __m128 {
    /// Packs a 2x2 matrix into one register as `[m00, m01, m10, m11]`
    /// (`_mm_set_ps` takes its arguments from the high lane down to lane 0).
    #[inline(always)]
    fn new(m00: f32, m01: f32, m10: f32, m11: f32) -> Self {
        unsafe { _mm_set_ps(m11, m10, m01, m00) }
    }

    #[inline(always)]
    fn from_cols(x_axis: XY<f32>, y_axis: XY<f32>) -> Self {
        Matrix2x2::new(x_axis.x, x_axis.y, y_axis.x, y_axis.y)
    }

    /// Reinterprets the low two lanes as the first column.
    // NOTE(review): relies on `Columns2<XY<f32>>` sharing the 16-byte layout
    // of `__m128` — confirm against `core::storage`.
    #[inline(always)]
    fn x_axis(&self) -> &XY<f32> {
        unsafe { &(*(self as *const Self).cast::<Columns2<XY<f32>>>()).x_axis }
    }

    /// Reinterprets the high two lanes as the second column.
    #[inline(always)]
    fn y_axis(&self) -> &XY<f32> {
        unsafe { &(*(self as *const Self).cast::<Columns2<XY<f32>>>()).y_axis }
    }

    #[inline]
    fn determinant(&self) -> f32 {
        // self.x_axis.x * self.y_axis.y - self.x_axis.y * self.y_axis.x
        unsafe {
            let abcd = *self;
            let dcba = _mm_shuffle_ps(abcd, abcd, 0b00_01_10_11);
            let prod = _mm_mul_ps(abcd, dcba);
            // lane0 = a*d, lane1 = b*c; their difference is the determinant.
            let det = _mm_sub_ps(prod, _mm_shuffle_ps(prod, prod, 0b01_01_01_01));
            _mm_cvtss_f32(det)
        }
    }

    /// Transpose by swapping the two off-diagonal lanes (1 and 2).
    #[inline(always)]
    fn transpose(&self) -> Self {
        unsafe { _mm_shuffle_ps(*self, *self, 0b11_01_10_00) }
    }

    #[inline]
    fn mul_vector(&self, other: XY<f32>) -> XY<f32> {
        unsafe {
            let abcd = *self;
            let xxyy = _mm_set_ps(other.y, other.y, other.x, other.x);
            let axbxcydy = _mm_mul_ps(abcd, xxyy);
            let cydyaxbx = _mm_shuffle_ps(axbxcydy, axbxcydy, 0b01_00_11_10);
            let result = _mm_add_ps(axbxcydy, cydyaxbx);
            // Spill to aligned scratch storage and read back the low XY pair.
            let mut out: MaybeUninit<Align16<XY<f32>>> = MaybeUninit::uninit();
            _mm_store_ps(out.as_mut_ptr().cast(), result);
            out.assume_init().0
        }
    }

    #[inline]
    fn mul_matrix(&self, other: &Self) -> Self {
        unsafe {
            let abcd = *self;
            let other = *other;
            // Broadcast each column of `other`, multiply against self and sum
            // pairs — the same pattern as `mul_vector`, done for both columns.
            let xxyy0 = _mm_shuffle_ps(other, other, 0b01_01_00_00);
            let xxyy1 = _mm_shuffle_ps(other, other, 0b11_11_10_10);
            let axbxcydy0 = _mm_mul_ps(abcd, xxyy0);
            let axbxcydy1 = _mm_mul_ps(abcd, xxyy1);
            let cydyaxbx0 = _mm_shuffle_ps(axbxcydy0, axbxcydy0, 0b01_00_11_10);
            let cydyaxbx1 = _mm_shuffle_ps(axbxcydy1, axbxcydy1, 0b01_00_11_10);
            let result0 = _mm_add_ps(axbxcydy0, cydyaxbx0);
            let result1 = _mm_add_ps(axbxcydy1, cydyaxbx1);
            // Repack the two result columns into a single register.
            _mm_shuffle_ps(result0, result1, 0b01_00_01_00)
        }
    }

    /// Multiplies every element by the scalar `other`.
    #[inline]
    fn mul_scalar(&self, other: f32) -> Self {
        unsafe { _mm_mul_ps(*self, _mm_set_ps1(other)) }
    }

    #[inline]
    fn add_matrix(&self, other: &Self) -> Self {
        unsafe { _mm_add_ps(*self, *other) }
    }

    #[inline]
    fn sub_matrix(&self, other: &Self) -> Self {
        unsafe { _mm_sub_ps(*self, *other) }
    }
}
+
impl FloatMatrix2x2<f32, XY<f32>> for __m128 {
    #[inline]
    fn abs_diff_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        FloatVector4::abs_diff_eq(*self, *other, max_abs_diff)
    }

    /// Inverse via the 2x2 adjugate divided by the determinant.
    #[inline]
    fn inverse(&self) -> Self {
        unsafe {
            const SIGN: __m128 = const_f32x4!([1.0, -1.0, -1.0, 1.0]);
            let abcd = *self;
            let dcba = _mm_shuffle_ps(abcd, abcd, 0b00_01_10_11);
            let prod = _mm_mul_ps(abcd, dcba);
            let sub = _mm_sub_ps(prod, _mm_shuffle_ps(prod, prod, 0b01_01_01_01));
            // Broadcast the determinant to all lanes.
            let det = _mm_shuffle_ps(sub, sub, 0b00_00_00_00);
            // Signed reciprocal determinant; a singular input produces
            // non-finite lanes, caught by glam_assert.
            let tmp = _mm_div_ps(SIGN, det);
            glam_assert!(tmp.is_finite());
            // Adjugate lane order [d, b, c, a]; SIGN flips b and c.
            let dbca = _mm_shuffle_ps(abcd, abcd, 0b00_10_01_11);
            _mm_mul_ps(dbca, tmp)
        }
    }

    /// Negates every element by flipping each lane's sign bit.
    #[inline]
    fn neg_matrix(&self) -> Self {
        unsafe { _mm_xor_ps(*self, _mm_set1_ps(-0.0)) }
    }
}
+
impl MatrixConst for Columns3<__m128> {
    /// All-zero 3x3 matrix.
    const ZERO: Columns3<__m128> = Columns3 {
        x_axis: __m128::ZERO,
        y_axis: __m128::ZERO,
        z_axis: __m128::ZERO,
    };
    /// Identity: the unit X/Y/Z basis vectors as columns.
    const IDENTITY: Columns3<__m128> = Columns3 {
        x_axis: __m128::X,
        y_axis: __m128::Y,
        z_axis: __m128::Z,
    };
}
+
impl NanConstEx for Columns3<__m128> {
    /// Every element NaN, used as a poison/sentinel matrix value.
    const NAN: Columns3<__m128> = Columns3 {
        x_axis: __m128::NAN,
        y_axis: __m128::NAN,
        z_axis: __m128::NAN,
    };
}
+
+impl Matrix<f32> for Columns3<__m128> {}
+
impl Matrix3x3<f32, __m128> for Columns3<__m128> {
    #[inline(always)]
    fn from_cols(x_axis: __m128, y_axis: __m128, z_axis: __m128) -> Self {
        Self {
            x_axis,
            y_axis,
            z_axis,
        }
    }

    #[inline(always)]
    fn x_axis(&self) -> &__m128 {
        &self.x_axis
    }

    #[inline(always)]
    fn y_axis(&self) -> &__m128 {
        &self.y_axis
    }

    #[inline(always)]
    fn z_axis(&self) -> &__m128 {
        &self.z_axis
    }

    /// 3x3 transpose using 4-lane shuffles. The w lane of each output column
    /// ends up duplicating that column's z element and must be ignored.
    #[inline]
    fn transpose(&self) -> Self {
        unsafe {
            // tmp0 = [x0, x1, y0, y1], tmp1 = [x2, x3, y2, y3]
            let tmp0 = _mm_shuffle_ps(self.x_axis, self.y_axis, 0b01_00_01_00);
            let tmp1 = _mm_shuffle_ps(self.x_axis, self.y_axis, 0b11_10_11_10);

            Self {
                x_axis: _mm_shuffle_ps(tmp0, self.z_axis, 0b00_00_10_00),
                y_axis: _mm_shuffle_ps(tmp0, self.z_axis, 0b01_01_11_01),
                z_axis: _mm_shuffle_ps(tmp1, self.z_axis, 0b10_10_10_00),
            }
        }
    }
}
+
impl FloatMatrix3x3<f32, __m128> for Columns3<__m128> {
    /// Affine 2D point transform: scales the basis columns by x/y and adds
    /// the translation stored in `z_axis`.
    #[inline]
    fn transform_point2(&self, other: XY<f32>) -> XY<f32> {
        let mut res = self.x_axis.mul_scalar(other.x);
        res = self.y_axis.mul_scalar(other.y).add(res);
        res = self.z_axis.add(res);
        res.into()
    }

    /// Like `transform_point2` but without the translation, for directions.
    #[inline]
    fn transform_vector2(&self, other: XY<f32>) -> XY<f32> {
        let mut res = self.x_axis.mul_scalar(other.x);
        res = self.y_axis.mul_scalar(other.y).add(res);
        res.into()
    }
}
+
impl MatrixConst for Columns4<__m128> {
    /// All-zero 4x4 matrix.
    const ZERO: Columns4<__m128> = Columns4 {
        x_axis: __m128::ZERO,
        y_axis: __m128::ZERO,
        z_axis: __m128::ZERO,
        w_axis: __m128::ZERO,
    };
    /// Identity: the unit X/Y/Z/W basis vectors as columns.
    const IDENTITY: Columns4<__m128> = Columns4 {
        x_axis: __m128::X,
        y_axis: __m128::Y,
        z_axis: __m128::Z,
        w_axis: __m128::W,
    };
}
+
impl NanConstEx for Columns4<__m128> {
    /// Every element NaN, used as a poison/sentinel matrix value.
    const NAN: Columns4<__m128> = Columns4 {
        x_axis: __m128::NAN,
        y_axis: __m128::NAN,
        z_axis: __m128::NAN,
        w_axis: __m128::NAN,
    };
}
+
+impl Matrix<f32> for Columns4<__m128> {}
+
impl Matrix4x4<f32, __m128> for Columns4<__m128> {
    #[inline(always)]
    fn from_cols(x_axis: __m128, y_axis: __m128, z_axis: __m128, w_axis: __m128) -> Self {
        Self {
            x_axis,
            y_axis,
            z_axis,
            w_axis,
        }
    }

    #[inline(always)]
    fn x_axis(&self) -> &__m128 {
        &self.x_axis
    }

    #[inline(always)]
    fn y_axis(&self) -> &__m128 {
        &self.y_axis
    }

    #[inline(always)]
    fn z_axis(&self) -> &__m128 {
        &self.z_axis
    }

    #[inline(always)]
    fn w_axis(&self) -> &__m128 {
        &self.w_axis
    }

    /// Determinant via cofactor expansion along `x_axis`; the six 2x2
    /// sub-determinants of the lower two rows are evaluated with SIMD.
    #[inline]
    fn determinant(&self) -> f32 {
        unsafe {
            // SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
            // SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
            // SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
            // SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
            // SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
            // SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];

            // Based on https://github.com/g-truc/glm `glm_mat4_determinant_lowp`
            let swp2a = _mm_shuffle_ps(self.z_axis, self.z_axis, 0b00_01_01_10);
            let swp3a = _mm_shuffle_ps(self.w_axis, self.w_axis, 0b11_10_11_11);
            let swp2b = _mm_shuffle_ps(self.z_axis, self.z_axis, 0b11_10_11_11);
            let swp3b = _mm_shuffle_ps(self.w_axis, self.w_axis, 0b00_01_01_10);
            let swp2c = _mm_shuffle_ps(self.z_axis, self.z_axis, 0b00_00_01_10);
            let swp3c = _mm_shuffle_ps(self.w_axis, self.w_axis, 0b01_10_00_00);

            let mula = _mm_mul_ps(swp2a, swp3a);
            let mulb = _mm_mul_ps(swp2b, swp3b);
            let mulc = _mm_mul_ps(swp2c, swp3c);
            let sube = _mm_sub_ps(mula, mulb);
            let subf = _mm_sub_ps(_mm_movehl_ps(mulc, mulc), mulc);

            let subfaca = _mm_shuffle_ps(sube, sube, 0b10_01_00_00);
            let swpfaca = _mm_shuffle_ps(self.y_axis, self.y_axis, 0b00_00_00_01);
            let mulfaca = _mm_mul_ps(swpfaca, subfaca);

            let subtmpb = _mm_shuffle_ps(sube, subf, 0b00_00_11_01);
            let subfacb = _mm_shuffle_ps(subtmpb, subtmpb, 0b11_01_01_00);
            let swpfacb = _mm_shuffle_ps(self.y_axis, self.y_axis, 0b01_01_10_10);
            let mulfacb = _mm_mul_ps(swpfacb, subfacb);

            let subres = _mm_sub_ps(mulfaca, mulfacb);
            let subtmpc = _mm_shuffle_ps(sube, subf, 0b01_00_10_10);
            let subfacc = _mm_shuffle_ps(subtmpc, subtmpc, 0b11_11_10_00);
            let swpfacc = _mm_shuffle_ps(self.y_axis, self.y_axis, 0b10_11_11_11);
            let mulfacc = _mm_mul_ps(swpfacc, subfacc);

            let addres = _mm_add_ps(subres, mulfacc);
            // Apply the alternating cofactor signs, then expand along x_axis.
            let detcof = _mm_mul_ps(addres, _mm_setr_ps(1.0, -1.0, 1.0, -1.0));

            Vector4::dot(self.x_axis, detcof)
        }
    }

    #[inline]
    fn transpose(&self) -> Self {
        unsafe {
            // Based on https://github.com/microsoft/DirectXMath `XMMatrixTranspose`
            // Interleave pairs of columns, then pairs of pairs (a 4x4 of the
            // classic SSE "transpose via two shuffle passes").
            let tmp0 = _mm_shuffle_ps(self.x_axis, self.y_axis, 0b01_00_01_00);
            let tmp1 = _mm_shuffle_ps(self.x_axis, self.y_axis, 0b11_10_11_10);
            let tmp2 = _mm_shuffle_ps(self.z_axis, self.w_axis, 0b01_00_01_00);
            let tmp3 = _mm_shuffle_ps(self.z_axis, self.w_axis, 0b11_10_11_10);

            Self {
                x_axis: _mm_shuffle_ps(tmp0, tmp2, 0b10_00_10_00),
                y_axis: _mm_shuffle_ps(tmp0, tmp2, 0b11_01_11_01),
                z_axis: _mm_shuffle_ps(tmp1, tmp3, 0b10_00_10_00),
                w_axis: _mm_shuffle_ps(tmp1, tmp3, 0b11_01_11_01),
            }
        }
    }
}
+
impl FloatMatrix4x4<f32, __m128> for Columns4<__m128> {
    type SIMDVector3 = __m128;

    /// Inverse via cofactors divided by the determinant — a SIMD port of
    /// GLM's `glm_mat4_inverse`. `fac0..fac5` hold 2x2 sub-determinants built
    /// from the lower rows; `inv0..inv3` are the signed cofactor columns.
    fn inverse(&self) -> Self {
        unsafe {
            // Based on https://github.com/g-truc/glm `glm_mat4_inverse`
            let fac0 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b11_11_11_11);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b10_10_10_10);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b10_10_10_10);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b11_11_11_11);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            let fac1 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b11_11_11_11);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b01_01_01_01);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b01_01_01_01);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b11_11_11_11);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            let fac2 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b10_10_10_10);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b01_01_01_01);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b01_01_01_01);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b10_10_10_10);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            let fac3 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b11_11_11_11);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b00_00_00_00);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b00_00_00_00);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b11_11_11_11);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            let fac4 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b10_10_10_10);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b00_00_00_00);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b00_00_00_00);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b10_10_10_10);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            let fac5 = {
                let swp0a = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b01_01_01_01);
                let swp0b = _mm_shuffle_ps(self.w_axis, self.z_axis, 0b00_00_00_00);

                let swp00 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b00_00_00_00);
                let swp01 = _mm_shuffle_ps(swp0a, swp0a, 0b10_00_00_00);
                let swp02 = _mm_shuffle_ps(swp0b, swp0b, 0b10_00_00_00);
                let swp03 = _mm_shuffle_ps(self.z_axis, self.y_axis, 0b01_01_01_01);

                let mul00 = _mm_mul_ps(swp00, swp01);
                let mul01 = _mm_mul_ps(swp02, swp03);
                _mm_sub_ps(mul00, mul01)
            };
            // Alternating cofactor sign patterns for even/odd output columns.
            let sign_a = _mm_set_ps(1.0, -1.0, 1.0, -1.0);
            let sign_b = _mm_set_ps(-1.0, 1.0, -1.0, 1.0);

            let temp0 = _mm_shuffle_ps(self.y_axis, self.x_axis, 0b00_00_00_00);
            let vec0 = _mm_shuffle_ps(temp0, temp0, 0b10_10_10_00);

            let temp1 = _mm_shuffle_ps(self.y_axis, self.x_axis, 0b01_01_01_01);
            let vec1 = _mm_shuffle_ps(temp1, temp1, 0b10_10_10_00);

            let temp2 = _mm_shuffle_ps(self.y_axis, self.x_axis, 0b10_10_10_10);
            let vec2 = _mm_shuffle_ps(temp2, temp2, 0b10_10_10_00);

            let temp3 = _mm_shuffle_ps(self.y_axis, self.x_axis, 0b11_11_11_11);
            let vec3 = _mm_shuffle_ps(temp3, temp3, 0b10_10_10_00);

            let mul00 = _mm_mul_ps(vec1, fac0);
            let mul01 = _mm_mul_ps(vec2, fac1);
            let mul02 = _mm_mul_ps(vec3, fac2);
            let sub00 = _mm_sub_ps(mul00, mul01);
            let add00 = _mm_add_ps(sub00, mul02);
            let inv0 = _mm_mul_ps(sign_b, add00);

            let mul03 = _mm_mul_ps(vec0, fac0);
            let mul04 = _mm_mul_ps(vec2, fac3);
            let mul05 = _mm_mul_ps(vec3, fac4);
            let sub01 = _mm_sub_ps(mul03, mul04);
            let add01 = _mm_add_ps(sub01, mul05);
            let inv1 = _mm_mul_ps(sign_a, add01);

            let mul06 = _mm_mul_ps(vec0, fac1);
            let mul07 = _mm_mul_ps(vec1, fac3);
            let mul08 = _mm_mul_ps(vec3, fac5);
            let sub02 = _mm_sub_ps(mul06, mul07);
            let add02 = _mm_add_ps(sub02, mul08);
            let inv2 = _mm_mul_ps(sign_b, add02);

            let mul09 = _mm_mul_ps(vec0, fac2);
            let mul10 = _mm_mul_ps(vec1, fac4);
            let mul11 = _mm_mul_ps(vec2, fac5);
            let sub03 = _mm_sub_ps(mul09, mul10);
            let add03 = _mm_add_ps(sub03, mul11);
            let inv3 = _mm_mul_ps(sign_a, add03);

            // Gather the first element of each cofactor column so the dot
            // with x_axis yields the determinant.
            let row0 = _mm_shuffle_ps(inv0, inv1, 0b00_00_00_00);
            let row1 = _mm_shuffle_ps(inv2, inv3, 0b00_00_00_00);
            let row2 = _mm_shuffle_ps(row0, row1, 0b10_00_10_00);

            let dot0 = Vector4::dot(self.x_axis, row2);
            // A zero determinant (singular matrix) is a caller error.
            glam_assert!(dot0 != 0.0);

            let rcp0 = _mm_set1_ps(dot0.recip());

            Self {
                x_axis: _mm_mul_ps(inv0, rcp0),
                y_axis: _mm_mul_ps(inv1, rcp0),
                z_axis: _mm_mul_ps(inv2, rcp0),
                w_axis: _mm_mul_ps(inv3, rcp0),
            }
        }
    }

    /// Affine point transform: basis columns scaled by x/y/z plus the
    /// translation stored in `w_axis`.
    #[inline(always)]
    fn transform_point3(&self, other: XYZ<f32>) -> XYZ<f32> {
        self.x_axis
            .mul_scalar(other.x)
            .add(self.y_axis.mul_scalar(other.y))
            .add(self.z_axis.mul_scalar(other.z))
            .add(self.w_axis)
            .into()
    }

    /// Direction transform: same as `transform_point3` minus the translation.
    #[inline(always)]
    fn transform_vector3(&self, other: XYZ<f32>) -> XYZ<f32> {
        self.x_axis
            .mul_scalar(other.x)
            .add(self.y_axis.mul_scalar(other.y))
            .add(self.z_axis.mul_scalar(other.z))
            .into()
    }

    /// Point transform kept in SIMD registers; the w lane of `other` is
    /// never read and the translation column is always added.
    #[inline]
    fn transform_float4_as_point3(&self, other: __m128) -> __m128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = self.y_axis.mul_add(Vector4::splat_y(other), res);
        res = self.z_axis.mul_add(Vector4::splat_z(other), res);
        res = self.w_axis.add(res);
        res
    }

    /// Direction transform kept in SIMD registers (no translation applied).
    #[inline]
    fn transform_float4_as_vector3(&self, other: __m128) -> __m128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = self.y_axis.mul_add(Vector4::splat_y(other), res);
        res = self.z_axis.mul_add(Vector4::splat_z(other), res);
        res
    }

    /// Like `transform_float4_as_point3` followed by the perspective divide
    /// by the resulting w lane.
    #[inline]
    fn project_float4_as_point3(&self, other: __m128) -> __m128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = self.y_axis.mul_add(Vector4::splat_y(other), res);
        res = self.z_axis.mul_add(Vector4::splat_z(other), res);
        res = self.w_axis.add(res);
        res = res.mul(res.splat_w().recip());
        res
    }
}
+
+impl ProjectionMatrix<f32, __m128> for Columns4<__m128> {}
+
+impl From<Columns3<XYZ<f32>>> for Columns3<__m128> {
+ #[inline(always)]
+ fn from(v: Columns3<XYZ<f32>>) -> Columns3<__m128> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
+
+impl From<Columns3<__m128>> for Columns3<XYZ<f32>> {
+ #[inline(always)]
+ fn from(v: Columns3<__m128>) -> Columns3<XYZ<f32>> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
diff --git a/src/core/sse2/mod.rs b/src/core/sse2/mod.rs
new file mode 100644
index 0000000..4b78bfa
--- /dev/null
+++ b/src/core/sse2/mod.rs
@@ -0,0 +1,4 @@
// `float` stays private: it only provides pub(crate) helpers consumed by the
// sibling modules (e.g. `super::float::*` in quaternion.rs).
mod float;
pub mod matrix;
pub mod quaternion;
pub mod vector;
diff --git a/src/core/sse2/quaternion.rs b/src/core/sse2/quaternion.rs
new file mode 100644
index 0000000..6ffea3b
--- /dev/null
+++ b/src/core/sse2/quaternion.rs
@@ -0,0 +1,135 @@
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+use super::float::*;
+use crate::core::{
+ storage::XYZ,
+ traits::{quaternion::Quaternion, scalar::*, vector::*},
+};
+
impl Quaternion<f32> for __m128 {
    type SIMDVector3 = __m128;

    /// Flips the sign of the x, y, z lanes; w is untouched.
    #[inline(always)]
    fn conjugate(self) -> Self {
        const SIGN: __m128 = const_f32x4!([-0.0, -0.0, -0.0, 0.0]);
        unsafe { _mm_xor_ps(self, SIGN) }
    }

    /// Normalized linear interpolation from `self` toward `end` by `s`,
    /// flipping `end`'s sign when the dot product is negative so the
    /// interpolation takes the shorter arc.
    #[inline]
    fn lerp(self, end: Self, s: f32) -> Self {
        glam_assert!(FloatVector4::is_normalized(self));
        glam_assert!(FloatVector4::is_normalized(end));

        unsafe {
            const NEG_ZERO: __m128 = const_f32x4!([-0.0; 4]);
            let start = self;
            let end = end;
            let dot = Vector4::dot_into_vec(start, end);
            // Calculate the bias, if the dot product is positive or zero, there is no bias
            // but if it is negative, we want to flip the 'end' rotation XYZW components
            let bias = _mm_and_ps(dot, NEG_ZERO);
            let interpolated = _mm_add_ps(
                _mm_mul_ps(_mm_sub_ps(_mm_xor_ps(end, bias), start), _mm_set_ps1(s)),
                start,
            );
            FloatVector4::normalize(interpolated)
        }
    }

    /// Spherical linear interpolation; falls back to `lerp` when the inputs
    /// are nearly parallel (dot > 0.9995) to avoid dividing by a tiny sine.
    #[inline]
    fn slerp(self, end: Self, s: f32) -> Self {
        // http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/
        glam_assert!(FloatVector4::is_normalized(self));
        glam_assert!(FloatVector4::is_normalized(end));

        const DOT_THRESHOLD: f32 = 0.9995;

        let dot = Vector4::dot(self, end);

        if dot > DOT_THRESHOLD {
            // assumes lerp returns a normalized quaternion
            self.lerp(end, s)
        } else {
            // assumes acos_approx clamps the input to [-1.0, 1.0]
            let theta = dot.acos_approx();

            let x = 1.0 - s;
            let y = s;
            let z = 1.0;

            unsafe {
                // One SIMD sin evaluates all three weights:
                // lanes hold sin((1-s)*theta), sin(s*theta), sin(theta).
                let tmp = _mm_mul_ps(_mm_set_ps1(theta), _mm_set_ps(0.0, z, y, x));
                let tmp = m128_sin(tmp);

                let scale1 = _mm_shuffle_ps(tmp, tmp, 0b00_00_00_00);
                let scale2 = _mm_shuffle_ps(tmp, tmp, 0b01_01_01_01);
                let theta_sin = _mm_shuffle_ps(tmp, tmp, 0b10_10_10_10);

                self.mul(scale1).add(end.mul(scale2)).div(theta_sin)
            }
        }
    }

    /// Hamilton product `self * other`.
    #[inline]
    fn mul_quaternion(self, other: Self) -> Self {
        glam_assert!(FloatVector4::is_normalized(self));
        glam_assert!(FloatVector4::is_normalized(other));
        unsafe {
            // Based on https://github.com/nfrechette/rtm `rtm::quat_mul`
            let lhs = self;
            let rhs = other;

            // Sign patterns that fold the +/- terms of the Hamilton product
            // into four lane-wise multiply-adds.
            const CONTROL_WZYX: __m128 = const_f32x4!([1.0, -1.0, 1.0, -1.0]);
            const CONTROL_ZWXY: __m128 = const_f32x4!([1.0, 1.0, -1.0, -1.0]);
            const CONTROL_YXWZ: __m128 = const_f32x4!([-1.0, 1.0, 1.0, -1.0]);

            let r_xxxx = _mm_shuffle_ps(lhs, lhs, 0b00_00_00_00);
            let r_yyyy = _mm_shuffle_ps(lhs, lhs, 0b01_01_01_01);
            let r_zzzz = _mm_shuffle_ps(lhs, lhs, 0b10_10_10_10);
            let r_wwww = _mm_shuffle_ps(lhs, lhs, 0b11_11_11_11);

            let lxrw_lyrw_lzrw_lwrw = _mm_mul_ps(r_wwww, rhs);
            let l_wzyx = _mm_shuffle_ps(rhs, rhs, 0b00_01_10_11);

            let lwrx_lzrx_lyrx_lxrx = _mm_mul_ps(r_xxxx, l_wzyx);
            let l_zwxy = _mm_shuffle_ps(l_wzyx, l_wzyx, 0b10_11_00_01);

            let lwrx_nlzrx_lyrx_nlxrx = _mm_mul_ps(lwrx_lzrx_lyrx_lxrx, CONTROL_WZYX);

            let lzry_lwry_lxry_lyry = _mm_mul_ps(r_yyyy, l_zwxy);
            let l_yxwz = _mm_shuffle_ps(l_zwxy, l_zwxy, 0b00_01_10_11);

            let lzry_lwry_nlxry_nlyry = _mm_mul_ps(lzry_lwry_lxry_lyry, CONTROL_ZWXY);

            let lyrz_lxrz_lwrz_lzrz = _mm_mul_ps(r_zzzz, l_yxwz);
            let result0 = _mm_add_ps(lxrw_lyrw_lzrw_lwrw, lwrx_nlzrx_lyrx_nlxrx);

            let nlyrz_lxrz_lwrz_wlzrz = _mm_mul_ps(lyrz_lxrz_lwrz_lzrz, CONTROL_YXWZ);
            let result1 = _mm_add_ps(lzry_lwry_nlxry_nlyry, nlyrz_lxrz_lwrz_wlzrz);
            _mm_add_ps(result0, result1)
        }
    }

    /// Rotates a scalar 3-vector by round-tripping through SIMD storage.
    #[inline]
    fn mul_vector3(self, other: XYZ<f32>) -> XYZ<f32> {
        self.mul_float4_as_vector3(other.into()).into()
    }

    /// Rotates the vector in `other`'s xyz lanes using the expanded sandwich
    /// product: v' = v*(w^2 - b.b) + 2*b*(b.v) + 2*w*(b x v), where b is the
    /// quaternion's vector part (only the first three lanes of the dot are used).
    #[inline]
    fn mul_float4_as_vector3(self, other: __m128) -> __m128 {
        glam_assert!(FloatVector4::is_normalized(self));
        unsafe {
            const TWO: __m128 = const_f32x4!([2.0; 4]);
            let w = _mm_shuffle_ps(self, self, 0b11_11_11_11);
            let b = self;
            let b2 = Vector3::dot_into_vec(b, b);
            other
                .mul(w.mul(w).sub(b2))
                .add(b.mul(Vector3::dot_into_vec(other, b).mul(TWO)))
                .add(b.cross(other).mul(w.mul(TWO)))
        }
    }
}
diff --git a/src/core/sse2/vector.rs b/src/core/sse2/vector.rs
new file mode 100644
index 0000000..d5380ce
--- /dev/null
+++ b/src/core/sse2/vector.rs
@@ -0,0 +1,871 @@
+#![allow(clippy::many_single_char_names)]
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+use super::float::*;
+use crate::core::{
+ storage::{Align16, XY, XYZ, XYZW},
+ traits::{scalar::*, vector::*},
+};
+use core::mem::MaybeUninit;
+
+// All-false mask: four lanes of all-zero bits (0.0f32 has bit pattern 0x0000_0000).
+impl MaskVectorConst for __m128 {
+    const FALSE: __m128 = const_f32x4!([0.0; 4]);
+}
+
+// Lane-wise boolean operations on SSE2 masks, implemented with bitwise float intrinsics.
+impl MaskVector for __m128 {
+    #[inline(always)]
+    fn bitand(self, other: Self) -> Self {
+        unsafe { _mm_and_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn bitor(self, other: Self) -> Self {
+        unsafe { _mm_or_ps(self, other) }
+    }
+
+    #[inline]
+    fn not(self) -> Self {
+        // `andnot` computes `!self & all_ones`, i.e. a bitwise complement of every lane.
+        unsafe { _mm_andnot_ps(self, _mm_set_ps1(f32::from_bits(0xff_ff_ff_ff))) }
+    }
+}
+
+// 3-lane mask view of an `__m128`; the w lane is kept zero and masked out of queries.
+impl MaskVector3 for __m128 {
+    #[inline(always)]
+    fn new(x: bool, y: bool, z: bool) -> Self {
+        // A SSE2 mask can be any bit pattern but for the `MaskVector3` implementation of select we
+        // expect either 0 or 0xff_ff_ff_ff. This should be a safe assumption as this type can only
+        // be created via this function or by `Vector3` methods.
+
+        unsafe {
+            _mm_set_ps(
+                // w lane is always false for a 3-lane mask.
+                0.0,
+                f32::from_bits(MaskConst::MASK[z as usize]),
+                f32::from_bits(MaskConst::MASK[y as usize]),
+                f32::from_bits(MaskConst::MASK[x as usize]),
+            )
+        }
+    }
+
+    #[inline(always)]
+    fn bitmask(self) -> u32 {
+        // movemask packs the sign bit of each lane; only bits 0..=2 (x, y, z) are meaningful.
+        unsafe { (_mm_movemask_ps(self) as u32) & 0x7 }
+    }
+
+    #[inline(always)]
+    fn any(self) -> bool {
+        unsafe { (_mm_movemask_ps(self) & 0x7) != 0 }
+    }
+
+    #[inline(always)]
+    fn all(self) -> bool {
+        unsafe { (_mm_movemask_ps(self) & 0x7) == 0x7 }
+    }
+
+    #[inline]
+    fn into_bool_array(self) -> [bool; 3] {
+        let bitmask = MaskVector3::bitmask(self);
+        [(bitmask & 1) != 0, (bitmask & 2) != 0, (bitmask & 4) != 0]
+    }
+
+    #[inline]
+    fn into_u32_array(self) -> [u32; 3] {
+        // Expand each packed bit back to 0 or 0xff_ff_ff_ff via the lookup table.
+        let bitmask = MaskVector3::bitmask(self);
+        [
+            MaskConst::MASK[(bitmask & 1) as usize],
+            MaskConst::MASK[((bitmask >> 1) & 1) as usize],
+            MaskConst::MASK[((bitmask >> 2) & 1) as usize],
+        ]
+    }
+}
+
+// 4-lane mask view of an `__m128`; each lane is expected to be all-zeros or all-ones.
+impl MaskVector4 for __m128 {
+    #[inline(always)]
+    fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
+        // A SSE2 mask can be any bit pattern but for the `Vec4Mask` implementation of select we
+        // expect either 0 or 0xff_ff_ff_ff. This should be a safe assumption as this type can only
+        // be created via this function or by `Vec4` methods.
+
+        const MASK: [u32; 2] = [0, 0xff_ff_ff_ff];
+        unsafe {
+            _mm_set_ps(
+                f32::from_bits(MASK[w as usize]),
+                f32::from_bits(MASK[z as usize]),
+                f32::from_bits(MASK[y as usize]),
+                f32::from_bits(MASK[x as usize]),
+            )
+        }
+    }
+    // Packs the sign bit of each lane into bits 0..=3 (x = bit 0, w = bit 3).
+    #[inline(always)]
+    fn bitmask(self) -> u32 {
+        unsafe { _mm_movemask_ps(self) as u32 }
+    }
+
+    #[inline(always)]
+    fn any(self) -> bool {
+        unsafe { _mm_movemask_ps(self) != 0 }
+    }
+
+    #[inline(always)]
+    fn all(self) -> bool {
+        unsafe { _mm_movemask_ps(self) == 0xf }
+    }
+
+    #[inline]
+    fn into_bool_array(self) -> [bool; 4] {
+        let bitmask = MaskVector4::bitmask(self);
+        [
+            (bitmask & 1) != 0,
+            (bitmask & 2) != 0,
+            (bitmask & 4) != 0,
+            (bitmask & 8) != 0,
+        ]
+    }
+
+    #[inline]
+    fn into_u32_array(self) -> [u32; 4] {
+        // Expand each packed bit back to 0 or 0xff_ff_ff_ff via the lookup table.
+        let bitmask = MaskVector4::bitmask(self);
+        [
+            MaskConst::MASK[(bitmask & 1) as usize],
+            MaskConst::MASK[((bitmask >> 1) & 1) as usize],
+            MaskConst::MASK[((bitmask >> 2) & 1) as usize],
+            MaskConst::MASK[((bitmask >> 3) & 1) as usize],
+        ]
+    }
+}
+
+/// Calculates the vector 3 dot product and returns answer in x lane of __m128.
+///
+/// Only the x lane of the result is meaningful; the upper lanes hold leftover
+/// products from the `_mm_add_ss` operations and must not be read.
+#[inline(always)]
+unsafe fn dot3_in_x(lhs: __m128, rhs: __m128) -> __m128 {
+    let x2_y2_z2_w2 = _mm_mul_ps(lhs, rhs);
+    // Broadcast the y and z products into lane 0, then sum x + y + z with scalar adds.
+    let y2_0_0_0 = _mm_shuffle_ps(x2_y2_z2_w2, x2_y2_z2_w2, 0b00_00_00_01);
+    let z2_0_0_0 = _mm_shuffle_ps(x2_y2_z2_w2, x2_y2_z2_w2, 0b00_00_00_10);
+    let x2y2_0_0_0 = _mm_add_ss(x2_y2_z2_w2, y2_0_0_0);
+    _mm_add_ss(x2y2_0_0_0, z2_0_0_0)
+}
+
+/// Calculates the vector 4 dot product and returns answer in x lane of __m128.
+///
+/// Pairwise reduction: (x*x' + z*z') and (y*y' + w*w') are formed first, then summed.
+/// Lanes other than x hold intermediate sums and must not be read.
+#[inline(always)]
+unsafe fn dot4_in_x(lhs: __m128, rhs: __m128) -> __m128 {
+    let x2_y2_z2_w2 = _mm_mul_ps(lhs, rhs);
+    let z2_w2_0_0 = _mm_shuffle_ps(x2_y2_z2_w2, x2_y2_z2_w2, 0b00_00_11_10);
+    let x2z2_y2w2_0_0 = _mm_add_ps(x2_y2_z2_w2, z2_w2_0_0);
+    let y2w2_0_0_0 = _mm_shuffle_ps(x2z2_y2w2_0_0, x2z2_y2w2_0_0, 0b00_00_00_01);
+    _mm_add_ps(x2z2_y2w2_0_0, y2w2_0_0_0)
+}
+
+// Splat constants used by the generic vector traits.
+impl VectorConst for __m128 {
+    const ZERO: __m128 = const_f32x4!([0.0; 4]);
+    const ONE: __m128 = const_f32x4!([1.0; 4]);
+}
+
+// Unit-axis constants; the w lane is zero for the 3D axes.
+impl Vector3Const for __m128 {
+    const X: __m128 = const_f32x4!([1.0, 0.0, 0.0, 0.0]);
+    const Y: __m128 = const_f32x4!([0.0, 1.0, 0.0, 0.0]);
+    const Z: __m128 = const_f32x4!([0.0, 0.0, 1.0, 0.0]);
+}
+
+impl Vector4Const for __m128 {
+    const X: __m128 = const_f32x4!([1.0, 0.0, 0.0, 0.0]);
+    const Y: __m128 = const_f32x4!([0.0, 1.0, 0.0, 0.0]);
+    const Z: __m128 = const_f32x4!([0.0, 0.0, 1.0, 0.0]);
+    const W: __m128 = const_f32x4!([0.0, 0.0, 0.0, 1.0]);
+}
+
+// All lanes NaN.
+impl NanConstEx for __m128 {
+    const NAN: __m128 = const_f32x4!([f32::NAN; 4]);
+}
+
+// Generic lane-wise vector operations for `__m128`. Comparison methods return an
+// `__m128` mask where each lane is all-ones (true) or all-zeros (false).
+impl Vector<f32> for __m128 {
+    type Mask = __m128;
+
+    #[inline(always)]
+    fn splat(s: f32) -> Self {
+        unsafe { _mm_set_ps1(s) }
+    }
+
+    #[inline(always)]
+    fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
+        // Bit-select: relies on each mask lane being all-zeros or all-ones
+        // (guaranteed by the mask constructors above).
+        unsafe { _mm_or_ps(_mm_andnot_ps(mask, if_false), _mm_and_ps(if_true, mask)) }
+    }
+
+    #[inline(always)]
+    fn cmpeq(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmpeq_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn cmpne(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmpneq_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn cmpge(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmpge_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn cmpgt(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmpgt_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn cmple(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmple_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn cmplt(self, other: Self) -> Self::Mask {
+        unsafe { _mm_cmplt_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn add(self, other: Self) -> Self {
+        unsafe { _mm_add_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn div(self, other: Self) -> Self {
+        unsafe { _mm_div_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn mul(self, other: Self) -> Self {
+        unsafe { _mm_mul_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn sub(self, other: Self) -> Self {
+        unsafe { _mm_sub_ps(self, other) }
+    }
+
+    // Scalar variants splat the scalar across all lanes first.
+    #[inline(always)]
+    fn add_scalar(self, other: f32) -> Self {
+        unsafe { _mm_add_ps(self, _mm_set_ps1(other)) }
+    }
+
+    #[inline(always)]
+    fn sub_scalar(self, other: f32) -> Self {
+        unsafe { _mm_sub_ps(self, _mm_set_ps1(other)) }
+    }
+
+    #[inline(always)]
+    fn mul_scalar(self, other: f32) -> Self {
+        unsafe { _mm_mul_ps(self, _mm_set_ps1(other)) }
+    }
+
+    #[inline(always)]
+    fn div_scalar(self, other: f32) -> Self {
+        unsafe { _mm_div_ps(self, _mm_set_ps1(other)) }
+    }
+
+    #[inline(always)]
+    fn rem(self, other: Self) -> Self {
+        // Remainder via floored division: self - floor(self / other) * other.
+        unsafe {
+            let n = m128_floor(_mm_div_ps(self, other));
+            _mm_sub_ps(self, _mm_mul_ps(n, other))
+        }
+    }
+
+    #[inline(always)]
+    fn rem_scalar(self, other: f32) -> Self {
+        unsafe { self.rem(_mm_set1_ps(other)) }
+    }
+
+    #[inline(always)]
+    fn min(self, other: Self) -> Self {
+        unsafe { _mm_min_ps(self, other) }
+    }
+
+    #[inline(always)]
+    fn max(self, other: Self) -> Self {
+        unsafe { _mm_max_ps(self, other) }
+    }
+}
+
+// 3-component vector operations over an `__m128`; the w lane is unused by the 3D
+// accessors and is typically a duplicate of z after construction.
+impl Vector3<f32> for __m128 {
+    #[inline(always)]
+    fn new(x: f32, y: f32, z: f32) -> Self {
+        // The w lane is filled with z; 3D operations never read it.
+        unsafe { _mm_set_ps(z, z, y, x) }
+    }
+
+    #[inline(always)]
+    fn x(self) -> f32 {
+        unsafe { _mm_cvtss_f32(self) }
+    }
+
+    #[inline(always)]
+    fn y(self) -> f32 {
+        unsafe { _mm_cvtss_f32(_mm_shuffle_ps(self, self, 0b01_01_01_01)) }
+    }
+
+    #[inline(always)]
+    fn z(self) -> f32 {
+        unsafe { _mm_cvtss_f32(_mm_shuffle_ps(self, self, 0b10_10_10_10)) }
+    }
+
+    #[inline(always)]
+    fn splat_x(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b00_00_00_00) }
+    }
+
+    #[inline(always)]
+    fn splat_y(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b01_01_01_01) }
+    }
+
+    #[inline(always)]
+    fn splat_z(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b10_10_10_10) }
+    }
+
+    #[inline(always)]
+    fn from_slice_unaligned(slice: &[f32]) -> Self {
+        // Indexing panics if the slice has fewer than 3 elements.
+        Vector3::new(slice[0], slice[1], slice[2])
+    }
+
+    #[inline(always)]
+    fn write_to_slice_unaligned(self, slice: &mut [f32]) {
+        let xyz = self.as_ref_xyz();
+        slice[0] = xyz.x;
+        slice[1] = xyz.y;
+        slice[2] = xyz.z;
+    }
+
+    // SAFETY note: `XYZ<f32>` is a repr(C) prefix of the 16-byte `__m128`, so
+    // reinterpreting a reference to the first 12 bytes is sound.
+    #[inline(always)]
+    fn as_ref_xyz(&self) -> &XYZ<f32> {
+        unsafe { &*(self as *const Self).cast() }
+    }
+
+    #[inline(always)]
+    fn as_mut_xyz(&mut self) -> &mut XYZ<f32> {
+        unsafe { &mut *(self as *mut Self).cast() }
+    }
+
+    #[inline(always)]
+    fn into_xy(self) -> XY<f32> {
+        // Store all 16 bytes to aligned scratch space, then read back the prefix.
+        let mut out: MaybeUninit<Align16<XY<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    #[inline]
+    fn into_xyzw(self, w: f32) -> XYZW<f32> {
+        unsafe {
+            // t = [w, y, z, w]; moving self's x back into lane 0 yields [x, y, z, w].
+            let mut t = _mm_move_ss(self, _mm_set_ss(w));
+            t = _mm_shuffle_ps(t, t, 0b00_10_01_00);
+            // TODO: need a SIMD path
+            *_mm_move_ss(t, self).as_ref_xyzw()
+        }
+    }
+
+    #[inline(always)]
+    fn from_array(a: [f32; 3]) -> Self {
+        unsafe { _mm_set_ps(a[2], a[2], a[1], a[0]) }
+    }
+
+    #[inline(always)]
+    fn into_array(self) -> [f32; 3] {
+        let mut out: MaybeUninit<Align16<[f32; 3]>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    #[inline(always)]
+    fn from_tuple(t: (f32, f32, f32)) -> Self {
+        unsafe { _mm_set_ps(t.2, t.2, t.1, t.0) }
+    }
+
+    #[inline(always)]
+    fn into_tuple(self) -> (f32, f32, f32) {
+        let mut out: MaybeUninit<Align16<(f32, f32, f32)>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    // Horizontal reduction over x, y and z only; the w lane never reaches lane 0.
+    #[inline]
+    fn min_element(self) -> f32 {
+        unsafe {
+            let v = self;
+            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b01_01_10_10));
+            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
+            _mm_cvtss_f32(v)
+        }
+    }
+
+    #[inline]
+    fn max_element(self) -> f32 {
+        unsafe {
+            let v = self;
+            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_10_10));
+            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
+            _mm_cvtss_f32(v)
+        }
+    }
+
+    #[inline]
+    fn dot(self, other: Self) -> f32 {
+        unsafe { _mm_cvtss_f32(dot3_in_x(self, other)) }
+    }
+
+    // Dot product broadcast to all four lanes.
+    #[inline]
+    fn dot_into_vec(self, other: Self) -> Self {
+        unsafe {
+            let dot_in_x = dot3_in_x(self, other);
+            _mm_shuffle_ps(dot_in_x, dot_in_x, 0b00_00_00_00)
+        }
+    }
+
+    #[inline]
+    fn cross(self, other: Self) -> Self {
+        unsafe {
+            // x <- a.y*b.z - a.z*b.y
+            // y <- a.z*b.x - a.x*b.z
+            // z <- a.x*b.y - a.y*b.x
+            // We can save a shuffle by grouping it in this wacky order:
+            // (self.zxy() * other - self * other.zxy()).zxy()
+            let lhszxy = _mm_shuffle_ps(self, self, 0b01_01_00_10);
+            let rhszxy = _mm_shuffle_ps(other, other, 0b01_01_00_10);
+            let lhszxy_rhs = _mm_mul_ps(lhszxy, other);
+            let rhszxy_lhs = _mm_mul_ps(rhszxy, self);
+            let sub = _mm_sub_ps(lhszxy_rhs, rhszxy_lhs);
+            _mm_shuffle_ps(sub, sub, 0b01_01_00_10)
+        }
+    }
+
+    #[inline]
+    fn clamp(self, min: Self, max: Self) -> Self {
+        // Debug-only sanity check; `glam_assert` compiles out in release builds.
+        glam_assert!(
+            MaskVector3::all(min.cmple(max)),
+            "clamp: expected min <= max"
+        );
+        self.max(min).min(max)
+    }
+}
+
+// 4-component vector operations over an `__m128`; all four lanes are significant.
+impl Vector4<f32> for __m128 {
+    #[inline(always)]
+    fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
+        // `_mm_set_ps` takes arguments high-lane first, hence the reversed order.
+        unsafe { _mm_set_ps(w, z, y, x) }
+    }
+
+    #[inline(always)]
+    fn x(self) -> f32 {
+        unsafe { _mm_cvtss_f32(self) }
+    }
+
+    #[inline(always)]
+    fn y(self) -> f32 {
+        unsafe { _mm_cvtss_f32(_mm_shuffle_ps(self, self, 0b01_01_01_01)) }
+    }
+
+    #[inline(always)]
+    fn z(self) -> f32 {
+        unsafe { _mm_cvtss_f32(_mm_shuffle_ps(self, self, 0b10_10_10_10)) }
+    }
+
+    #[inline(always)]
+    fn w(self) -> f32 {
+        unsafe { _mm_cvtss_f32(_mm_shuffle_ps(self, self, 0b11_11_11_11)) }
+    }
+
+    #[inline(always)]
+    fn splat_x(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b00_00_00_00) }
+    }
+
+    #[inline(always)]
+    fn splat_y(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b01_01_01_01) }
+    }
+
+    #[inline(always)]
+    fn splat_z(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b10_10_10_10) }
+    }
+
+    #[inline(always)]
+    fn splat_w(self) -> Self {
+        unsafe { _mm_shuffle_ps(self, self, 0b11_11_11_11) }
+    }
+
+    #[inline(always)]
+    fn from_slice_unaligned(slice: &[f32]) -> Self {
+        // Length is checked up front so the unaligned load never reads out of bounds.
+        assert!(slice.len() >= 4);
+        unsafe { _mm_loadu_ps(slice.as_ptr()) }
+    }
+
+    #[inline(always)]
+    fn write_to_slice_unaligned(self, slice: &mut [f32]) {
+        unsafe {
+            assert!(slice.len() >= 4);
+            _mm_storeu_ps(slice.as_mut_ptr(), self);
+        }
+    }
+
+    // SAFETY note: `XYZW<f32>` is repr(C) with the same 16-byte layout as `__m128`.
+    #[inline(always)]
+    fn as_ref_xyzw(&self) -> &XYZW<f32> {
+        unsafe { &*(self as *const Self).cast() }
+    }
+
+    #[inline(always)]
+    fn as_mut_xyzw(&mut self) -> &mut XYZW<f32> {
+        unsafe { &mut *(self as *mut Self).cast() }
+    }
+
+    // Conversions below store all 16 bytes to aligned scratch space and read back a prefix.
+    #[inline(always)]
+    fn into_xy(self) -> XY<f32> {
+        let mut out: MaybeUninit<Align16<XY<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    #[inline(always)]
+    fn into_xyz(self) -> XYZ<f32> {
+        let mut out: MaybeUninit<Align16<XYZ<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    #[inline(always)]
+    fn from_array(a: [f32; 4]) -> Self {
+        unsafe { _mm_loadu_ps(a.as_ptr()) }
+    }
+
+    #[inline(always)]
+    fn into_array(self) -> [f32; 4] {
+        let mut out: MaybeUninit<Align16<[f32; 4]>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    #[inline(always)]
+    fn from_tuple(t: (f32, f32, f32, f32)) -> Self {
+        unsafe { _mm_set_ps(t.3, t.2, t.1, t.0) }
+    }
+
+    #[inline(always)]
+    fn into_tuple(self) -> (f32, f32, f32, f32) {
+        let mut out: MaybeUninit<Align16<(f32, f32, f32, f32)>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), self);
+            out.assume_init().0
+        }
+    }
+
+    // Horizontal reduction over all four lanes: fold high pair onto low pair, then y onto x.
+    #[inline]
+    fn min_element(self) -> f32 {
+        unsafe {
+            let v = self;
+            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
+            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
+            _mm_cvtss_f32(v)
+        }
+    }
+
+    #[inline]
+    fn max_element(self) -> f32 {
+        unsafe {
+            let v = self;
+            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
+            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
+            _mm_cvtss_f32(v)
+        }
+    }
+
+    #[inline]
+    fn dot(self, other: Self) -> f32 {
+        unsafe { _mm_cvtss_f32(dot4_in_x(self, other)) }
+    }
+
+    // Dot product broadcast to all four lanes.
+    #[inline]
+    fn dot_into_vec(self, other: Self) -> Self {
+        unsafe {
+            let dot_in_x = dot4_in_x(self, other);
+            _mm_shuffle_ps(dot_in_x, dot_in_x, 0b00_00_00_00)
+        }
+    }
+
+    #[inline]
+    fn clamp(self, min: Self, max: Self) -> Self {
+        // Debug-only sanity check; `glam_assert` compiles out in release builds.
+        glam_assert!(
+            MaskVector4::all(min.cmple(max)),
+            "clamp: expected min <= max"
+        );
+        self.max(min).min(max)
+    }
+}
+
+impl SignedVector<f32> for __m128 {
+    // Negation as `0.0 - self`; note this maps +0.0 to +0.0 rather than -0.0.
+    #[inline(always)]
+    fn neg(self) -> Self {
+        unsafe { _mm_sub_ps(Self::ZERO, self) }
+    }
+}
+
+impl SignedVector3<f32> for __m128 {
+    #[inline]
+    fn abs(self) -> Self {
+        unsafe { m128_abs(self) }
+    }
+
+    #[inline]
+    fn signum(self) -> Self {
+        const NEG_ONE: __m128 = const_f32x4!([-1.0; 4]);
+        // 1.0 for lanes >= 0.0, -1.0 otherwise.
+        let mask = self.cmpge(Self::ZERO);
+        let result = Self::select(mask, Self::ONE, NEG_ONE);
+        // cmpunord flags NaN lanes; propagate the original NaN instead of +/-1.0.
+        let mask = unsafe { _mm_cmpunord_ps(self, self) };
+        Self::select(mask, self, result)
+    }
+}
+
+// Float-specific 3D operations; the w lane is ignored by all queries here.
+impl FloatVector3<f32> for __m128 {
+    #[inline]
+    fn is_finite(self) -> bool {
+        // Scalar fallback; only x, y and z are checked.
+        let (x, y, z) = Vector3::into_tuple(self);
+        x.is_finite() && y.is_finite() && z.is_finite()
+    }
+
+    #[inline]
+    fn is_nan(self) -> bool {
+        MaskVector3::any(FloatVector3::is_nan_mask(self))
+    }
+
+    #[inline(always)]
+    fn is_nan_mask(self) -> Self::Mask {
+        // A lane is unordered with itself iff it is NaN.
+        unsafe { _mm_cmpunord_ps(self, self) }
+    }
+
+    // Only compiled when the `fma` target feature is enabled; the trait presumably
+    // supplies a non-FMA default otherwise — TODO confirm against the trait definition.
+    #[inline(always)]
+    #[cfg(target_feature = "fma")]
+    fn mul_add(self, b: Self, c: Self) -> Self {
+        unsafe { _mm_fmadd_ps(self, b, c) }
+    }
+
+    #[inline]
+    fn floor(self) -> Self {
+        unsafe { m128_floor(self) }
+    }
+
+    #[inline]
+    fn ceil(self) -> Self {
+        unsafe { m128_ceil(self) }
+    }
+
+    #[inline]
+    fn round(self) -> Self {
+        unsafe { m128_round(self) }
+    }
+
+    #[inline(always)]
+    fn recip(self) -> Self {
+        // Full-precision divide; deliberately not the approximate `_mm_rcp_ps`.
+        unsafe { _mm_div_ps(Self::ONE, self) }
+    }
+
+    // exp/powf have no SSE2 equivalent; computed per-lane with the w lane zeroed.
+    #[inline]
+    fn exp(self) -> Self {
+        let (x, y, z) = Vector3::into_tuple(self);
+        unsafe { _mm_set_ps(0.0, z.exp(), y.exp(), x.exp()) }
+    }
+
+    #[inline]
+    fn powf(self, n: f32) -> Self {
+        let (x, y, z) = Vector3::into_tuple(self);
+        unsafe { _mm_set_ps(0.0, z.powf(n), y.powf(n), x.powf(n)) }
+    }
+
+    #[inline]
+    fn length(self) -> f32 {
+        unsafe {
+            let dot = dot3_in_x(self, self);
+            _mm_cvtss_f32(_mm_sqrt_ps(dot))
+        }
+    }
+
+    #[inline]
+    fn length_recip(self) -> f32 {
+        unsafe {
+            let dot = dot3_in_x(self, self);
+            _mm_cvtss_f32(_mm_div_ps(Self::ONE, _mm_sqrt_ps(dot)))
+        }
+    }
+
+    #[inline]
+    fn normalize(self) -> Self {
+        unsafe {
+            let length = _mm_sqrt_ps(Vector3::dot_into_vec(self, self));
+            #[allow(clippy::let_and_return)]
+            let normalized = _mm_div_ps(self, length);
+            // Catches zero-length input (division produced non-finite lanes) in debug builds.
+            glam_assert!(FloatVector3::is_finite(normalized));
+            normalized
+        }
+    }
+}
+
+impl SignedVector4<f32> for __m128 {
+    #[inline]
+    fn abs(self) -> Self {
+        unsafe { m128_abs(self) }
+    }
+
+    #[inline]
+    fn signum(self) -> Self {
+        const NEG_ONE: __m128 = const_f32x4!([-1.0; 4]);
+        // 1.0 for lanes >= 0.0, -1.0 otherwise.
+        let mask = self.cmpge(Self::ZERO);
+        let result = Self::select(mask, Self::ONE, NEG_ONE);
+        // cmpunord flags NaN lanes; propagate the original NaN instead of +/-1.0.
+        let mask = unsafe { _mm_cmpunord_ps(self, self) };
+        Self::select(mask, self, result)
+    }
+}
+
+// Float-specific 4D operations; all four lanes are significant.
+impl FloatVector4<f32> for __m128 {
+    #[inline]
+    fn is_finite(self) -> bool {
+        // Scalar fallback over all four lanes.
+        let (x, y, z, w) = Vector4::into_tuple(self);
+        x.is_finite() && y.is_finite() && z.is_finite() && w.is_finite()
+    }
+
+    #[inline]
+    fn is_nan(self) -> bool {
+        MaskVector4::any(FloatVector4::is_nan_mask(self))
+    }
+
+    #[inline(always)]
+    fn is_nan_mask(self) -> Self::Mask {
+        // A lane is unordered with itself iff it is NaN.
+        unsafe { _mm_cmpunord_ps(self, self) }
+    }
+
+    // Only compiled when the `fma` target feature is enabled; the trait presumably
+    // supplies a non-FMA default otherwise — TODO confirm against the trait definition.
+    #[inline(always)]
+    #[cfg(target_feature = "fma")]
+    fn mul_add(self, b: Self, c: Self) -> Self {
+        unsafe { _mm_fmadd_ps(self, b, c) }
+    }
+
+    #[inline]
+    fn floor(self) -> Self {
+        unsafe { m128_floor(self) }
+    }
+
+    #[inline]
+    fn ceil(self) -> Self {
+        unsafe { m128_ceil(self) }
+    }
+
+    #[inline]
+    fn round(self) -> Self {
+        unsafe { m128_round(self) }
+    }
+
+    #[inline(always)]
+    fn recip(self) -> Self {
+        // Full-precision divide; deliberately not the approximate `_mm_rcp_ps`.
+        unsafe { _mm_div_ps(Self::ONE, self) }
+    }
+
+    // exp/powf have no SSE2 equivalent; computed per-lane with scalar math.
+    #[inline]
+    fn exp(self) -> Self {
+        let (x, y, z, w) = Vector4::into_tuple(self);
+        unsafe { _mm_set_ps(w.exp(), z.exp(), y.exp(), x.exp()) }
+    }
+
+    #[inline]
+    fn powf(self, n: f32) -> Self {
+        let (x, y, z, w) = Vector4::into_tuple(self);
+        unsafe { _mm_set_ps(w.powf(n), z.powf(n), y.powf(n), x.powf(n)) }
+    }
+
+    #[inline]
+    fn length(self) -> f32 {
+        unsafe {
+            let dot = dot4_in_x(self, self);
+            _mm_cvtss_f32(_mm_sqrt_ps(dot))
+        }
+    }
+
+    #[inline]
+    fn length_recip(self) -> f32 {
+        unsafe {
+            let dot = dot4_in_x(self, self);
+            _mm_cvtss_f32(_mm_div_ps(Self::ONE, _mm_sqrt_ps(dot)))
+        }
+    }
+
+    #[inline]
+    fn normalize(self) -> Self {
+        unsafe {
+            let dot = Vector4::dot_into_vec(self, self);
+            #[allow(clippy::let_and_return)]
+            let normalized = _mm_div_ps(self, _mm_sqrt_ps(dot));
+            // Catches zero-length input (division produced non-finite lanes) in debug builds.
+            glam_assert!(FloatVector4::is_finite(normalized));
+            normalized
+        }
+    }
+}
+
+// Conversions between the scalar storage structs and `__m128`. Loading from a
+// narrower struct duplicates the last component into the unused high lanes;
+// storing goes through 16-byte-aligned scratch space and reads back a prefix.
+impl From<XYZW<f32>> for __m128 {
+    #[inline(always)]
+    fn from(v: XYZW<f32>) -> __m128 {
+        unsafe { _mm_set_ps(v.w, v.z, v.y, v.x) }
+    }
+}
+
+impl From<XYZ<f32>> for __m128 {
+    #[inline(always)]
+    fn from(v: XYZ<f32>) -> __m128 {
+        // w lane filled with z; 3D operations never read it.
+        unsafe { _mm_set_ps(v.z, v.z, v.y, v.x) }
+    }
+}
+
+impl From<XY<f32>> for __m128 {
+    #[inline(always)]
+    fn from(v: XY<f32>) -> __m128 {
+        // z and w lanes filled with y; 2D operations never read them.
+        unsafe { _mm_set_ps(v.y, v.y, v.y, v.x) }
+    }
+}
+
+impl From<__m128> for XYZW<f32> {
+    #[inline(always)]
+    fn from(v: __m128) -> XYZW<f32> {
+        let mut out: MaybeUninit<Align16<XYZW<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), v);
+            out.assume_init().0
+        }
+    }
+}
+
+impl From<__m128> for XYZ<f32> {
+    #[inline(always)]
+    fn from(v: __m128) -> XYZ<f32> {
+        let mut out: MaybeUninit<Align16<XYZ<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), v);
+            out.assume_init().0
+        }
+    }
+}
+
+impl From<__m128> for XY<f32> {
+    #[inline(always)]
+    fn from(v: __m128) -> XY<f32> {
+        let mut out: MaybeUninit<Align16<XY<f32>>> = MaybeUninit::uninit();
+        unsafe {
+            _mm_store_ps(out.as_mut_ptr().cast(), v);
+            out.assume_init().0
+        }
+    }
+}
diff --git a/src/core/storage.rs b/src/core/storage.rs
new file mode 100644
index 0000000..19bf35d
--- /dev/null
+++ b/src/core/storage.rs
@@ -0,0 +1,128 @@
+/// Plain 2-component storage; `repr(simd)` on SPIR-V targets, `repr(C)` elsewhere.
+#[derive(Clone, Copy, Debug, Default, PartialEq, PartialOrd)]
+#[cfg_attr(target_arch = "spirv", repr(simd))]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct XY<T> {
+    pub x: T,
+    pub y: T,
+}
+
+/// Plain 3-component storage.
+#[derive(Clone, Copy, Debug, Default, PartialEq, PartialOrd)]
+#[cfg_attr(target_arch = "spirv", repr(simd))]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct XYZ<T> {
+    pub x: T,
+    pub y: T,
+    pub z: T,
+}
+
+/// Plain 4-component storage.
+#[derive(Clone, Copy, Debug, Default, PartialEq, PartialOrd)]
+#[cfg_attr(target_arch = "spirv", repr(simd))]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct XYZW<T> {
+    pub x: T,
+    pub y: T,
+    pub z: T,
+    pub w: T,
+}
+
+/// Column-pair storage for 2-column matrices.
+#[derive(Clone, Copy, Default, PartialEq, PartialOrd)]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct Columns2<V> {
+    pub x_axis: V,
+    pub y_axis: V,
+}
+
+/// Column storage for 3-column matrices.
+#[derive(Clone, Copy, Default, PartialEq, PartialOrd)]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct Columns3<V> {
+    pub x_axis: V,
+    pub y_axis: V,
+    pub z_axis: V,
+}
+
+/// Column storage for 4-column matrices.
+#[derive(Clone, Copy, Default, PartialEq, PartialOrd)]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+pub struct Columns4<V> {
+    pub x_axis: V,
+    pub y_axis: V,
+    pub z_axis: V,
+    pub w_axis: V,
+}
+
+/// The `XYZF32A16` is used for the `Vec3A` type, that is a 16 byte aligned `XYZ<f32>` type.
+#[derive(Clone, Copy, Debug, Default, PartialEq, PartialOrd)]
+#[cfg_attr(target_arch = "spirv", repr(simd))]
+#[cfg_attr(not(target_arch = "spirv"), repr(C, align(16)))]
+pub struct XYZF32A16 {
+    pub x: f32,
+    pub y: f32,
+    pub z: f32,
+}
+
+// Lossy narrowing conversions into/out of the aligned `Vec3A` storage type;
+// the extra w component of `XYZW` is simply dropped.
+impl From<XYZW<f32>> for XYZF32A16 {
+    #[inline(always)]
+    fn from(v: XYZW<f32>) -> Self {
+        Self {
+            x: v.x,
+            y: v.y,
+            z: v.z,
+        }
+    }
+}
+
+impl From<XYZ<f32>> for XYZF32A16 {
+    #[inline(always)]
+    fn from(v: XYZ<f32>) -> Self {
+        Self {
+            x: v.x,
+            y: v.y,
+            z: v.z,
+        }
+    }
+}
+
+impl From<XYZF32A16> for XYZ<f32> {
+    #[inline(always)]
+    fn from(v: XYZF32A16) -> Self {
+        Self {
+            x: v.x,
+            y: v.y,
+            z: v.z,
+        }
+    }
+}
+
+impl From<XYZF32A16> for XY<f32> {
+    #[inline(always)]
+    fn from(v: XYZF32A16) -> Self {
+        Self { x: v.x, y: v.y }
+    }
+}
+
+/// Wrapper that forces 16-byte alignment on `T`, used as scratch space for
+/// aligned SIMD stores (`_mm_store_ps`) whose result is then read back as `T`.
+#[derive(Clone, Copy, Default, PartialEq, PartialOrd)]
+#[repr(C, align(16))]
+pub(crate) struct Align16<T>(pub T);
+
+impl<T> Align16<T> {
+    #[allow(dead_code)]
+    pub fn as_ptr(&self) -> *const T {
+        &self.0
+    }
+
+    #[allow(dead_code)]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        &mut self.0
+    }
+}
+
+// Verifies the alignment guarantee and that the raw pointers round-trip reads/writes.
+#[test]
+fn test_align16() {
+    use core::{mem, ptr};
+    let mut a = Align16::<f32>(1.0);
+    assert_eq!(mem::align_of_val(&a), 16);
+    unsafe {
+        assert_eq!(ptr::read(a.as_ptr()).to_bits(), f32::to_bits(1.0));
+        ptr::write(a.as_mut_ptr(), -1.0);
+    }
+    assert_eq!(a.0.to_bits(), f32::to_bits(-1.0));
+}
diff --git a/src/core/traits/matrix.rs b/src/core/traits/matrix.rs
new file mode 100644
index 0000000..f1b00eb
--- /dev/null
+++ b/src/core/traits/matrix.rs
@@ -0,0 +1,985 @@
+use crate::core::{
+ storage::{XY, XYZ, XYZW},
+ traits::{
+ quaternion::Quaternion,
+ scalar::{FloatEx, NumEx},
+ vector::*,
+ },
+};
+
+/// Constants every matrix type must provide.
+pub trait MatrixConst {
+    const ZERO: Self;
+    const IDENTITY: Self;
+}
+
+/// Base matrix trait that sets up trait bounds
+pub trait Matrix<T: NumEx>: Sized + Copy + Clone {}
+
+/// 2x2 Matrix trait for all types of T
+///
+/// Matrices are stored and addressed in column-major order: `m00, m01` form the
+/// x axis (first column) and `m10, m11` the y axis (second column).
+pub trait Matrix2x2<T: NumEx, V2: Vector2<T>>: Matrix<T> {
+    #[inline(always)]
+    fn new(m00: T, m01: T, m10: T, m11: T) -> Self {
+        Self::from_cols(V2::new(m00, m01), V2::new(m10, m11))
+    }
+
+    // Required methods: construction from columns and column accessors.
+    fn from_cols(x_axis: V2, y_axis: V2) -> Self;
+
+    fn x_axis(&self) -> &V2;
+    fn y_axis(&self) -> &V2;
+
+    #[inline(always)]
+    fn from_cols_array(m: &[T; 4]) -> Self {
+        Self::new(m[0], m[1], m[2], m[3])
+    }
+
+    #[rustfmt::skip]
+    #[inline(always)]
+    fn to_cols_array(&self) -> [T; 4] {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        [x_axis.x(), x_axis.y(),
+         y_axis.x(), y_axis.y()]
+    }
+
+    #[inline(always)]
+    fn from_cols_array_2d(m: &[[T; 2]; 2]) -> Self {
+        Self::from_cols(V2::from_array(m[0]), V2::from_array(m[1]))
+    }
+
+    #[inline(always)]
+    fn to_cols_array_2d(&self) -> [[T; 2]; 2] {
+        [self.x_axis().into_array(), self.y_axis().into_array()]
+    }
+
+    // Panics if `m` has fewer than 4 elements.
+    #[inline(always)]
+    fn from_cols_slice(m: &[T]) -> Self {
+        Self::new(m[0], m[1], m[2], m[3])
+    }
+
+    // Panics if `slice` has fewer than 4 elements.
+    #[inline(always)]
+    fn write_cols_to_slice(&self, slice: &mut [T]) {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        slice[0] = x_axis.x();
+        slice[1] = x_axis.y();
+        slice[2] = y_axis.x();
+        slice[3] = y_axis.y();
+    }
+
+    #[rustfmt::skip]
+    #[inline(always)]
+    fn from_diagonal(diagonal: XY<T>) -> Self {
+        Self::new(
+            diagonal.x, T::ZERO,
+            T::ZERO, diagonal.y)
+    }
+
+    #[inline]
+    fn determinant(&self) -> T {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        x_axis.x() * y_axis.y() - x_axis.y() * y_axis.x()
+    }
+
+    #[inline(always)]
+    fn transpose(&self) -> Self {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        Self::new(x_axis.x(), y_axis.x(), x_axis.y(), y_axis.y())
+    }
+
+    // Matrix * column-vector product.
+    #[inline]
+    fn mul_vector(&self, other: V2) -> V2 {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        #[allow(clippy::suspicious_operation_groupings)]
+        V2::new(
+            (x_axis.x() * other.x()) + (y_axis.x() * other.y()),
+            (x_axis.y() * other.x()) + (y_axis.y() * other.y()),
+        )
+    }
+
+    // Matrix product expressed as `self` applied to each column of `other`.
+    #[inline]
+    fn mul_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.mul_vector(*other.x_axis()),
+            self.mul_vector(*other.y_axis()),
+        )
+    }
+
+    #[inline]
+    fn mul_scalar(&self, other: T) -> Self {
+        Self::from_cols(
+            self.x_axis().mul_scalar(other),
+            self.y_axis().mul_scalar(other),
+        )
+    }
+
+    #[inline]
+    fn add_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.x_axis().add(*other.x_axis()),
+            self.y_axis().add(*other.y_axis()),
+        )
+    }
+
+    #[inline]
+    fn sub_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.x_axis().sub(*other.x_axis()),
+            self.y_axis().sub(*other.y_axis()),
+        )
+    }
+}
+
+/// 2x2 matrix trait for float types of T
+pub trait FloatMatrix2x2<T: FloatEx, V2: FloatVector2<T>>: Matrix2x2<T, V2> {
+    // Component-wise approximate equality with an absolute tolerance.
+    #[inline]
+    fn abs_diff_eq(&self, other: &Self, max_abs_diff: T) -> bool
+    where
+        <V2 as Vector<T>>::Mask: MaskVector2,
+    {
+        self.x_axis().abs_diff_eq(*other.x_axis(), max_abs_diff)
+            && self.y_axis().abs_diff_eq(*other.y_axis(), max_abs_diff)
+    }
+
+    #[inline]
+    fn neg_matrix(&self) -> Self {
+        Self::from_cols(self.x_axis().neg(), self.y_axis().neg())
+    }
+
+    // Rotation by `angle` (radians) combined with a non-uniform scale.
+    #[inline]
+    fn from_scale_angle(scale: V2, angle: T) -> Self {
+        let (sin, cos) = angle.sin_cos();
+        let (scale_x, scale_y) = scale.into_tuple();
+        Self::new(cos * scale_x, sin * scale_x, -sin * scale_y, cos * scale_y)
+    }
+
+    // Counter-clockwise rotation by `angle` (radians).
+    #[inline]
+    fn from_angle(angle: T) -> Self {
+        let (sin, cos) = angle.sin_cos();
+        Self::new(cos, sin, -sin, cos)
+    }
+
+    #[inline]
+    fn inverse(&self) -> Self {
+        // Adjugate divided by determinant; singular input is a debug-build assertion.
+        let inv_det = {
+            let det = self.determinant();
+            glam_assert!(det != T::ZERO);
+            det.recip()
+        };
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        Self::new(
+            y_axis.y() * inv_det,
+            x_axis.y() * -inv_det,
+            y_axis.x() * -inv_det,
+            x_axis.x() * inv_det,
+        )
+    }
+}
+
+/// 3x3 matrix trait, column-major. Also doubles as a 2D affine transform where
+/// the third column carries the translation in homogeneous coordinates.
+pub trait Matrix3x3<T: NumEx, V3: Vector3<T>>: Matrix<T> {
+    // Required methods: construction from columns and column accessors.
+    fn from_cols(x_axis: V3, y_axis: V3, z_axis: V3) -> Self;
+
+    fn x_axis(&self) -> &V3;
+    fn y_axis(&self) -> &V3;
+    fn z_axis(&self) -> &V3;
+
+    #[rustfmt::skip]
+    #[inline(always)]
+    fn from_cols_array(m: &[T; 9]) -> Self {
+        Self::from_cols(
+            V3::new(m[0], m[1], m[2]),
+            V3::new(m[3], m[4], m[5]),
+            V3::new(m[6], m[7], m[8]))
+    }
+
+    #[rustfmt::skip]
+    #[inline(always)]
+    fn to_cols_array(&self) -> [T; 9] {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        let z_axis = self.z_axis();
+        [
+            x_axis.x(), x_axis.y(), x_axis.z(),
+            y_axis.x(), y_axis.y(), y_axis.z(),
+            z_axis.x(), z_axis.y(), z_axis.z(),
+        ]
+    }
+
+    #[inline(always)]
+    fn from_cols_array_2d(m: &[[T; 3]; 3]) -> Self {
+        Self::from_cols(
+            V3::from_array(m[0]),
+            V3::from_array(m[1]),
+            V3::from_array(m[2]),
+        )
+    }
+
+    #[inline(always)]
+    fn to_cols_array_2d(&self) -> [[T; 3]; 3] {
+        [
+            self.x_axis().into_array(),
+            self.y_axis().into_array(),
+            self.z_axis().into_array(),
+        ]
+    }
+
+    // Panics if `m` has fewer than 9 elements.
+    #[inline(always)]
+    fn from_cols_slice(m: &[T]) -> Self {
+        Self::from_cols(
+            V3::new(m[0], m[1], m[2]),
+            V3::new(m[3], m[4], m[5]),
+            V3::new(m[6], m[7], m[8]),
+        )
+    }
+
+    // Panics if `slice` has fewer than 9 elements.
+    #[inline(always)]
+    fn write_cols_to_slice(&self, slice: &mut [T]) {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        let z_axis = self.z_axis();
+        slice[0] = x_axis.x();
+        slice[1] = x_axis.y();
+        slice[2] = x_axis.z();
+        slice[3] = y_axis.x();
+        slice[4] = y_axis.y();
+        slice[5] = y_axis.z();
+        slice[6] = z_axis.x();
+        slice[7] = z_axis.y();
+        slice[8] = z_axis.z();
+    }
+
+    #[rustfmt::skip]
+    #[inline(always)]
+    fn from_diagonal(diagonal: XYZ<T>) -> Self {
+        Self::from_cols(
+            V3::new(diagonal.x, T::ZERO, T::ZERO),
+            V3::new(T::ZERO, diagonal.y, T::ZERO),
+            V3::new(T::ZERO, T::ZERO, diagonal.z),
+        )
+    }
+
+    // 2D affine scale: scales x/y, leaves the homogeneous z column as identity.
+    #[inline(always)]
+    fn from_scale(scale: XY<T>) -> Self {
+        // Do not panic as long as any component is non-zero
+        glam_assert!(scale.cmpne(XY::<T>::ZERO).any());
+        Self::from_cols(
+            V3::new(scale.x, T::ZERO, T::ZERO),
+            V3::new(T::ZERO, scale.y, T::ZERO),
+            V3::Z,
+        )
+    }
+
+    // 2D affine translation stored in the third (homogeneous) column.
+    #[inline(always)]
+    fn from_translation(translation: XY<T>) -> Self {
+        Self::from_cols(V3::X, V3::Y, V3::new(translation.x, translation.y, T::ONE))
+    }
+
+    // Scalar triple product of the three columns.
+    #[inline]
+    fn determinant(&self) -> T {
+        self.z_axis().dot(self.x_axis().cross(*self.y_axis()))
+    }
+
+    #[inline]
+    fn transpose(&self) -> Self {
+        let x_axis = self.x_axis();
+        let y_axis = self.y_axis();
+        let z_axis = self.z_axis();
+        Self::from_cols(
+            V3::new(x_axis.x(), y_axis.x(), z_axis.x()),
+            V3::new(x_axis.y(), y_axis.y(), z_axis.y()),
+            V3::new(x_axis.z(), y_axis.z(), z_axis.z()),
+        )
+    }
+
+    // Matrix * column-vector as a linear combination of the columns.
+    #[inline]
+    fn mul_vector(&self, other: V3) -> V3 {
+        let mut res = self.x_axis().mul(other.splat_x());
+        res = res.add(self.y_axis().mul(other.splat_y()));
+        res = res.add(self.z_axis().mul(other.splat_z()));
+        res
+    }
+
+    // Matrix product expressed as `self` applied to each column of `other`.
+    #[inline]
+    fn mul_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.mul_vector(*other.x_axis()),
+            self.mul_vector(*other.y_axis()),
+            self.mul_vector(*other.z_axis()),
+        )
+    }
+
+    #[inline]
+    fn mul_scalar(&self, other: T) -> Self {
+        Self::from_cols(
+            self.x_axis().mul_scalar(other),
+            self.y_axis().mul_scalar(other),
+            self.z_axis().mul_scalar(other),
+        )
+    }
+
+    #[inline]
+    fn add_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.x_axis().add(*other.x_axis()),
+            self.y_axis().add(*other.y_axis()),
+            self.z_axis().add(*other.z_axis()),
+        )
+    }
+
+    #[inline]
+    fn sub_matrix(&self, other: &Self) -> Self {
+        Self::from_cols(
+            self.x_axis().sub(*other.x_axis()),
+            self.y_axis().sub(*other.y_axis()),
+            self.z_axis().sub(*other.z_axis()),
+        )
+    }
+}
+
+pub trait FloatMatrix3x3<T: FloatEx, V3: FloatVector3<T>>: Matrix3x3<T, V3> {
+ #[inline]
+ fn abs_diff_eq(&self, other: &Self, max_abs_diff: T) -> bool
+ where
+ <V3 as Vector<T>>::Mask: MaskVector3,
+ {
+ self.x_axis().abs_diff_eq(*other.x_axis(), max_abs_diff)
+ && self.y_axis().abs_diff_eq(*other.y_axis(), max_abs_diff)
+ && self.z_axis().abs_diff_eq(*other.z_axis(), max_abs_diff)
+ }
+
+ #[inline]
+ fn neg_matrix(&self) -> Self {
+ Self::from_cols(
+ self.x_axis().neg(),
+ self.y_axis().neg(),
+ self.z_axis().neg(),
+ )
+ }
+
+ #[inline]
+ fn from_angle(angle: T) -> Self {
+ let (sin, cos) = angle.sin_cos();
+ Self::from_cols(
+ V3::new(cos, sin, T::ZERO),
+ V3::new(-sin, cos, T::ZERO),
+ V3::Z,
+ )
+ }
+ #[inline]
+ fn from_scale_angle_translation(scale: XY<T>, angle: T, translation: XY<T>) -> Self {
+ let (sin, cos) = angle.sin_cos();
+ Self::from_cols(
+ V3::new(cos * scale.x, sin * scale.x, T::ZERO),
+ V3::new(-sin * scale.y, cos * scale.y, T::ZERO),
+ V3::new(translation.x, translation.y, T::ONE),
+ )
+ }
+
+ #[inline]
+ fn from_axis_angle(axis: XYZ<T>, angle: T) -> Self {
+ glam_assert!(axis.is_normalized());
+ let (sin, cos) = angle.sin_cos();
+ let (xsin, ysin, zsin) = axis.mul_scalar(sin).into_tuple();
+ let (x, y, z) = axis.into_tuple();
+ let (x2, y2, z2) = axis.mul(axis).into_tuple();
+ let omc = T::ONE - cos;
+ let xyomc = x * y * omc;
+ let xzomc = x * z * omc;
+ let yzomc = y * z * omc;
+ Self::from_cols(
+ V3::new(x2 * omc + cos, xyomc + zsin, xzomc - ysin),
+ V3::new(xyomc - zsin, y2 * omc + cos, yzomc + xsin),
+ V3::new(xzomc + ysin, yzomc - xsin, z2 * omc + cos),
+ )
+ }
+
+ #[inline]
+ fn from_quaternion(rotation: XYZW<T>) -> Self {
+ glam_assert!(rotation.is_normalized());
+ let x2 = rotation.x + rotation.x;
+ let y2 = rotation.y + rotation.y;
+ let z2 = rotation.z + rotation.z;
+ let xx = rotation.x * x2;
+ let xy = rotation.x * y2;
+ let xz = rotation.x * z2;
+ let yy = rotation.y * y2;
+ let yz = rotation.y * z2;
+ let zz = rotation.z * z2;
+ let wx = rotation.w * x2;
+ let wy = rotation.w * y2;
+ let wz = rotation.w * z2;
+
+ Self::from_cols(
+ V3::new(T::ONE - (yy + zz), xy + wz, xz - wy),
+ V3::new(xy - wz, T::ONE - (xx + zz), yz + wx),
+ V3::new(xz + wy, yz - wx, T::ONE - (xx + yy)),
+ )
+ }
+
+ #[inline]
+ fn from_rotation_x(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V3::X,
+ V3::new(T::ZERO, cosa, sina),
+ V3::new(T::ZERO, -sina, cosa),
+ )
+ }
+
+ #[inline]
+ fn from_rotation_y(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V3::new(cosa, T::ZERO, -sina),
+ V3::Y,
+ V3::new(sina, T::ZERO, cosa),
+ )
+ }
+
+ #[inline]
+ fn from_rotation_z(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V3::new(cosa, sina, T::ZERO),
+ V3::new(-sina, cosa, T::ZERO),
+ V3::Z,
+ )
+ }
+
+ fn transform_point2(&self, other: XY<T>) -> XY<T>;
+ fn transform_vector2(&self, other: XY<T>) -> XY<T>;
+
+ #[inline]
+ fn inverse(&self) -> Self
+ where
+ <V3 as Vector<T>>::Mask: MaskVector3,
+ {
+ let x_axis = self.x_axis();
+ let y_axis = self.y_axis();
+ let z_axis = self.z_axis();
+ let tmp0 = y_axis.cross(*z_axis);
+ let tmp1 = z_axis.cross(*x_axis);
+ let tmp2 = x_axis.cross(*y_axis);
+ let det = z_axis.dot_into_vec(tmp2);
+ glam_assert!(det.cmpne(V3::ZERO).all());
+ let inv_det = det.recip();
+ // TODO: Work out if it's possible to get rid of the transpose
+ Self::from_cols(tmp0.mul(inv_det), tmp1.mul(inv_det), tmp2.mul(inv_det)).transpose()
+ }
+
+ #[inline]
+ fn is_finite(&self) -> bool {
+ self.x_axis().is_finite() && self.y_axis().is_finite() && self.z_axis().is_finite()
+ }
+
+ #[inline]
+ fn is_nan(&self) -> bool {
+ self.x_axis().is_nan() || self.y_axis().is_nan() || self.z_axis().is_nan()
+ }
+}
+
+pub trait Matrix4x4<T: NumEx, V4: Vector4<T>>: Matrix<T> {
+ fn from_cols(x_axis: V4, y_axis: V4, z_axis: V4, w_axis: V4) -> Self;
+
+ fn x_axis(&self) -> &V4;
+ fn y_axis(&self) -> &V4;
+ fn z_axis(&self) -> &V4;
+ fn w_axis(&self) -> &V4;
+
+ #[rustfmt::skip]
+ #[inline(always)]
+ fn from_cols_array(m: &[T; 16]) -> Self {
+ Self::from_cols(
+ V4::new( m[0], m[1], m[2], m[3]),
+ V4::new( m[4], m[5], m[6], m[7]),
+ V4::new( m[8], m[9], m[10], m[11]),
+ V4::new(m[12], m[13], m[14], m[15]))
+ }
+
+ #[rustfmt::skip]
+ #[inline(always)]
+ fn to_cols_array(&self) -> [T; 16] {
+ let x_axis = self.x_axis();
+ let y_axis = self.y_axis();
+ let z_axis = self.z_axis();
+ let w_axis = self.w_axis();
+ [
+ x_axis.x(), x_axis.y(), x_axis.z(), x_axis.w(),
+ y_axis.x(), y_axis.y(), y_axis.z(), y_axis.w(),
+ z_axis.x(), z_axis.y(), z_axis.z(), z_axis.w(),
+ w_axis.x(), w_axis.y(), w_axis.z(), w_axis.w(),
+ ]
+ }
+
+ #[inline(always)]
+ fn from_cols_array_2d(m: &[[T; 4]; 4]) -> Self {
+ Self::from_cols(
+ Vector4::from_array(m[0]),
+ Vector4::from_array(m[1]),
+ Vector4::from_array(m[2]),
+ Vector4::from_array(m[3]),
+ )
+ }
+
+ #[inline(always)]
+ fn to_cols_array_2d(&self) -> [[T; 4]; 4] {
+ [
+ self.x_axis().into_array(),
+ self.y_axis().into_array(),
+ self.z_axis().into_array(),
+ self.w_axis().into_array(),
+ ]
+ }
+
+ #[rustfmt::skip]
+ #[inline(always)]
+ fn from_cols_slice(m: &[T]) -> Self {
+ Self::from_cols(
+ V4::new( m[0], m[1], m[2], m[3]),
+ V4::new( m[4], m[5], m[6], m[7]),
+ V4::new( m[8], m[9], m[10], m[11]),
+ V4::new(m[12], m[13], m[14], m[15]))
+ }
+
+ #[inline(always)]
+ fn write_cols_to_slice(&self, slice: &mut [T]) {
+ let x_axis = self.x_axis();
+ let y_axis = self.y_axis();
+ let z_axis = self.z_axis();
+ let w_axis = self.w_axis();
+ slice[0] = x_axis.x();
+ slice[1] = x_axis.y();
+ slice[2] = x_axis.z();
+ slice[3] = x_axis.w();
+
+ slice[4] = y_axis.x();
+ slice[5] = y_axis.y();
+ slice[6] = y_axis.z();
+ slice[7] = y_axis.w();
+
+ slice[8] = z_axis.x();
+ slice[9] = z_axis.y();
+ slice[10] = z_axis.z();
+ slice[11] = z_axis.w();
+
+ slice[12] = w_axis.x();
+ slice[13] = w_axis.y();
+ slice[14] = w_axis.z();
+ slice[15] = w_axis.w();
+ }
+
+ #[inline(always)]
+ fn from_diagonal(diagonal: XYZW<T>) -> Self {
+ Self::from_cols(
+ V4::new(diagonal.x, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, diagonal.y, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, diagonal.z, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, T::ZERO, diagonal.w),
+ )
+ }
+
+ #[inline(always)]
+ fn from_scale(scale: XYZ<T>) -> Self {
+ // Do not panic as long as any component is non-zero
+ glam_assert!(scale.cmpne(XYZ::<T>::ZERO).any());
+ Self::from_cols(
+ V4::new(scale.x, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, scale.y, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, scale.z, T::ZERO),
+ V4::W,
+ )
+ }
+
+ #[inline(always)]
+ fn from_translation(translation: XYZ<T>) -> Self {
+ Self::from_cols(
+ V4::X,
+ V4::Y,
+ V4::Z,
+ V4::new(translation.x, translation.y, translation.z, T::ONE),
+ )
+ }
+
+ #[inline]
+ fn determinant(&self) -> T {
+ let (m00, m01, m02, m03) = self.x_axis().into_tuple();
+ let (m10, m11, m12, m13) = self.y_axis().into_tuple();
+ let (m20, m21, m22, m23) = self.z_axis().into_tuple();
+ let (m30, m31, m32, m33) = self.w_axis().into_tuple();
+
+ let a2323 = m22 * m33 - m23 * m32;
+ let a1323 = m21 * m33 - m23 * m31;
+ let a1223 = m21 * m32 - m22 * m31;
+ let a0323 = m20 * m33 - m23 * m30;
+ let a0223 = m20 * m32 - m22 * m30;
+ let a0123 = m20 * m31 - m21 * m30;
+
+ m00 * (m11 * a2323 - m12 * a1323 + m13 * a1223)
+ - m01 * (m10 * a2323 - m12 * a0323 + m13 * a0223)
+ + m02 * (m10 * a1323 - m11 * a0323 + m13 * a0123)
+ - m03 * (m10 * a1223 - m11 * a0223 + m12 * a0123)
+ }
+
+ #[inline]
+ fn transpose(&self) -> Self {
+ let (m00, m01, m02, m03) = self.x_axis().into_tuple();
+ let (m10, m11, m12, m13) = self.y_axis().into_tuple();
+ let (m20, m21, m22, m23) = self.z_axis().into_tuple();
+ let (m30, m31, m32, m33) = self.w_axis().into_tuple();
+
+ Self::from_cols(
+ V4::new(m00, m10, m20, m30),
+ V4::new(m01, m11, m21, m31),
+ V4::new(m02, m12, m22, m32),
+ V4::new(m03, m13, m23, m33),
+ )
+ }
+
+ #[inline]
+ fn mul_vector(&self, other: V4) -> V4 {
+ let mut res = self.x_axis().mul(other.splat_x());
+ res = res.add(self.y_axis().mul(other.splat_y()));
+ res = res.add(self.z_axis().mul(other.splat_z()));
+ res = res.add(self.w_axis().mul(other.splat_w()));
+ res
+ }
+
+ #[inline]
+ fn mul_matrix(&self, other: &Self) -> Self {
+ Self::from_cols(
+ self.mul_vector(*other.x_axis()),
+ self.mul_vector(*other.y_axis()),
+ self.mul_vector(*other.z_axis()),
+ self.mul_vector(*other.w_axis()),
+ )
+ }
+
+ #[inline]
+ fn mul_scalar(&self, other: T) -> Self {
+ Self::from_cols(
+ self.x_axis().mul_scalar(other),
+ self.y_axis().mul_scalar(other),
+ self.z_axis().mul_scalar(other),
+ self.w_axis().mul_scalar(other),
+ )
+ }
+
+ #[inline]
+ fn add_matrix(&self, other: &Self) -> Self {
+ // TODO: Make Vector4::add take a ref?
+ Self::from_cols(
+ self.x_axis().add(*other.x_axis()),
+ self.y_axis().add(*other.y_axis()),
+ self.z_axis().add(*other.z_axis()),
+ self.w_axis().add(*other.w_axis()),
+ )
+ }
+
+ #[inline]
+ fn sub_matrix(&self, other: &Self) -> Self {
+ // TODO: Make Vector4::sub take a ref?
+ Self::from_cols(
+ self.x_axis().sub(*other.x_axis()),
+ self.y_axis().sub(*other.y_axis()),
+ self.z_axis().sub(*other.z_axis()),
+ self.w_axis().sub(*other.w_axis()),
+ )
+ }
+}
+
+pub trait FloatMatrix4x4<T: FloatEx, V4: FloatVector4<T> + Quaternion<T>>:
+ Matrix4x4<T, V4>
+{
+ // Vector3 represented by a SIMD type if available
+ type SIMDVector3;
+
+ #[inline]
+ fn abs_diff_eq(&self, other: &Self, max_abs_diff: T) -> bool
+ where
+ <V4 as Vector<T>>::Mask: MaskVector4,
+ {
+ self.x_axis().abs_diff_eq(*other.x_axis(), max_abs_diff)
+ && self.y_axis().abs_diff_eq(*other.y_axis(), max_abs_diff)
+ && self.z_axis().abs_diff_eq(*other.z_axis(), max_abs_diff)
+ && self.w_axis().abs_diff_eq(*other.w_axis(), max_abs_diff)
+ }
+
+ #[inline]
+ fn neg_matrix(&self) -> Self {
+ Self::from_cols(
+ self.x_axis().neg(),
+ self.y_axis().neg(),
+ self.z_axis().neg(),
+ self.w_axis().neg(),
+ )
+ }
+
+ #[inline]
+ fn quaternion_to_axes(rotation: V4) -> (V4, V4, V4) {
+ glam_assert!(rotation.is_normalized());
+ let (x, y, z, w) = rotation.into_tuple();
+ let x2 = x + x;
+ let y2 = y + y;
+ let z2 = z + z;
+ let xx = x * x2;
+ let xy = x * y2;
+ let xz = x * z2;
+ let yy = y * y2;
+ let yz = y * z2;
+ let zz = z * z2;
+ let wx = w * x2;
+ let wy = w * y2;
+ let wz = w * z2;
+
+ let x_axis = V4::new(T::ONE - (yy + zz), xy + wz, xz - wy, T::ZERO);
+ let y_axis = V4::new(xy - wz, T::ONE - (xx + zz), yz + wx, T::ZERO);
+ let z_axis = V4::new(xz + wy, yz - wx, T::ONE - (xx + yy), T::ZERO);
+ (x_axis, y_axis, z_axis)
+ }
+
+ #[inline]
+ fn from_quaternion(rotation: V4) -> Self {
+ let (x_axis, y_axis, z_axis) = Self::quaternion_to_axes(rotation);
+ Self::from_cols(x_axis, y_axis, z_axis, V4::W)
+ }
+
+ fn to_scale_quaternion_translation(&self) -> (XYZ<T>, V4, XYZ<T>) {
+ let det = self.determinant();
+ glam_assert!(det != T::ZERO);
+
+ let scale: XYZ<T> = Vector3::new(
+ self.x_axis().length() * det.signum(),
+ self.y_axis().length(),
+ self.z_axis().length(),
+ );
+
+ glam_assert!(scale.cmpne(XYZ::<T>::ZERO).all());
+
+ let inv_scale = scale.recip();
+
+ let rotation = Quaternion::from_rotation_axes(
+ self.x_axis().mul_scalar(inv_scale.x).into_xyz(),
+ self.y_axis().mul_scalar(inv_scale.y).into_xyz(),
+ self.z_axis().mul_scalar(inv_scale.z).into_xyz(),
+ );
+
+ let translation = self.w_axis().into_xyz();
+
+ (scale, rotation, translation)
+ }
+
+ #[inline]
+ fn from_scale_quaternion_translation(scale: XYZ<T>, rotation: V4, translation: XYZ<T>) -> Self {
+ let (x_axis, y_axis, z_axis) = Self::quaternion_to_axes(rotation);
+ Self::from_cols(
+ x_axis.mul_scalar(scale.x),
+ y_axis.mul_scalar(scale.y),
+ z_axis.mul_scalar(scale.z),
+ V4::from_xyz(translation, T::ONE),
+ )
+ }
+
+ #[inline]
+ fn from_quaternion_translation(rotation: V4, translation: XYZ<T>) -> Self {
+ let (x_axis, y_axis, z_axis) = Self::quaternion_to_axes(rotation);
+ Self::from_cols(x_axis, y_axis, z_axis, V4::from_xyz(translation, T::ONE))
+ }
+
+ #[inline]
+ fn from_axis_angle(axis: XYZ<T>, angle: T) -> Self {
+ glam_assert!(axis.is_normalized());
+ let (sin, cos) = angle.sin_cos();
+ let axis_sin = axis.mul_scalar(sin);
+ let axis_sq = axis.mul(axis);
+ let omc = T::ONE - cos;
+ let xyomc = axis.x * axis.y * omc;
+ let xzomc = axis.x * axis.z * omc;
+ let yzomc = axis.y * axis.z * omc;
+ Self::from_cols(
+ V4::new(
+ axis_sq.x * omc + cos,
+ xyomc + axis_sin.z,
+ xzomc - axis_sin.y,
+ T::ZERO,
+ ),
+ V4::new(
+ xyomc - axis_sin.z,
+ axis_sq.y * omc + cos,
+ yzomc + axis_sin.x,
+ T::ZERO,
+ ),
+ V4::new(
+ xzomc + axis_sin.y,
+ yzomc - axis_sin.x,
+ axis_sq.z * omc + cos,
+ T::ZERO,
+ ),
+ V4::W,
+ )
+ }
+
+ #[inline]
+ fn from_rotation_x(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V4::X,
+ V4::new(T::ZERO, cosa, sina, T::ZERO),
+ V4::new(T::ZERO, -sina, cosa, T::ZERO),
+ V4::W,
+ )
+ }
+
+ #[inline]
+ fn from_rotation_y(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V4::new(cosa, T::ZERO, -sina, T::ZERO),
+ V4::Y,
+ V4::new(sina, T::ZERO, cosa, T::ZERO),
+ V4::W,
+ )
+ }
+
+ #[inline]
+ fn from_rotation_z(angle: T) -> Self {
+ let (sina, cosa) = angle.sin_cos();
+ Self::from_cols(
+ V4::new(cosa, sina, T::ZERO, T::ZERO),
+ V4::new(-sina, cosa, T::ZERO, T::ZERO),
+ V4::Z,
+ V4::W,
+ )
+ }
+
+ #[inline]
+ fn look_to_lh(eye: XYZ<T>, dir: XYZ<T>, up: XYZ<T>) -> Self {
+ let f = dir.normalize();
+ let s = up.cross(f).normalize();
+ let u = f.cross(s);
+ Self::from_cols(
+ V4::new(s.x, u.x, f.x, T::ZERO),
+ V4::new(s.y, u.y, f.y, T::ZERO),
+ V4::new(s.z, u.z, f.z, T::ZERO),
+ V4::new(-s.dot(eye), -u.dot(eye), -f.dot(eye), T::ONE),
+ )
+ }
+
+ #[inline]
+ fn look_at_lh(eye: XYZ<T>, center: XYZ<T>, up: XYZ<T>) -> Self {
+ glam_assert!(up.is_normalized());
+ Self::look_to_lh(eye, center.sub(eye), up)
+ }
+
+ #[inline]
+ fn look_at_rh(eye: XYZ<T>, center: XYZ<T>, up: XYZ<T>) -> Self {
+ glam_assert!(up.is_normalized());
+ Self::look_to_lh(eye, eye.sub(center), up)
+ }
+
+ #[inline]
+ fn transform_point3(&self, other: XYZ<T>) -> XYZ<T> {
+ let mut res = self.x_axis().mul_scalar(other.x);
+ res = self.y_axis().mul_scalar(other.y).add(res);
+ res = self.z_axis().mul_scalar(other.z).add(res);
+ res = self.w_axis().add(res);
+ res.into_xyz()
+ }
+
+ #[inline]
+ fn transform_vector3(&self, other: XYZ<T>) -> XYZ<T> {
+ let mut res = self.x_axis().mul_scalar(other.x);
+ res = self.y_axis().mul_scalar(other.y).add(res);
+ res = self.z_axis().mul_scalar(other.z).add(res);
+ res.into_xyz()
+ }
+
+ #[inline]
+ fn project_point3(&self, other: XYZ<T>) -> XYZ<T> {
+ let mut res = self.x_axis().mul_scalar(other.x);
+ res = self.y_axis().mul_scalar(other.y).add(res);
+ res = self.z_axis().mul_scalar(other.z).add(res);
+ res = self.w_axis().add(res);
+ res = res.mul(res.splat_w().recip());
+ res.into_xyz()
+ }
+
+ fn transform_float4_as_point3(&self, other: Self::SIMDVector3) -> Self::SIMDVector3;
+ fn transform_float4_as_vector3(&self, other: Self::SIMDVector3) -> Self::SIMDVector3;
+ fn project_float4_as_point3(&self, other: Self::SIMDVector3) -> Self::SIMDVector3;
+
+ fn inverse(&self) -> Self {
+ let (m00, m01, m02, m03) = self.x_axis().into_tuple();
+ let (m10, m11, m12, m13) = self.y_axis().into_tuple();
+ let (m20, m21, m22, m23) = self.z_axis().into_tuple();
+ let (m30, m31, m32, m33) = self.w_axis().into_tuple();
+
+ let coef00 = m22 * m33 - m32 * m23;
+ let coef02 = m12 * m33 - m32 * m13;
+ let coef03 = m12 * m23 - m22 * m13;
+
+ let coef04 = m21 * m33 - m31 * m23;
+ let coef06 = m11 * m33 - m31 * m13;
+ let coef07 = m11 * m23 - m21 * m13;
+
+ let coef08 = m21 * m32 - m31 * m22;
+ let coef10 = m11 * m32 - m31 * m12;
+ let coef11 = m11 * m22 - m21 * m12;
+
+ let coef12 = m20 * m33 - m30 * m23;
+ let coef14 = m10 * m33 - m30 * m13;
+ let coef15 = m10 * m23 - m20 * m13;
+
+ let coef16 = m20 * m32 - m30 * m22;
+ let coef18 = m10 * m32 - m30 * m12;
+ let coef19 = m10 * m22 - m20 * m12;
+
+ let coef20 = m20 * m31 - m30 * m21;
+ let coef22 = m10 * m31 - m30 * m11;
+ let coef23 = m10 * m21 - m20 * m11;
+
+ let fac0 = V4::new(coef00, coef00, coef02, coef03);
+ let fac1 = V4::new(coef04, coef04, coef06, coef07);
+ let fac2 = V4::new(coef08, coef08, coef10, coef11);
+ let fac3 = V4::new(coef12, coef12, coef14, coef15);
+ let fac4 = V4::new(coef16, coef16, coef18, coef19);
+ let fac5 = V4::new(coef20, coef20, coef22, coef23);
+
+ let vec0 = V4::new(m10, m00, m00, m00);
+ let vec1 = V4::new(m11, m01, m01, m01);
+ let vec2 = V4::new(m12, m02, m02, m02);
+ let vec3 = V4::new(m13, m03, m03, m03);
+
+ let inv0 = vec1.mul(fac0).sub(vec2.mul(fac1)).add(vec3.mul(fac2));
+ let inv1 = vec0.mul(fac0).sub(vec2.mul(fac3)).add(vec3.mul(fac4));
+ let inv2 = vec0.mul(fac1).sub(vec1.mul(fac3)).add(vec3.mul(fac5));
+ let inv3 = vec0.mul(fac2).sub(vec1.mul(fac4)).add(vec2.mul(fac5));
+
+ let sign_a = Vector4::new(T::ONE, -T::ONE, T::ONE, -T::ONE);
+ let sign_b = Vector4::new(-T::ONE, T::ONE, -T::ONE, T::ONE);
+
+ let inverse = Self::from_cols(
+ inv0.mul(sign_a),
+ inv1.mul(sign_b),
+ inv2.mul(sign_a),
+ inv3.mul(sign_b),
+ );
+
+ let col0 = V4::new(
+ inverse.x_axis().x(),
+ inverse.y_axis().x(),
+ inverse.z_axis().x(),
+ inverse.w_axis().x(),
+ );
+
+ let dot0 = self.x_axis().mul(col0);
+ let dot1 = dot0.x() + dot0.y() + dot0.z() + dot0.w();
+
+ glam_assert!(dot1 != T::ZERO);
+
+ let rcp_det = dot1.recip();
+ inverse.mul_scalar(rcp_det)
+ }
+}
diff --git a/src/core/traits/mod.rs b/src/core/traits/mod.rs
new file mode 100644
index 0000000..917ae32
--- /dev/null
+++ b/src/core/traits/mod.rs
@@ -0,0 +1,5 @@
+pub mod matrix;
+pub mod projection;
+pub mod quaternion;
+pub mod scalar;
+pub mod vector;
diff --git a/src/core/traits/projection.rs b/src/core/traits/projection.rs
new file mode 100644
index 0000000..bc90b56
--- /dev/null
+++ b/src/core/traits/projection.rs
@@ -0,0 +1,164 @@
+use crate::core::traits::{
+ matrix::FloatMatrix4x4, quaternion::Quaternion, scalar::FloatEx, vector::*,
+};
+
+pub trait ProjectionMatrix<T: FloatEx, V4: FloatVector4<T> + Quaternion<T>>:
+ FloatMatrix4x4<T, V4>
+{
+ /// Creates a right-handed perspective projection matrix with [-1,1] depth range.
+ /// This is the same as the OpenGL [`gluPerspective`] function.
+ /// [`gluPerspective`]: <https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPerspective.xml>
+ fn perspective_rh_gl(fov_y_radians: T, aspect_ratio: T, z_near: T, z_far: T) -> Self {
+ let inv_length = T::ONE / (z_near - z_far);
+ let f = T::ONE / (T::HALF * fov_y_radians).tan();
+ let a = f / aspect_ratio;
+ let b = (z_near + z_far) * inv_length;
+ let c = (T::TWO * z_near * z_far) * inv_length;
+ Self::from_cols(
+ V4::new(a, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, f, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, b, -T::ONE),
+ V4::new(T::ZERO, T::ZERO, c, T::ZERO),
+ )
+ }
+
+ /// Creates a left-handed perspective projection matrix with [0,1] depth range.
+ fn perspective_lh(fov_y_radians: T, aspect_ratio: T, z_near: T, z_far: T) -> Self {
+ glam_assert!(z_near > T::ZERO && z_far > T::ZERO);
+ let (sin_fov, cos_fov) = (T::HALF * fov_y_radians).sin_cos();
+ let h = cos_fov / sin_fov;
+ let w = h / aspect_ratio;
+ let r = z_far / (z_far - z_near);
+ Self::from_cols(
+ V4::new(w, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, h, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, r, T::ONE),
+ V4::new(T::ZERO, T::ZERO, -r * z_near, T::ZERO),
+ )
+ }
+
+ /// Creates a right-handed perspective projection matrix with [0,1] depth range.
+ fn perspective_rh(fov_y_radians: T, aspect_ratio: T, z_near: T, z_far: T) -> Self {
+ glam_assert!(z_near > T::ZERO && z_far > T::ZERO);
+ let (sin_fov, cos_fov) = (T::HALF * fov_y_radians).sin_cos();
+ let h = cos_fov / sin_fov;
+ let w = h / aspect_ratio;
+ let r = z_far / (z_near - z_far);
+ Self::from_cols(
+ V4::new(w, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, h, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, r, -T::ONE),
+ V4::new(T::ZERO, T::ZERO, r * z_near, T::ZERO),
+ )
+ }
+
+ /// Creates an infinite left-handed perspective projection matrix with [0,1] depth range.
+ fn perspective_infinite_lh(fov_y_radians: T, aspect_ratio: T, z_near: T) -> Self {
+ glam_assert!(z_near > T::ZERO);
+ let (sin_fov, cos_fov) = (T::HALF * fov_y_radians).sin_cos();
+ let h = cos_fov / sin_fov;
+ let w = h / aspect_ratio;
+ Self::from_cols(
+ V4::new(w, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, h, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, T::ONE, T::ONE),
+ V4::new(T::ZERO, T::ZERO, -z_near, T::ZERO),
+ )
+ }
+
+ /// Creates an infinite reverse left-handed perspective projection matrix with [0,1] depth range.
+ fn perspective_infinite_reverse_lh(fov_y_radians: T, aspect_ratio: T, z_near: T) -> Self {
+ glam_assert!(z_near > T::ZERO);
+ let (sin_fov, cos_fov) = (T::HALF * fov_y_radians).sin_cos();
+ let h = cos_fov / sin_fov;
+ let w = h / aspect_ratio;
+ Self::from_cols(
+ V4::new(w, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, h, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, T::ZERO, T::ONE),
+ V4::new(T::ZERO, T::ZERO, z_near, T::ZERO),
+ )
+ }
+
+ /// Creates an infinite right-handed perspective projection matrix with
+ /// [0,1] depth range.
+ fn perspective_infinite_rh(fov_y_radians: T, aspect_ratio: T, z_near: T) -> Self {
+ glam_assert!(z_near > T::ZERO);
+ let f = T::ONE / (T::HALF * fov_y_radians).tan();
+ Self::from_cols(
+ V4::new(f / aspect_ratio, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, f, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, -T::ONE, -T::ONE),
+ V4::new(T::ZERO, T::ZERO, -z_near, T::ZERO),
+ )
+ }
+
+ /// Creates an infinite reverse right-handed perspective projection matrix
+ /// with [0,1] depth range.
+ fn perspective_infinite_reverse_rh(fov_y_radians: T, aspect_ratio: T, z_near: T) -> Self {
+ glam_assert!(z_near > T::ZERO);
+ let f = T::ONE / (T::HALF * fov_y_radians).tan();
+ Self::from_cols(
+ V4::new(f / aspect_ratio, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, f, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, T::ZERO, -T::ONE),
+ V4::new(T::ZERO, T::ZERO, z_near, T::ZERO),
+ )
+ }
+
+ /// Creates a right-handed orthographic projection matrix with [-1,1] depth
+ /// range. This is the same as the OpenGL [`glOrtho`] function.
+ /// See
+ /// [`glOrtho`]: <https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glOrtho.xml>
+ fn orthographic_rh_gl(left: T, right: T, bottom: T, top: T, near: T, far: T) -> Self {
+ let a = T::TWO / (right - left);
+ let b = T::TWO / (top - bottom);
+ let c = -T::TWO / (far - near);
+ let tx = -(right + left) / (right - left);
+ let ty = -(top + bottom) / (top - bottom);
+ let tz = -(far + near) / (far - near);
+
+ Self::from_cols(
+ V4::new(a, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, b, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, c, T::ZERO),
+ V4::new(tx, ty, tz, T::ONE),
+ )
+ }
+
+ /// Creates a left-handed orthographic projection matrix with [0,1] depth range.
+ fn orthographic_lh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> Self {
+ let rcp_width = T::ONE / (right - left);
+ let rcp_height = T::ONE / (top - bottom);
+ let r = T::ONE / (far - near);
+ Self::from_cols(
+ V4::new(rcp_width + rcp_width, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, rcp_height + rcp_height, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, r, T::ZERO),
+ V4::new(
+ -(left + right) * rcp_width,
+ -(top + bottom) * rcp_height,
+ -r * near,
+ T::ONE,
+ ),
+ )
+ }
+
+ /// Creates a right-handed orthographic projection matrix with [0,1] depth range.
+ fn orthographic_rh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> Self {
+ let rcp_width = T::ONE / (right - left);
+ let rcp_height = T::ONE / (top - bottom);
+ let r = T::ONE / (near - far);
+ Self::from_cols(
+ V4::new(rcp_width + rcp_width, T::ZERO, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, rcp_height + rcp_height, T::ZERO, T::ZERO),
+ V4::new(T::ZERO, T::ZERO, r, T::ZERO),
+ V4::new(
+ -(left + right) * rcp_width,
+ -(top + bottom) * rcp_height,
+ r * near,
+ T::ONE,
+ ),
+ )
+ }
+}
diff --git a/src/core/traits/quaternion.rs b/src/core/traits/quaternion.rs
new file mode 100644
index 0000000..8a3836c
--- /dev/null
+++ b/src/core/traits/quaternion.rs
@@ -0,0 +1,140 @@
+use crate::core::{
+ storage::XYZ,
+ traits::{
+ scalar::{FloatEx, NumEx},
+ vector::*,
+ },
+};
+
+pub trait Quaternion<T: FloatEx>: FloatVector4<T> {
+ type SIMDVector3;
+
+ #[inline]
+ fn from_axis_angle(axis: XYZ<T>, angle: T) -> Self {
+ glam_assert!(FloatVector3::is_normalized(axis));
+ let (s, c) = (angle * T::HALF).sin_cos();
+ let v = axis.mul_scalar(s);
+ Self::new(v.x, v.y, v.z, c)
+ }
+
+ #[inline]
+ fn from_rotation_x(angle: T) -> Self {
+ let (s, c) = (angle * T::HALF).sin_cos();
+ Self::new(s, T::ZERO, T::ZERO, c)
+ }
+
+ #[inline]
+ fn from_rotation_y(angle: T) -> Self {
+ let (s, c) = (angle * T::HALF).sin_cos();
+ Self::new(T::ZERO, s, T::ZERO, c)
+ }
+
+ #[inline]
+ fn from_rotation_z(angle: T) -> Self {
+ let (s, c) = (angle * T::HALF).sin_cos();
+ Self::new(T::ZERO, T::ZERO, s, c)
+ }
+
+ /// From the columns of a 3x3 rotation matrix.
+ #[inline]
+ fn from_rotation_axes(x_axis: XYZ<T>, y_axis: XYZ<T>, z_axis: XYZ<T>) -> Self {
+ // Based on https://github.com/microsoft/DirectXMath `XMQuaternionRotationMatrix`
+ // TODO: sse2 version
+ let (m00, m01, m02) = x_axis.into_tuple();
+ let (m10, m11, m12) = y_axis.into_tuple();
+ let (m20, m21, m22) = z_axis.into_tuple();
+ if m22 <= T::ZERO {
+ // x^2 + y^2 >= z^2 + w^2
+ let dif10 = m11 - m00;
+ let omm22 = T::ONE - m22;
+ if dif10 <= T::ZERO {
+ // x^2 >= y^2
+ let four_xsq = omm22 - dif10;
+ let inv4x = T::HALF / four_xsq.sqrt();
+ Self::new(
+ four_xsq * inv4x,
+ (m01 + m10) * inv4x,
+ (m02 + m20) * inv4x,
+ (m12 - m21) * inv4x,
+ )
+ } else {
+ // y^2 >= x^2
+ let four_ysq = omm22 + dif10;
+ let inv4y = T::HALF / four_ysq.sqrt();
+ Self::new(
+ (m01 + m10) * inv4y,
+ four_ysq * inv4y,
+ (m12 + m21) * inv4y,
+ (m20 - m02) * inv4y,
+ )
+ }
+ } else {
+ // z^2 + w^2 >= x^2 + y^2
+ let sum10 = m11 + m00;
+ let opm22 = T::ONE + m22;
+ if sum10 <= T::ZERO {
+ // z^2 >= w^2
+ let four_zsq = opm22 - sum10;
+ let inv4z = T::HALF / four_zsq.sqrt();
+ Self::new(
+ (m02 + m20) * inv4z,
+ (m12 + m21) * inv4z,
+ four_zsq * inv4z,
+ (m01 - m10) * inv4z,
+ )
+ } else {
+ // w^2 >= z^2
+ let four_wsq = opm22 + sum10;
+ let inv4w = T::HALF / four_wsq.sqrt();
+ Self::new(
+ (m12 - m21) * inv4w,
+ (m20 - m02) * inv4w,
+ (m01 - m10) * inv4w,
+ four_wsq * inv4w,
+ )
+ }
+ }
+ }
+
+ fn to_axis_angle(self) -> (XYZ<T>, T) {
+ // const EPSILON: f32 = 1.0e-8;
+ // const EPSILON_SQUARED: f32 = EPSILON * EPSILON;
+ let (x, y, z, w) = Vector4::into_tuple(self);
+ let angle = w.acos_approx() * T::TWO;
+ let scale_sq = NumEx::max(T::ONE - w * w, T::ZERO);
+ // TODO: constants for epsilons?
+ if scale_sq >= T::from_f32(1.0e-8 * 1.0e-8) {
+ (XYZ { x, y, z }.mul_scalar(scale_sq.sqrt().recip()), angle)
+ } else {
+ (Vector3Const::X, angle)
+ }
+ }
+
+ #[inline]
+ fn is_near_identity(self) -> bool {
+ // Based on https://github.com/nfrechette/rtm `rtm::quat_near_identity`
+ let threshold_angle = T::from_f64(0.002_847_144_6);
+ // Because of floating point precision, we cannot represent very small rotations.
+ // The closest f32 to 1.0 that is not 1.0 itself yields:
+ // 0.99999994.acos() * 2.0 = 0.000690533954 rad
+ //
+ // An error threshold of 1.e-6 is used by default.
+ // (1.0 - 1.e-6).acos() * 2.0 = 0.00284714461 rad
+ // (1.0 - 1.e-7).acos() * 2.0 = 0.00097656250 rad
+ //
+ // We don't really care about the angle value itself, only if it's close to 0.
+ // This will happen whenever quat.w is close to 1.0.
+ // If the quat.w is close to -1.0, the angle will be near 2*PI which is close to
+ // a negative 0 rotation. By forcing quat.w to be positive, we'll end up with
+ // the shortest path.
+ let positive_w_angle = self.as_ref_xyzw().w.abs().acos_approx() * T::TWO;
+ positive_w_angle < threshold_angle
+ }
+
+ fn conjugate(self) -> Self;
+ fn lerp(self, end: Self, s: T) -> Self;
+ fn slerp(self, end: Self, s: T) -> Self;
+ fn mul_quaternion(self, other: Self) -> Self;
+ fn mul_vector3(self, other: XYZ<T>) -> XYZ<T>;
+ fn mul_float4_as_vector3(self, other: Self::SIMDVector3) -> Self::SIMDVector3;
+}
diff --git a/src/core/traits/scalar.rs b/src/core/traits/scalar.rs
new file mode 100644
index 0000000..bf2b135
--- /dev/null
+++ b/src/core/traits/scalar.rs
@@ -0,0 +1,434 @@
+// Wait until this bug is fixed so that float cmp to zero doesn't report a warning.
+// https://github.com/rust-lang/rust-clippy/issues/3804
+#![allow(clippy::float_cmp)]
+// num_traits is optional as it adds 70% to compile times. It is needed by no_std builds
+#[cfg(feature = "libm")]
+pub use num_traits::{Float, Num, Signed};
+
+use core::{
+ marker::Sized,
+ ops::{Add, BitAnd, BitOr, BitXor, Div, Mul, Not, Rem, Shl, Shr, Sub},
+};
+
+// Stub the necessary parts of num traits
+#[cfg(not(feature = "libm"))]
+pub trait Num: PartialEq {}
+
+#[cfg(not(feature = "libm"))]
+pub trait Signed: Sized + Num + core::ops::Neg<Output = Self> {
+ fn abs(self) -> Self;
+ fn signum(self) -> Self;
+}
+
+#[cfg(not(feature = "libm"))]
+pub trait Float: Num + Copy + core::ops::Neg<Output = Self> {
+ fn asin(self) -> Self;
+ fn acos(self) -> Self;
+ fn ceil(self) -> Self;
+ fn exp(self) -> Self;
+ fn floor(self) -> Self;
+ fn is_finite(self) -> bool;
+ fn is_nan(self) -> bool;
+ fn mul_add(self, b: Self, c: Self) -> Self;
+ fn powf(self, n: Self) -> Self;
+ fn recip(self) -> Self;
+ fn round(self) -> Self;
+ fn sqrt(self) -> Self;
+ fn sin(self) -> Self;
+ fn sin_cos(self) -> (Self, Self);
+ fn tan(self) -> Self;
+}
+
+#[cfg(not(feature = "libm"))]
+macro_rules! impl_num_trait {
+ ($t:ident) => {
+ impl Num for $t {}
+ };
+}
+
+#[cfg(not(feature = "libm"))]
+macro_rules! impl_signed_trait {
+ ($t:ident) => {
+ impl_num_trait!($t);
+
+ impl Signed for $t {
+ #[inline(always)]
+ fn abs(self) -> Self {
+ $t::abs(self)
+ }
+ #[inline(always)]
+ fn signum(self) -> Self {
+ $t::signum(self)
+ }
+ }
+ };
+}
+
+#[cfg(not(feature = "libm"))]
+macro_rules! impl_float_trait {
+ ($t:ident) => {
+ impl_signed_trait!($t);
+
+ impl Float for $t {
+ #[inline(always)]
+ fn asin(self) -> Self {
+ $t::asin(self)
+ }
+ #[inline(always)]
+ fn acos(self) -> Self {
+ $t::acos(self)
+ }
+ #[inline(always)]
+ fn ceil(self) -> Self {
+ $t::ceil(self)
+ }
+ #[inline(always)]
+ fn exp(self) -> Self {
+ $t::exp(self)
+ }
+ #[inline(always)]
+ fn floor(self) -> Self {
+ $t::floor(self)
+ }
+ #[inline(always)]
+ fn is_finite(self) -> bool {
+ $t::is_finite(self)
+ }
+ #[inline(always)]
+ fn is_nan(self) -> bool {
+ $t::is_nan(self)
+ }
+ #[inline(always)]
+ fn mul_add(self, b: Self, c: Self) -> Self {
+ $t::mul_add(self, b, c)
+ }
+ #[inline(always)]
+ fn powf(self, n: Self) -> Self {
+ $t::powf(self, n)
+ }
+ #[inline(always)]
+ fn recip(self) -> Self {
+ $t::recip(self)
+ }
+ #[inline(always)]
+ fn round(self) -> Self {
+ $t::round(self)
+ }
+ #[inline(always)]
+ fn sin(self) -> Self {
+ $t::sin(self)
+ }
+ #[inline(always)]
+ fn sin_cos(self) -> (Self, Self) {
+ $t::sin_cos(self)
+ }
+ #[inline(always)]
+ fn sqrt(self) -> Self {
+ $t::sqrt(self)
+ }
+ #[inline(always)]
+ fn tan(self) -> Self {
+ $t::tan(self)
+ }
+ }
+ };
+}
+
+#[cfg(not(feature = "libm"))]
+impl_float_trait!(f32);
+#[cfg(not(feature = "libm"))]
+impl_float_trait!(f64);
+#[cfg(not(feature = "libm"))]
+impl_signed_trait!(i32);
+#[cfg(not(feature = "libm"))]
+impl_num_trait!(u32);
+
+pub trait MaskConst: Sized {
+ const MASK: [Self; 2];
+}
+
+pub trait NumConstEx: Sized {
+ const ZERO: Self;
+ const ONE: Self;
+}
+
+pub trait FloatConstEx: Sized {
+ const NEG_ONE: Self;
+ const TWO: Self;
+ const HALF: Self;
+}
+
+pub trait NanConstEx: Sized {
+ const NAN: Self;
+}
+
+pub trait NumEx:
+ Num
+ + NumConstEx
+ + Copy
+ + Clone
+ + PartialEq
+ + PartialOrd
+ + Add<Output = Self>
+ + Div<Output = Self>
+ + Mul<Output = Self>
+ + Sub<Output = Self>
+ + Rem<Output = Self>
+{
+ fn min(self, other: Self) -> Self;
+ fn max(self, other: Self) -> Self;
+}
+
+pub trait SignedEx: Signed + NumEx {}
+
+pub trait FloatEx: Float + FloatConstEx + SignedEx + NanConstEx {
+ /// Returns a very close approximation of `self.clamp(-1.0, 1.0).acos()`.
+ fn acos_approx(self) -> Self;
+ fn from_f32(f: f32) -> Self;
+ fn from_f64(f: f64) -> Self;
+}
+
+impl NumConstEx for f32 {
+ const ZERO: Self = 0.0;
+ const ONE: Self = 1.0;
+}
+
+impl NanConstEx for f32 {
+ const NAN: Self = f32::NAN;
+}
+
+impl FloatConstEx for f32 {
+ const NEG_ONE: Self = -1.0;
+ const TWO: Self = 2.0;
+ const HALF: Self = 0.5;
+}
+
+impl NumEx for f32 {
+ #[inline(always)]
+ fn min(self, other: Self) -> Self {
+ f32::min(self, other)
+ }
+ #[inline(always)]
+ fn max(self, other: Self) -> Self {
+ f32::max(self, other)
+ }
+}
+
+impl SignedEx for f32 {}
+
+impl FloatEx for f32 {
+ #[inline(always)]
+ fn from_f32(v: f32) -> Self {
+ v
+ }
+ #[inline(always)]
+ fn from_f64(v: f64) -> Self {
+ v as Self
+ }
+ #[inline(always)]
+ fn acos_approx(self) -> Self {
+ // Based on https://github.com/microsoft/DirectXMath `XMScalarAcos`
+ // Clamp input to [-1,1].
+ let nonnegative = self >= 0.0;
+ let x = self.abs();
+ let mut omx = 1.0 - x;
+ if omx < 0.0 {
+ omx = 0.0;
+ }
+ let root = omx.sqrt();
+
+ // 7-degree minimax approximation
+ #[allow(clippy::approx_constant)]
+ let mut result = ((((((-0.001_262_491_1 * x + 0.006_670_09) * x - 0.017_088_126) * x
+ + 0.030_891_88)
+ * x
+ - 0.050_174_303)
+ * x
+ + 0.088_978_99)
+ * x
+ - 0.214_598_8)
+ * x
+ + 1.570_796_3;
+ result *= root;
+
+ // acos(x) = pi - acos(-x) when x < 0
+ if nonnegative {
+ result
+ } else {
+ core::f32::consts::PI - result
+ }
+ }
+}
+
+impl NumConstEx for f64 {
+ const ZERO: Self = 0.0;
+ const ONE: Self = 1.0;
+}
+
+impl NanConstEx for f64 {
+ const NAN: Self = f64::NAN;
+}
+
+impl FloatConstEx for f64 {
+ const NEG_ONE: Self = -1.0;
+ const TWO: Self = 2.0;
+ const HALF: Self = 0.5;
+}
+
+impl NumEx for f64 {
+ #[inline(always)]
+ fn min(self, other: Self) -> Self {
+ f64::min(self, other)
+ }
+ #[inline(always)]
+ fn max(self, other: Self) -> Self {
+ f64::max(self, other)
+ }
+}
+
+impl SignedEx for f64 {}
+
+impl FloatEx for f64 {
+ #[inline(always)]
+ fn from_f32(v: f32) -> Self {
+ v as Self
+ }
+ #[inline(always)]
+ fn from_f64(v: f64) -> Self {
+ v
+ }
+ #[inline(always)]
+ fn acos_approx(self) -> Self {
+ f64::acos(self.max(-1.0).min(1.0))
+ }
+}
+
+impl NumConstEx for i32 {
+ const ZERO: Self = 0;
+ const ONE: Self = 1;
+}
+
+impl NumEx for i32 {
+ #[inline(always)]
+ fn min(self, other: Self) -> Self {
+ core::cmp::min(self, other)
+ }
+ #[inline(always)]
+ fn max(self, other: Self) -> Self {
+ core::cmp::max(self, other)
+ }
+}
+
+impl SignedEx for i32 {}
+
+impl NumConstEx for u32 {
+ const ZERO: Self = 0;
+ const ONE: Self = 1;
+}
+
+impl NumEx for u32 {
+ #[inline(always)]
+ fn min(self, other: Self) -> Self {
+ core::cmp::min(self, other)
+ }
+ #[inline(always)]
+ fn max(self, other: Self) -> Self {
+ core::cmp::max(self, other)
+ }
+}
+
+pub trait IntegerShiftOps<Rhs>: Sized + Shl<Rhs, Output = Self> + Shr<Rhs, Output = Self> {}
+
+pub trait IntegerBitOps:
+ Sized + Not<Output = Self> + BitAnd<Output = Self> + BitOr<Output = Self> + BitXor<Output = Self>
+{
+}
+
+impl IntegerShiftOps<i8> for i32 {}
+impl IntegerShiftOps<i16> for i32 {}
+impl IntegerShiftOps<i32> for i32 {}
+impl IntegerShiftOps<u8> for i32 {}
+impl IntegerShiftOps<u16> for i32 {}
+impl IntegerShiftOps<u32> for i32 {}
+
+impl IntegerShiftOps<i8> for u32 {}
+impl IntegerShiftOps<i16> for u32 {}
+impl IntegerShiftOps<i32> for u32 {}
+impl IntegerShiftOps<u8> for u32 {}
+impl IntegerShiftOps<u16> for u32 {}
+impl IntegerShiftOps<u32> for u32 {}
+
+impl IntegerBitOps for i32 {}
+impl IntegerBitOps for u32 {}
+
+#[cfg(test)]
+macro_rules! assert_approx_eq {
+ ($a:expr, $b:expr) => {{
+ assert_approx_eq!($a, $b, core::f32::EPSILON);
+ }};
+ ($a:expr, $b:expr, $eps:expr) => {{
+ let (a, b) = (&$a, &$b);
+ let eps = $eps;
+ assert!(
+ (a - b).abs() <= eps,
+ "assertion failed: `(left !== right)` \
+ (left: `{:?}`, right: `{:?}`, expect diff: `{:?}`, real diff: `{:?}`)",
+ *a,
+ *b,
+ eps,
+ (a - b).abs()
+ );
+ }};
+}
+
+#[cfg(test)]
+macro_rules! assert_relative_eq {
+ ($a:expr, $b:expr) => {{
+ assert_relative_eq!($a, $b, core::f32::EPSILON);
+ }};
+ ($a:expr, $b:expr, $eps:expr) => {{
+ let (a, b) = (&$a, &$b);
+ let eps = $eps;
+ let diff = (a - b).abs();
+ let largest = a.abs().max(b.abs());
+ assert!(
+ diff <= largest * eps,
+ "assertion failed: `(left !== right)` \
+ (left: `{:?}`, right: `{:?}`, expect diff: `{:?}`, real diff: `{:?}`)",
+ *a,
+ *b,
+ largest * eps,
+ diff
+ );
+ }};
+}
+
+#[test]
+fn test_scalar_acos() {
+ fn test_scalar_acos_angle(a: f32) {
+ // 1e-6 is the lowest epsilon that will pass
+ assert_relative_eq!(a.acos_approx(), a.acos(), 1e-6);
+ // assert_approx_eq!(scalar_acos(a), a.acos(), 1e-6);
+ }
+
+ // test 1024 floats between -1.0 and 1.0 inclusive
+ const MAX_TESTS: u32 = 1024 / 2;
+ const SIGN: u32 = 0x80_00_00_00;
+ const PTVE_ONE: u32 = 0x3f_80_00_00; // 1.0_f32.to_bits();
+ const NGVE_ONE: u32 = SIGN | PTVE_ONE;
+ const STEP_SIZE: usize = (PTVE_ONE / MAX_TESTS) as usize;
+ for f in (SIGN..=NGVE_ONE).step_by(STEP_SIZE).map(f32::from_bits) {
+ test_scalar_acos_angle(f);
+ }
+ for f in (0..=PTVE_ONE).step_by(STEP_SIZE).map(f32::from_bits) {
+ test_scalar_acos_angle(f);
+ }
+
+ // input is clamped to -1.0..1.0
+ assert_approx_eq!(2.0_f32.acos_approx(), 0.0);
+ assert_approx_eq!((-2.0_f32).acos_approx(), core::f32::consts::PI);
+
+ // input is clamped to -1.0..1.0
+ assert_eq!(2.0_f64.acos_approx(), 0.0);
+ assert!(((-2.0_f64).acos_approx() - core::f64::consts::PI).abs() < f64::EPSILON);
+}
diff --git a/src/core/traits/vector.rs b/src/core/traits/vector.rs
new file mode 100644
index 0000000..16ff5fe
--- /dev/null
+++ b/src/core/traits/vector.rs
@@ -0,0 +1,854 @@
+use super::scalar::{FloatEx, SignedEx};
+use crate::core::storage::{XY, XYZ, XYZW};
+use core::ops::{Add, Mul, Sub};
+
+/// Mask vector constants that are independent of vector length
+pub trait MaskVectorConst: Sized {
+ const FALSE: Self;
+}
+
+/// Mask vector methods that are independent of vector dimension.
+pub trait MaskVector: MaskVectorConst {
+ fn bitand(self, other: Self) -> Self;
+ fn bitor(self, other: Self) -> Self;
+ fn not(self) -> Self;
+}
+
+/// Mask vector methods specific to 2D vectors.
+pub trait MaskVector2: MaskVector {
+ fn new(x: bool, y: bool) -> Self;
+ fn bitmask(self) -> u32;
+ fn any(self) -> bool;
+ fn all(self) -> bool;
+ fn into_bool_array(self) -> [bool; 2];
+ fn into_u32_array(self) -> [u32; 2];
+}
+
+/// Mask vector methods specific to 3D vectors.
+pub trait MaskVector3: MaskVector {
+ fn new(x: bool, y: bool, z: bool) -> Self;
+ fn bitmask(self) -> u32;
+ fn any(self) -> bool;
+ fn all(self) -> bool;
+ fn into_bool_array(self) -> [bool; 3];
+ fn into_u32_array(self) -> [u32; 3];
+}
+
+/// Mask vector methods specific to 4D vectors.
+pub trait MaskVector4: MaskVector {
+ fn new(x: bool, y: bool, z: bool, w: bool) -> Self;
+ fn bitmask(self) -> u32;
+ fn any(self) -> bool;
+ fn all(self) -> bool;
+ fn into_bool_array(self) -> [bool; 4];
+ fn into_u32_array(self) -> [u32; 4];
+}
+
+/// Vector constants that are independent of vector dimension.
+pub trait VectorConst {
+ const ZERO: Self;
+ const ONE: Self;
+}
+
+/// Vector constants specific to 2D vectors.
+pub trait Vector2Const: VectorConst {
+ const X: Self;
+ const Y: Self;
+}
+
+/// Vector constants specific to 3D vectors.
+pub trait Vector3Const: VectorConst {
+ const X: Self;
+ const Y: Self;
+ const Z: Self;
+}
+
+/// Vector constants specific to 4D vectors.
+pub trait Vector4Const: VectorConst {
+ const X: Self;
+ const Y: Self;
+ const Z: Self;
+ const W: Self;
+}
+
+/// Vector methods that are independent of vector dimension.
+///
+/// These methods typically need to be implemented for each type as while the method signature does
+/// not imply any dimensionality, the implementation does.
+pub trait Vector<T>: Sized + Copy + Clone {
+ type Mask;
+
+ fn splat(s: T) -> Self;
+
+ fn select(mask: Self::Mask, a: Self, b: Self) -> Self;
+
+ fn cmpeq(self, other: Self) -> Self::Mask;
+ fn cmpne(self, other: Self) -> Self::Mask;
+ fn cmpge(self, other: Self) -> Self::Mask;
+ fn cmpgt(self, other: Self) -> Self::Mask;
+ fn cmple(self, other: Self) -> Self::Mask;
+ fn cmplt(self, other: Self) -> Self::Mask;
+
+ fn add(self, other: Self) -> Self;
+ fn div(self, other: Self) -> Self;
+ fn mul(self, other: Self) -> Self;
+ fn rem(self, rhs: Self) -> Self;
+ fn sub(self, other: Self) -> Self;
+
+ fn scale(self, other: T) -> Self {
+ self.mul_scalar(other)
+ }
+
+ fn add_scalar(self, other: T) -> Self;
+ fn sub_scalar(self, other: T) -> Self;
+ fn mul_scalar(self, other: T) -> Self;
+ fn div_scalar(self, other: T) -> Self;
+ fn rem_scalar(self, rhs: T) -> Self;
+
+ fn min(self, other: Self) -> Self;
+ fn max(self, other: Self) -> Self;
+}
+
+/// Vector methods specific to 2D vectors.
+pub trait Vector2<T>: Vector<T> + Vector2Const
+where
+ T: Copy + Mul<Output = T> + Sub<Output = T> + Add<Output = T>,
+{
+ fn new(x: T, y: T) -> Self;
+ fn x(self) -> T;
+ fn y(self) -> T;
+
+ fn as_ref_xy(&self) -> &XY<T>;
+ fn as_mut_xy(&mut self) -> &mut XY<T>;
+
+ // min and max behave differently for float and integer types in Rust, so we can't have a
+ // default implementation here.
+ fn min_element(self) -> T;
+ fn max_element(self) -> T;
+ fn clamp(self, min: Self, max: Self) -> Self;
+
+ #[inline(always)]
+ fn splat_x(self) -> Self {
+ Self::splat(self.x())
+ }
+
+ #[inline(always)]
+ fn splat_y(self) -> Self {
+ Self::splat(self.y())
+ }
+
+ #[inline(always)]
+ fn from_slice_unaligned(slice: &[T]) -> Self {
+ Self::new(slice[0], slice[1])
+ }
+
+ #[inline(always)]
+ fn write_to_slice_unaligned(self, slice: &mut [T]) {
+ slice[0] = self.x();
+ slice[1] = self.y();
+ }
+
+ #[inline(always)]
+ fn into_xyz(self, z: T) -> XYZ<T> {
+ XYZ {
+ x: self.x(),
+ y: self.y(),
+ z,
+ }
+ }
+
+ #[inline(always)]
+ fn into_xyzw(self, z: T, w: T) -> XYZW<T> {
+ XYZW {
+ x: self.x(),
+ y: self.y(),
+ z,
+ w,
+ }
+ }
+
+ #[inline(always)]
+ fn from_array(a: [T; 2]) -> Self {
+ Self::new(a[0], a[1])
+ }
+
+ #[inline(always)]
+ fn into_array(self) -> [T; 2] {
+ [self.x(), self.y()]
+ }
+
+ #[inline(always)]
+ fn from_tuple(t: (T, T)) -> Self {
+ Self::new(t.0, t.1)
+ }
+
+ #[inline(always)]
+ fn into_tuple(self) -> (T, T) {
+ (self.x(), self.y())
+ }
+
+ #[inline]
+ fn dot(self, other: Self) -> T {
+ (self.x() * other.x()) + (self.y() * other.y())
+ }
+
+ #[inline(always)]
+ fn dot_into_vec(self, other: Self) -> Self {
+ Self::splat(self.dot(other))
+ }
+}
+
+/// Vector methods specific to 3D vectors.
+pub trait Vector3<T>: Vector<T> + Vector3Const
+where
+ T: Copy + Mul<Output = T> + Sub<Output = T> + Add<Output = T>,
+{
+ fn new(x: T, y: T, z: T) -> Self;
+ fn x(self) -> T;
+ fn y(self) -> T;
+ fn z(self) -> T;
+
+ fn as_ref_xyz(&self) -> &XYZ<T>;
+ fn as_mut_xyz(&mut self) -> &mut XYZ<T>;
+
+ fn min_element(self) -> T;
+ fn max_element(self) -> T;
+ fn clamp(self, min: Self, max: Self) -> Self;
+
+ #[inline(always)]
+ fn splat_x(self) -> Self {
+ Self::splat(self.x())
+ }
+
+ #[inline(always)]
+ fn splat_y(self) -> Self {
+ Self::splat(self.y())
+ }
+
+ #[inline(always)]
+ fn splat_z(self) -> Self {
+ Self::splat(self.z())
+ }
+
+ #[inline(always)]
+ fn from_slice_unaligned(slice: &[T]) -> Self {
+ Self::new(slice[0], slice[1], slice[2])
+ }
+
+ #[inline(always)]
+ fn write_to_slice_unaligned(self, slice: &mut [T]) {
+ slice[0] = self.x();
+ slice[1] = self.y();
+ slice[2] = self.z();
+ }
+
+ #[inline(always)]
+ fn from_xy(v2: XY<T>, z: T) -> Self {
+ Self::new(v2.x, v2.y, z)
+ }
+
+ #[inline(always)]
+ fn from_xyzw(v4: XYZW<T>) -> Self {
+ Self::new(v4.x, v4.y, v4.z)
+ }
+
+ #[inline(always)]
+ fn into_xy(self) -> XY<T> {
+ XY {
+ x: self.x(),
+ y: self.y(),
+ }
+ }
+
+ #[inline(always)]
+ fn into_xyzw(self, w: T) -> XYZW<T> {
+ XYZW {
+ x: self.x(),
+ y: self.y(),
+ z: self.z(),
+ w,
+ }
+ }
+
+ #[inline(always)]
+ fn from_array(a: [T; 3]) -> Self {
+ Self::new(a[0], a[1], a[2])
+ }
+
+ #[inline(always)]
+ fn into_array(self) -> [T; 3] {
+ [self.x(), self.y(), self.z()]
+ }
+
+ #[inline(always)]
+ fn from_tuple(t: (T, T, T)) -> Self {
+ Self::new(t.0, t.1, t.2)
+ }
+
+ #[inline(always)]
+ fn into_tuple(self) -> (T, T, T) {
+ (self.x(), self.y(), self.z())
+ }
+
+ #[inline]
+ fn dot(self, other: Self) -> T {
+ (self.x() * other.x()) + (self.y() * other.y()) + (self.z() * other.z())
+ }
+
+ #[inline(always)]
+ fn dot_into_vec(self, other: Self) -> Self {
+ Self::splat(self.dot(other))
+ }
+
+ #[inline]
+ fn cross(self, other: Self) -> Self {
+ Self::new(
+ self.y() * other.z() - other.y() * self.z(),
+ self.z() * other.x() - other.z() * self.x(),
+ self.x() * other.y() - other.x() * self.y(),
+ )
+ }
+}
+
+/// Vector methods specific to 4D vectors.
+pub trait Vector4<T>: Vector<T> + Vector4Const
+where
+ T: Copy + Mul<Output = T> + Sub<Output = T> + Add<Output = T>,
+{
+ fn new(x: T, y: T, z: T, w: T) -> Self;
+
+ fn x(self) -> T;
+ fn y(self) -> T;
+ fn z(self) -> T;
+ fn w(self) -> T;
+
+ fn as_ref_xyzw(&self) -> &XYZW<T>;
+ fn as_mut_xyzw(&mut self) -> &mut XYZW<T>;
+
+ fn min_element(self) -> T;
+ fn max_element(self) -> T;
+ fn clamp(self, min: Self, max: Self) -> Self;
+
+ #[inline(always)]
+ fn splat_x(self) -> Self {
+ Self::splat(self.x())
+ }
+
+ #[inline(always)]
+ fn splat_y(self) -> Self {
+ Self::splat(self.y())
+ }
+
+ #[inline(always)]
+ fn splat_z(self) -> Self {
+ Self::splat(self.z())
+ }
+
+ #[inline(always)]
+ fn splat_w(self) -> Self {
+ Self::splat(self.w())
+ }
+
+ #[inline(always)]
+ fn from_slice_unaligned(slice: &[T]) -> Self {
+ Self::new(slice[0], slice[1], slice[2], slice[3])
+ }
+
+ #[inline(always)]
+ fn write_to_slice_unaligned(self, slice: &mut [T]) {
+ slice[0] = self.x();
+ slice[1] = self.y();
+ slice[2] = self.z();
+ slice[3] = self.w();
+ }
+
+ #[inline(always)]
+ fn from_xy(v2: XY<T>, z: T, w: T) -> Self {
+ Self::new(v2.x, v2.y, z, w)
+ }
+
+ #[inline(always)]
+ fn from_xyz(v3: XYZ<T>, w: T) -> Self {
+ Self::new(v3.x, v3.y, v3.z, w)
+ }
+
+ #[inline(always)]
+ fn into_xy(self) -> XY<T> {
+ XY {
+ x: self.x(),
+ y: self.y(),
+ }
+ }
+
+ #[inline(always)]
+ fn into_xyz(self) -> XYZ<T> {
+ XYZ {
+ x: self.x(),
+ y: self.y(),
+ z: self.z(),
+ }
+ }
+
+ #[inline(always)]
+ fn from_array(a: [T; 4]) -> Self {
+ Self::new(a[0], a[1], a[2], a[3])
+ }
+
+ #[inline(always)]
+ fn into_array(self) -> [T; 4] {
+ [self.x(), self.y(), self.z(), self.w()]
+ }
+
+ #[inline(always)]
+ fn from_tuple(t: (T, T, T, T)) -> Self {
+ Self::new(t.0, t.1, t.2, t.3)
+ }
+
+ #[inline(always)]
+ fn into_tuple(self) -> (T, T, T, T) {
+ (self.x(), self.y(), self.z(), self.w())
+ }
+
+ #[inline]
+ fn dot(self, other: Self) -> T {
+ (self.x() * other.x())
+ + (self.y() * other.y())
+ + (self.z() * other.z())
+ + (self.w() * other.w())
+ }
+
+ #[inline(always)]
+ fn dot_into_vec(self, other: Self) -> Self {
+ Self::splat(self.dot(other))
+ }
+}
+
+/// Vector methods for vectors of signed types that are independent of vector dimension.
+///
+/// These methods typically need to be implemented for each type as while the method signature does
+/// not imply any dimensionality, the implementation does.
+pub trait SignedVector<T: SignedEx>: Vector<T> {
+ fn neg(self) -> Self;
+}
+
+/// Vector methods specific to 2D vectors of signed types.
+pub trait SignedVector2<T: SignedEx>: SignedVector<T> + Vector2<T> {
+ #[inline]
+ fn abs(self) -> Self {
+ Self::new(self.x().abs(), self.y().abs())
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ Self::new(self.x().signum(), self.y().signum())
+ }
+
+ #[inline]
+ fn perp(self) -> Self {
+ Self::new(-self.y(), self.x())
+ }
+
+ #[inline]
+ fn perp_dot(self, other: Self) -> T {
+ (self.x() * other.y()) - (self.y() * other.x())
+ }
+}
+
+/// Vector methods specific to 3D vectors of signed types.
+pub trait SignedVector3<T: SignedEx>: SignedVector<T> + Vector3<T> {
+ #[inline]
+ fn abs(self) -> Self {
+ Self::new(self.x().abs(), self.y().abs(), self.z().abs())
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ Self::new(self.x().signum(), self.y().signum(), self.z().signum())
+ }
+}
+
+pub trait SignedVector4<T: SignedEx>: SignedVector<T> + Vector4<T> {
+ #[inline]
+ fn abs(self) -> Self {
+ Self::new(
+ self.x().abs(),
+ self.y().abs(),
+ self.z().abs(),
+ self.w().abs(),
+ )
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ Self::new(
+ self.x().signum(),
+ self.y().signum(),
+ self.z().signum(),
+ self.w().signum(),
+ )
+ }
+}
+
+pub trait FloatVector2<T: FloatEx>: SignedVector2<T> {
+ #[inline]
+ fn floor(self) -> Self {
+ Self::new(self.x().floor(), self.y().floor())
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ Self::new(self.x().ceil(), self.y().ceil())
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ Self::new(self.x().round(), self.y().round())
+ }
+
+ #[inline]
+ fn recip(self) -> Self {
+ Self::new(self.x().recip(), self.y().recip())
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ Self::new(self.x().exp(), self.y().exp())
+ }
+
+ #[inline]
+ fn powf(self, n: T) -> Self {
+ Self::new(self.x().powf(n), self.y().powf(n))
+ }
+
+ #[inline]
+ fn is_finite(self) -> bool {
+ self.x().is_finite() && self.y().is_finite()
+ }
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ self.x().is_nan() || self.y().is_nan()
+ }
+
+ #[inline]
+ fn mul_add(self, b: Self, c: Self) -> Self {
+ Self::new(
+ self.x().mul_add(b.x(), c.x()),
+ self.y().mul_add(b.y(), c.y()),
+ )
+ }
+
+ #[inline]
+ fn is_nan_mask(self) -> Self::Mask
+ where
+ <Self as Vector<T>>::Mask: MaskVector2,
+ {
+ Self::Mask::new(self.x().is_nan(), self.y().is_nan())
+ }
+
+ #[inline]
+ fn length(self) -> T {
+ self.dot(self).sqrt()
+ }
+
+ #[inline]
+ fn length_recip(self) -> T {
+ self.length().recip()
+ }
+
+ #[inline]
+ fn normalize(self) -> Self {
+ #[allow(clippy::let_and_return)]
+ let normalized = self.mul_scalar(self.length_recip());
+ glam_assert!(normalized.is_finite());
+ normalized
+ }
+
+ #[inline(always)]
+ fn length_squared(self) -> T {
+ self.dot(self)
+ }
+
+ #[inline]
+ fn is_normalized(self) -> bool {
+ // TODO: do something with epsilon
+ (self.length_squared() - T::ONE).abs() <= T::from_f64(1e-4)
+ }
+
+ #[inline]
+ fn abs_diff_eq(self, other: Self, max_abs_diff: T) -> bool
+ where
+ <Self as Vector<T>>::Mask: MaskVector2,
+ {
+ self.sub(other).abs().cmple(Self::splat(max_abs_diff)).all()
+ }
+
+ #[inline]
+ fn angle_between(self, other: Self) -> T {
+ let angle = (self.dot(other) / (self.length_squared() * other.length_squared()).sqrt())
+ .acos_approx();
+
+ if self.perp_dot(other) < T::ZERO {
+ -angle
+ } else {
+ angle
+ }
+ }
+}
+
+pub trait FloatVector3<T: FloatEx>: SignedVector3<T> {
+ #[inline]
+ fn floor(self) -> Self {
+ Self::new(self.x().floor(), self.y().floor(), self.z().floor())
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ Self::new(self.x().ceil(), self.y().ceil(), self.z().ceil())
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ Self::new(self.x().round(), self.y().round(), self.z().round())
+ }
+
+ #[inline]
+ fn recip(self) -> Self {
+ Self::new(self.x().recip(), self.y().recip(), self.z().recip())
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ Self::new(self.x().exp(), self.y().exp(), self.z().exp())
+ }
+
+ #[inline]
+ fn powf(self, n: T) -> Self {
+ Self::new(self.x().powf(n), self.y().powf(n), self.z().powf(n))
+ }
+
+ #[inline]
+ fn is_finite(self) -> bool {
+ self.x().is_finite() && self.y().is_finite() && self.z().is_finite()
+ }
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ self.x().is_nan() || self.y().is_nan() || self.z().is_nan()
+ }
+
+ #[inline]
+ fn mul_add(self, b: Self, c: Self) -> Self {
+ Self::new(
+ self.x().mul_add(b.x(), c.x()),
+ self.y().mul_add(b.y(), c.y()),
+ self.z().mul_add(b.z(), c.z()),
+ )
+ }
+
+ #[inline]
+ fn is_nan_mask(self) -> Self::Mask
+ where
+ <Self as Vector<T>>::Mask: MaskVector3,
+ {
+ Self::Mask::new(self.x().is_nan(), self.y().is_nan(), self.z().is_nan())
+ }
+
+ #[inline]
+ fn length(self) -> T {
+ self.dot(self).sqrt()
+ }
+
+ #[inline]
+ fn length_recip(self) -> T {
+ self.length().recip()
+ }
+
+ #[inline]
+ fn normalize(self) -> Self {
+ #[allow(clippy::let_and_return)]
+ let normalized = self.mul_scalar(self.length_recip());
+ glam_assert!(normalized.is_finite());
+ normalized
+ }
+
+ #[inline(always)]
+ fn length_squared(self) -> T {
+ self.dot(self)
+ }
+
+ #[inline]
+ fn is_normalized(self) -> bool {
+ // TODO: do something with epsilon
+ (self.length_squared() - T::ONE).abs() <= T::from_f64(1e-4)
+ }
+
+ #[inline]
+ fn abs_diff_eq(self, other: Self, max_abs_diff: T) -> bool
+ where
+ <Self as Vector<T>>::Mask: MaskVector3,
+ {
+ self.sub(other).abs().cmple(Self::splat(max_abs_diff)).all()
+ }
+
+ fn angle_between(self, other: Self) -> T {
+ self.dot(other)
+ .div(self.length_squared().mul(other.length_squared()).sqrt())
+ .acos_approx()
+ }
+}
+
+pub trait FloatVector4<T: FloatEx>: SignedVector4<T> {
+ #[inline]
+ fn floor(self) -> Self {
+ Self::new(
+ self.x().floor(),
+ self.y().floor(),
+ self.z().floor(),
+ self.w().floor(),
+ )
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ Self::new(
+ self.x().ceil(),
+ self.y().ceil(),
+ self.z().ceil(),
+ self.w().ceil(),
+ )
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ Self::new(
+ self.x().round(),
+ self.y().round(),
+ self.z().round(),
+ self.w().round(),
+ )
+ }
+
+ #[inline]
+ fn recip(self) -> Self {
+ Self::new(
+ self.x().recip(),
+ self.y().recip(),
+ self.z().recip(),
+ self.w().recip(),
+ )
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ Self::new(
+ self.x().exp(),
+ self.y().exp(),
+ self.z().exp(),
+ self.w().exp(),
+ )
+ }
+
+ #[inline]
+ fn powf(self, n: T) -> Self {
+ Self::new(
+ self.x().powf(n),
+ self.y().powf(n),
+ self.z().powf(n),
+ self.w().powf(n),
+ )
+ }
+
+ #[inline]
+ fn is_finite(self) -> bool {
+ self.x().is_finite() && self.y().is_finite() && self.z().is_finite() && self.w().is_finite()
+ }
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ self.x().is_nan() || self.y().is_nan() || self.z().is_nan() || self.w().is_nan()
+ }
+
+ #[inline]
+ fn mul_add(self, b: Self, c: Self) -> Self {
+ Self::new(
+ self.x().mul_add(b.x(), c.x()),
+ self.y().mul_add(b.y(), c.y()),
+ self.z().mul_add(b.z(), c.z()),
+ self.w().mul_add(b.w(), c.w()),
+ )
+ }
+
+ #[inline]
+ fn is_nan_mask(self) -> Self::Mask
+ where
+ <Self as Vector<T>>::Mask: MaskVector4,
+ {
+ Self::Mask::new(
+ self.x().is_nan(),
+ self.y().is_nan(),
+ self.z().is_nan(),
+ self.w().is_nan(),
+ )
+ }
+
+ #[inline]
+ fn length(self) -> T {
+ self.dot(self).sqrt()
+ }
+
+ #[inline]
+ fn length_recip(self) -> T {
+ self.length().recip()
+ }
+
+ #[inline]
+ fn normalize(self) -> Self {
+ #[allow(clippy::let_and_return)]
+ let normalized = self.mul_scalar(self.length_recip());
+ glam_assert!(normalized.is_finite());
+ normalized
+ }
+
+ #[inline(always)]
+ fn length_squared(self) -> T {
+ self.dot(self)
+ }
+
+ #[inline]
+ fn is_normalized(self) -> bool {
+ // TODO: do something with epsilon
+ (self.length_squared() - T::ONE).abs() <= T::from_f64(1e-4)
+ }
+
+ #[inline]
+ fn abs_diff_eq(self, other: Self, max_abs_diff: T) -> bool
+ where
+ <Self as Vector<T>>::Mask: MaskVector4,
+ {
+ self.sub(other).abs().cmple(Self::splat(max_abs_diff)).all()
+ }
+}
+
+pub trait ScalarShiftOps<Rhs> {
+ fn scalar_shl(self, rhs: Rhs) -> Self;
+ fn scalar_shr(self, rhs: Rhs) -> Self;
+}
+
+pub trait VectorShiftOps<Rhs> {
+ fn vector_shl(self, rhs: Rhs) -> Self;
+ fn vector_shr(self, rhs: Rhs) -> Self;
+}
+
+pub trait ScalarBitOps<Rhs> {
+ fn scalar_bitand(self, rhs: Rhs) -> Self;
+ fn scalar_bitor(self, rhs: Rhs) -> Self;
+ fn scalar_bitxor(self, rhs: Rhs) -> Self;
+}
+
+pub trait VectorBitOps<Rhs> {
+ fn not(self) -> Self;
+ fn vector_bitand(self, rhs: Rhs) -> Self;
+ fn vector_bitor(self, rhs: Rhs) -> Self;
+ fn vector_bitxor(self, rhs: Rhs) -> Self;
+}
diff --git a/src/core/wasm32/float.rs b/src/core/wasm32/float.rs
new file mode 100644
index 0000000..0bd80b4
--- /dev/null
+++ b/src/core/wasm32/float.rs
@@ -0,0 +1,113 @@
+use core::arch::wasm32::*;
+
+macro_rules! const_u32x4 {
+ ($ux4:expr) => {
+ unsafe { $crate::cast::UVec4Cast { ux4: $ux4 }.v128 }
+ };
+}
+
+const PS_NEGATIVE_ZERO: v128 = const_u32x4!([0x8000_0000; 4]);
+const PS_PI: v128 = const_f32x4!([core::f32::consts::PI; 4]);
+const PS_HALF_PI: v128 = const_f32x4!([core::f32::consts::FRAC_PI_2; 4]);
+const PS_SIN_COEFFICIENTS0: v128 =
+ const_f32x4!([-0.16666667, 0.008_333_331, -0.00019840874, 2.752_556_2e-6]);
+const PS_SIN_COEFFICIENTS1: v128 = const_f32x4!([
+ -2.388_985_9e-8,
+ -0.16665852, /*Est1*/
+ 0.008_313_95, /*Est2*/
+ -0.000_185_246_7 /*Est3*/
+]);
+const PS_ONE: v128 = const_f32x4!([1.0; 4]);
+const PS_TWO_PI: v128 = const_f32x4!([core::f32::consts::TAU; 4]);
+const PS_RECIPROCAL_TWO_PI: v128 = const_f32x4!([0.159_154_94; 4]);
+
+#[inline(always)]
+pub(crate) fn v128_mul_add(a: v128, b: v128, c: v128) -> v128 {
+ f32x4_add(f32x4_mul(a, b), c)
+}
+
+#[inline(always)]
+pub(crate) fn v128_neg_mul_sub(a: v128, b: v128, c: v128) -> v128 {
+ f32x4_sub(c, f32x4_mul(a, b))
+}
+
+/// Returns a vector whose components are the corresponding components of `angles`, modulo 2PI.
+#[inline]
+pub(crate) fn v128_mod_angles(angles: v128) -> v128 {
+ // Based on https://github.com/microsoft/DirectXMath `XMVectorModAngles`
+ let v = f32x4_mul(angles, PS_RECIPROCAL_TWO_PI);
+ let v = f32x4_nearest(v);
+ v128_neg_mul_sub(PS_TWO_PI, v, angles)
+}
+
+/// Computes the sine of the angle in each lane of `v`. Values outside
+/// the bounds of PI may produce an increasing error as the input angle
+/// drifts from `[-PI, PI]`.
+#[inline]
+pub(crate) fn v128_sin(v: v128) -> v128 {
+ // Based on https://github.com/microsoft/DirectXMath `XMVectorSin`
+
+ // 11-degree minimax approximation
+
+ // Force the value within the bounds of pi
+ let mut x = v128_mod_angles(v);
+
+ // Map in [-pi/2,pi/2] with sin(y) = sin(x).
+ let sign = v128_and(x, PS_NEGATIVE_ZERO);
+ // pi when x >= 0, -pi when x < 0
+ let c = v128_or(PS_PI, sign);
+ // |x|
+ let absx = v128_andnot(sign, x);
+ let rflx = f32x4_sub(c, x);
+ let comp = f32x4_le(absx, PS_HALF_PI);
+ let select0 = v128_and(comp, x);
+ let select1 = v128_andnot(comp, rflx);
+ x = v128_or(select0, select1);
+
+ let x2 = f32x4_mul(x, x);
+
+ // Compute polynomial approximation
+ const SC1: v128 = PS_SIN_COEFFICIENTS1;
+ let v_constants_b = i32x4_shuffle::<0, 0, 4, 4>(SC1, SC1);
+
+ const SC0: v128 = PS_SIN_COEFFICIENTS0;
+ let mut v_constants = i32x4_shuffle::<3, 3, 7, 7>(SC0, SC0);
+ let mut result = v128_mul_add(v_constants_b, x2, v_constants);
+
+ v_constants = i32x4_shuffle::<2, 2, 6, 6>(SC0, SC0);
+ result = v128_mul_add(result, x2, v_constants);
+
+ v_constants = i32x4_shuffle::<1, 1, 5, 5>(SC0, SC0);
+ result = v128_mul_add(result, x2, v_constants);
+
+ v_constants = i32x4_shuffle::<0, 0, 4, 4>(SC0, SC0);
+ result = v128_mul_add(result, x2, v_constants);
+
+ result = v128_mul_add(result, x2, PS_ONE);
+ result = f32x4_mul(result, x);
+
+ result
+}
+
+#[test]
+fn test_wasm32_v128_sin() {
+ use crate::core::traits::vector::*;
+ use core::f32::consts::PI;
+
+ fn test_wasm32_v128_sin_angle(a: f32) {
+ let v = v128_sin(f32x4_splat(a));
+ let v = v.as_ref_xyzw();
+ let a_sin = a.sin();
+ // dbg!((a, a_sin, v));
+ assert!(v.abs_diff_eq(Vector::splat(a_sin), 1e-6));
+ }
+
+ let mut a = -PI;
+ let end = PI;
+ let step = PI / 8192.0;
+
+ while a <= end {
+ test_wasm32_v128_sin_angle(a);
+ a += step;
+ }
+}
diff --git a/src/core/wasm32/matrix.rs b/src/core/wasm32/matrix.rs
new file mode 100644
index 0000000..d2a280a
--- /dev/null
+++ b/src/core/wasm32/matrix.rs
@@ -0,0 +1,532 @@
+use core::{arch::wasm32::*, mem::MaybeUninit};
+
+use crate::core::{
+ storage::{Columns2, Columns3, Columns4, XY, XYZ},
+ traits::{
+ matrix::{
+ FloatMatrix2x2, FloatMatrix3x3, FloatMatrix4x4, Matrix, Matrix2x2, Matrix3x3,
+ Matrix4x4, MatrixConst,
+ },
+ projection::ProjectionMatrix,
+ scalar::NanConstEx,
+ vector::{FloatVector4, Vector, Vector4, Vector4Const, VectorConst},
+ },
+};
+
+// v128 as a Matrix2x2
+impl MatrixConst for v128 {
+ const ZERO: v128 = const_f32x4!([0.0, 0.0, 0.0, 0.0]);
+ const IDENTITY: v128 = const_f32x4!([1.0, 0.0, 0.0, 1.0]);
+}
+
// Marker impl: enables the generic f32 `Matrix` API for the 2x2 `v128` form.
impl Matrix<f32> for v128 {}
+
impl Matrix2x2<f32, XY<f32>> for v128 {
    /// Creates a 2x2 matrix with elements packed column-major into the four
    /// lanes: [m00, m01, m10, m11].
    #[inline(always)]
    fn new(m00: f32, m01: f32, m10: f32, m11: f32) -> Self {
        f32x4(m00, m01, m10, m11)
    }

    #[inline(always)]
    fn from_cols(x_axis: XY<f32>, y_axis: XY<f32>) -> Self {
        Matrix2x2::new(x_axis.x, x_axis.y, y_axis.x, y_axis.y)
    }

    /// Returns the first column (lanes 0..1) viewed in place.
    // The cast relies on `v128` and `Columns2<XY<f32>>` sharing the layout
    // of four packed f32s.
    #[inline(always)]
    fn x_axis(&self) -> &XY<f32> {
        unsafe { &(*(self as *const Self as *const Columns2<XY<f32>>)).x_axis }
    }

    /// Returns the second column (lanes 2..3) viewed in place.
    #[inline(always)]
    fn y_axis(&self) -> &XY<f32> {
        unsafe { &(*(self as *const Self as *const Columns2<XY<f32>>)).y_axis }
    }

    #[inline]
    fn determinant(&self) -> f32 {
        // self.x_axis.x * self.y_axis.y - self.x_axis.y * self.y_axis.x
        let abcd = *self;
        // [d, c, b, a]
        let dcba = i32x4_shuffle::<3, 2, 5, 4>(abcd, abcd);
        // [a*d, b*c, c*b, d*a]
        let prod = f32x4_mul(abcd, dcba);
        // lane 0 = a*d - b*c
        let det = f32x4_sub(prod, i32x4_shuffle::<1, 1, 5, 5>(prod, prod));
        f32x4_extract_lane::<0>(det)
    }

    /// Transposes by swapping the off-diagonal lanes 1 and 2.
    #[inline(always)]
    fn transpose(&self) -> Self {
        i32x4_shuffle::<0, 2, 5, 7>(*self, *self)
    }

    /// Matrix * column-vector product.
    #[inline]
    fn mul_vector(&self, other: XY<f32>) -> XY<f32> {
        let abcd = *self;
        let xxyy = f32x4(other.x, other.x, other.y, other.y);
        let axbxcydy = f32x4_mul(abcd, xxyy);
        let cydyaxbx = i32x4_shuffle::<2, 3, 4, 5>(axbxcydy, axbxcydy);
        // lanes 0..1 = [a*x + c*y, b*x + d*y]
        let result = f32x4_add(axbxcydy, cydyaxbx);
        // Spill to memory so the first two lanes can be read back as an XY.
        let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
        unsafe {
            v128_store(out.as_mut_ptr(), result);
            *(&out.assume_init() as *const v128 as *const XY<f32>)
        }
    }

    /// Matrix product `self * other`, computed one column of `other` at a time.
    #[inline]
    fn mul_matrix(&self, other: &Self) -> Self {
        let abcd = *self;
        let other = *other;
        // Broadcast the x/y components of each column of `other`.
        let xxyy0 = i32x4_shuffle::<0, 0, 5, 5>(other, other);
        let xxyy1 = i32x4_shuffle::<2, 2, 7, 7>(other, other);
        let axbxcydy0 = f32x4_mul(abcd, xxyy0);
        let axbxcydy1 = f32x4_mul(abcd, xxyy1);
        let cydyaxbx0 = i32x4_shuffle::<2, 3, 4, 5>(axbxcydy0, axbxcydy0);
        let cydyaxbx1 = i32x4_shuffle::<2, 3, 4, 5>(axbxcydy1, axbxcydy1);
        let result0 = f32x4_add(axbxcydy0, cydyaxbx0);
        let result1 = f32x4_add(axbxcydy1, cydyaxbx1);
        // Pack the low halves of both column results into one register.
        i32x4_shuffle::<0, 1, 4, 5>(result0, result1)
    }

    #[inline]
    fn mul_scalar(&self, other: f32) -> Self {
        f32x4_mul(*self, f32x4_splat(other))
    }

    #[inline]
    fn add_matrix(&self, other: &Self) -> Self {
        f32x4_add(*self, *other)
    }

    #[inline]
    fn sub_matrix(&self, other: &Self) -> Self {
        f32x4_sub(*self, *other)
    }
}
+
impl FloatMatrix2x2<f32, XY<f32>> for v128 {
    #[inline]
    fn abs_diff_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        FloatVector4::abs_diff_eq(*self, *other, max_abs_diff)
    }

    #[inline]
    fn neg_matrix(&self) -> Self {
        f32x4_neg(*self)
    }

    /// Inverse via the 2x2 adjugate: [d, -b, -c, a] / det.
    #[inline]
    fn inverse(&self) -> Self {
        const SIGN: v128 = const_f32x4!([1.0, -1.0, -1.0, 1.0]);
        let abcd = *self;
        let dcba = i32x4_shuffle::<3, 2, 5, 4>(abcd, abcd);
        let prod = f32x4_mul(abcd, dcba);
        // lane 0 = a*d - b*c
        let sub = f32x4_sub(prod, i32x4_shuffle::<1, 1, 5, 5>(prod, prod));
        // Broadcast the determinant to all lanes.
        let det = i32x4_shuffle::<0, 0, 4, 4>(sub, sub);
        // Signed reciprocal determinant; non-finite when det == 0, which the
        // assert below catches in debug-style builds.
        let tmp = f32x4_div(SIGN, det);
        glam_assert!(tmp.is_finite());
        // [d, b, c, a]; multiplied by tmp this yields the adjugate / det.
        let dbca = i32x4_shuffle::<3, 1, 6, 4>(abcd, abcd);
        f32x4_mul(dbca, tmp)
    }
}
+
impl MatrixConst for Columns3<v128> {
    // All-zero 3x3 matrix.
    const ZERO: Columns3<v128> = Columns3 {
        x_axis: VectorConst::ZERO,
        y_axis: VectorConst::ZERO,
        z_axis: VectorConst::ZERO,
    };
    // 3x3 identity: unit basis vectors as columns.
    const IDENTITY: Columns3<v128> = Columns3 {
        x_axis: v128::X,
        y_axis: v128::Y,
        z_axis: v128::Z,
    };
}
+
impl NanConstEx for Columns3<v128> {
    // Matrix with every element set to NaN.
    const NAN: Columns3<v128> = Columns3 {
        x_axis: v128::NAN,
        y_axis: v128::NAN,
        z_axis: v128::NAN,
    };
}
+
// Marker impl: enables the generic f32 `Matrix` API for the 3x3 form.
impl Matrix<f32> for Columns3<v128> {}
+
impl Matrix3x3<f32, v128> for Columns3<v128> {
    #[inline(always)]
    fn from_cols(x_axis: v128, y_axis: v128, z_axis: v128) -> Self {
        Self {
            x_axis,
            y_axis,
            z_axis,
        }
    }

    #[inline(always)]
    fn x_axis(&self) -> &v128 {
        &self.x_axis
    }

    #[inline(always)]
    fn y_axis(&self) -> &v128 {
        &self.y_axis
    }

    #[inline(always)]
    fn z_axis(&self) -> &v128 {
        &self.z_axis
    }

    /// Transposes the 3x3 part; the (unused) w lanes end up holding
    /// duplicated z-column values.
    #[inline]
    fn transpose(&self) -> Self {
        // tmp0 = [x0, x1, y0, y1], tmp1 = [x2, x3, y2, y3]
        let tmp0 = i32x4_shuffle::<0, 1, 4, 5>(self.x_axis, self.y_axis);
        let tmp1 = i32x4_shuffle::<2, 3, 6, 7>(self.x_axis, self.y_axis);

        Self {
            x_axis: i32x4_shuffle::<0, 2, 4, 4>(tmp0, self.z_axis),
            y_axis: i32x4_shuffle::<1, 3, 5, 5>(tmp0, self.z_axis),
            z_axis: i32x4_shuffle::<0, 2, 6, 6>(tmp1, self.z_axis),
        }
    }
}
+
+impl FloatMatrix3x3<f32, v128> for Columns3<v128> {
+ #[inline]
+ fn transform_point2(&self, other: XY<f32>) -> XY<f32> {
+ let mut res = self.x_axis.mul_scalar(other.x);
+ res = self.y_axis.mul_scalar(other.y).add(res);
+ res = self.z_axis.add(res);
+ res.into()
+ }
+
+ #[inline]
+ fn transform_vector2(&self, other: XY<f32>) -> XY<f32> {
+ let mut res = self.x_axis.mul_scalar(other.x);
+ res = self.y_axis.mul_scalar(other.y).add(res);
+ res.into()
+ }
+}
+
impl MatrixConst for Columns4<v128> {
    // All-zero 4x4 matrix.
    const ZERO: Columns4<v128> = Columns4 {
        x_axis: VectorConst::ZERO,
        y_axis: VectorConst::ZERO,
        z_axis: VectorConst::ZERO,
        w_axis: VectorConst::ZERO,
    };
    // 4x4 identity: unit basis vectors as columns.
    const IDENTITY: Columns4<v128> = Columns4 {
        x_axis: v128::X,
        y_axis: v128::Y,
        z_axis: v128::Z,
        w_axis: v128::W,
    };
}
+
impl NanConstEx for Columns4<v128> {
    // Matrix with every element set to NaN.
    const NAN: Columns4<v128> = Columns4 {
        x_axis: v128::NAN,
        y_axis: v128::NAN,
        z_axis: v128::NAN,
        w_axis: v128::NAN,
    };
}
+
// Marker impl: enables the generic f32 `Matrix` API for the 4x4 form.
impl Matrix<f32> for Columns4<v128> {}
+
impl Matrix4x4<f32, v128> for Columns4<v128> {
    #[inline(always)]
    fn from_cols(x_axis: v128, y_axis: v128, z_axis: v128, w_axis: v128) -> Self {
        Self {
            x_axis,
            y_axis,
            z_axis,
            w_axis,
        }
    }

    #[inline(always)]
    fn x_axis(&self) -> &v128 {
        &self.x_axis
    }

    #[inline(always)]
    fn y_axis(&self) -> &v128 {
        &self.y_axis
    }

    #[inline(always)]
    fn z_axis(&self) -> &v128 {
        &self.z_axis
    }

    #[inline(always)]
    fn w_axis(&self) -> &v128 {
        &self.w_axis
    }

    #[inline]
    fn determinant(&self) -> f32 {
        // Based on https://github.com/g-truc/glm `glm_mat4_determinant`
        // Pair up z/w lanes so each product below forms one term of a 2x2
        // sub-determinant of the bottom two rows.
        let swp2a = i32x4_shuffle::<2, 1, 1, 0>(self.z_axis, self.z_axis);
        let swp3a = i32x4_shuffle::<3, 3, 2, 3>(self.w_axis, self.w_axis);
        let swp2b = i32x4_shuffle::<3, 3, 2, 3>(self.z_axis, self.z_axis);
        let swp3b = i32x4_shuffle::<2, 1, 1, 0>(self.w_axis, self.w_axis);
        let swp2c = i32x4_shuffle::<2, 1, 0, 0>(self.z_axis, self.z_axis);
        let swp3c = i32x4_shuffle::<0, 0, 2, 1>(self.w_axis, self.w_axis);

        let mula = f32x4_mul(swp2a, swp3a);
        let mulb = f32x4_mul(swp2b, swp3b);
        let mulc = f32x4_mul(swp2c, swp3c);
        let sube = f32x4_sub(mula, mulb);
        let subf = f32x4_sub(i32x4_shuffle::<6, 7, 2, 3>(mulc, mulc), mulc);

        // Combine the sub-determinants with y-axis lanes to form the 3x3
        // cofactors of the first row.
        let subfaca = i32x4_shuffle::<0, 0, 1, 2>(sube, sube);
        let swpfaca = i32x4_shuffle::<1, 0, 0, 0>(self.y_axis, self.y_axis);
        let mulfaca = f32x4_mul(swpfaca, subfaca);

        let subtmpb = i32x4_shuffle::<1, 3, 4, 4>(sube, subf);
        let subfacb = i32x4_shuffle::<0, 1, 1, 3>(subtmpb, subtmpb);
        let swpfacb = i32x4_shuffle::<2, 2, 1, 1>(self.y_axis, self.y_axis);
        let mulfacb = f32x4_mul(swpfacb, subfacb);

        let subres = f32x4_sub(mulfaca, mulfacb);
        let subtmpc = i32x4_shuffle::<2, 2, 4, 5>(sube, subf);
        let subfacc = i32x4_shuffle::<0, 2, 3, 3>(subtmpc, subtmpc);
        let swpfacc = i32x4_shuffle::<3, 3, 3, 2>(self.y_axis, self.y_axis);
        let mulfacc = f32x4_mul(swpfacc, subfacc);

        let addres = f32x4_add(subres, mulfacc);
        // Apply alternating cofactor signs, then Laplace-expand along x_axis.
        let detcof = f32x4_mul(addres, f32x4(1.0, -1.0, 1.0, -1.0));

        Vector4::dot(self.x_axis, detcof)
    }

    /// Full 4x4 transpose via two interleave stages.
    #[inline]
    fn transpose(&self) -> Self {
        // Based on https://github.com/microsoft/DirectXMath `XMMatrixTranspose`
        let tmp0 = i32x4_shuffle::<0, 1, 4, 5>(self.x_axis, self.y_axis);
        let tmp1 = i32x4_shuffle::<2, 3, 6, 7>(self.x_axis, self.y_axis);
        let tmp2 = i32x4_shuffle::<0, 1, 4, 5>(self.z_axis, self.w_axis);
        let tmp3 = i32x4_shuffle::<2, 3, 6, 7>(self.z_axis, self.w_axis);

        Self {
            x_axis: i32x4_shuffle::<0, 2, 4, 6>(tmp0, tmp2),
            y_axis: i32x4_shuffle::<1, 3, 5, 7>(tmp0, tmp2),
            z_axis: i32x4_shuffle::<0, 2, 4, 6>(tmp1, tmp3),
            w_axis: i32x4_shuffle::<1, 3, 5, 7>(tmp1, tmp3),
        }
    }
}
+
impl FloatMatrix4x4<f32, v128> for Columns4<v128> {
    type SIMDVector3 = v128;

    /// Computes the matrix inverse via cofactor expansion.
    ///
    /// `glam_assert!`s that the determinant is non-zero; with asserts
    /// disabled a singular matrix produces non-finite values.
    fn inverse(&self) -> Self {
        // Based on https://github.com/g-truc/glm `glm_mat4_inverse`
        // fac0..fac5 are the six unique 2x2 sub-determinants built from the
        // y/z/w axes, each broadcast into the lane pattern the cofactor
        // products below expect.
        let fac0 = {
            let swp0a = i32x4_shuffle::<3, 3, 7, 7>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<2, 2, 6, 6>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<2, 2, 6, 6>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<3, 3, 7, 7>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        let fac1 = {
            let swp0a = i32x4_shuffle::<3, 3, 7, 7>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<1, 1, 5, 5>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<1, 1, 5, 5>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<3, 3, 7, 7>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        let fac2 = {
            let swp0a = i32x4_shuffle::<2, 2, 6, 6>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<1, 1, 5, 5>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<1, 1, 5, 5>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<2, 2, 6, 6>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        let fac3 = {
            let swp0a = i32x4_shuffle::<3, 3, 7, 7>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<0, 0, 4, 4>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<0, 0, 4, 4>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<3, 3, 7, 7>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        let fac4 = {
            let swp0a = i32x4_shuffle::<2, 2, 6, 6>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<0, 0, 4, 4>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<0, 0, 4, 4>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<2, 2, 6, 6>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        let fac5 = {
            let swp0a = i32x4_shuffle::<1, 1, 5, 5>(self.w_axis, self.z_axis);
            let swp0b = i32x4_shuffle::<0, 0, 4, 4>(self.w_axis, self.z_axis);

            let swp00 = i32x4_shuffle::<0, 0, 4, 4>(self.z_axis, self.y_axis);
            let swp01 = i32x4_shuffle::<0, 0, 4, 6>(swp0a, swp0a);
            let swp02 = i32x4_shuffle::<0, 0, 4, 6>(swp0b, swp0b);
            let swp03 = i32x4_shuffle::<1, 1, 5, 5>(self.z_axis, self.y_axis);

            let mul00 = f32x4_mul(swp00, swp01);
            let mul01 = f32x4_mul(swp02, swp03);
            f32x4_sub(mul00, mul01)
        };
        // Alternating cofactor signs for even/odd columns.
        let sign_a = f32x4(-1.0, 1.0, -1.0, 1.0);
        let sign_b = f32x4(1.0, -1.0, 1.0, -1.0);

        // vec0..vec3 broadcast the x/y-axis lanes needed by each cofactor.
        let temp0 = i32x4_shuffle::<0, 0, 4, 4>(self.y_axis, self.x_axis);
        let vec0 = i32x4_shuffle::<0, 2, 6, 6>(temp0, temp0);

        let temp1 = i32x4_shuffle::<1, 1, 5, 5>(self.y_axis, self.x_axis);
        let vec1 = i32x4_shuffle::<0, 2, 6, 6>(temp1, temp1);

        let temp2 = i32x4_shuffle::<2, 2, 6, 6>(self.y_axis, self.x_axis);
        let vec2 = i32x4_shuffle::<0, 2, 6, 6>(temp2, temp2);

        let temp3 = i32x4_shuffle::<3, 3, 7, 7>(self.y_axis, self.x_axis);
        let vec3 = i32x4_shuffle::<0, 2, 6, 6>(temp3, temp3);

        // inv0..inv3 are the signed cofactor columns of the adjugate.
        let mul00 = f32x4_mul(vec1, fac0);
        let mul01 = f32x4_mul(vec2, fac1);
        let mul02 = f32x4_mul(vec3, fac2);
        let sub00 = f32x4_sub(mul00, mul01);
        let add00 = f32x4_add(sub00, mul02);
        let inv0 = f32x4_mul(sign_b, add00);

        let mul03 = f32x4_mul(vec0, fac0);
        let mul04 = f32x4_mul(vec2, fac3);
        let mul05 = f32x4_mul(vec3, fac4);
        let sub01 = f32x4_sub(mul03, mul04);
        let add01 = f32x4_add(sub01, mul05);
        let inv1 = f32x4_mul(sign_a, add01);

        let mul06 = f32x4_mul(vec0, fac1);
        let mul07 = f32x4_mul(vec1, fac3);
        let mul08 = f32x4_mul(vec3, fac5);
        let sub02 = f32x4_sub(mul06, mul07);
        let add02 = f32x4_add(sub02, mul08);
        let inv2 = f32x4_mul(sign_b, add02);

        let mul09 = f32x4_mul(vec0, fac2);
        let mul10 = f32x4_mul(vec1, fac4);
        let mul11 = f32x4_mul(vec2, fac5);
        let sub03 = f32x4_sub(mul09, mul10);
        let add03 = f32x4_add(sub03, mul11);
        let inv3 = f32x4_mul(sign_a, add03);

        // Gather the first adjugate row to compute the determinant.
        let row0 = i32x4_shuffle::<0, 0, 4, 4>(inv0, inv1);
        let row1 = i32x4_shuffle::<0, 0, 4, 4>(inv2, inv3);
        let row2 = i32x4_shuffle::<0, 2, 4, 6>(row0, row1);

        let dot0 = Vector4::dot(self.x_axis, row2);
        glam_assert!(dot0 != 0.0);

        let rcp0 = f32x4_splat(dot0.recip());

        // inverse = adjugate / determinant
        Self {
            x_axis: f32x4_mul(inv0, rcp0),
            y_axis: f32x4_mul(inv1, rcp0),
            z_axis: f32x4_mul(inv2, rcp0),
            w_axis: f32x4_mul(inv3, rcp0),
        }
    }

    /// Transforms a 3D point; includes the `w_axis` contribution (the
    /// translation column of an affine matrix).
    #[inline(always)]
    fn transform_point3(&self, other: XYZ<f32>) -> XYZ<f32> {
        self.x_axis
            .mul_scalar(other.x)
            .add(self.y_axis.mul_scalar(other.y))
            .add(self.z_axis.mul_scalar(other.z))
            .add(self.w_axis)
            .into()
    }

    /// Transforms a 3D direction vector; `w_axis` is not applied.
    #[inline(always)]
    fn transform_vector3(&self, other: XYZ<f32>) -> XYZ<f32> {
        self.x_axis
            .mul_scalar(other.x)
            .add(self.y_axis.mul_scalar(other.y))
            .add(self.z_axis.mul_scalar(other.z))
            .into()
    }

    /// SIMD variant of [`Self::transform_point3`]; the w lane of `other`
    /// is ignored (implicitly treated as 1).
    #[inline]
    fn transform_float4_as_point3(&self, other: v128) -> v128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = res.add(self.y_axis.mul(Vector4::splat_y(other)));
        res = res.add(self.z_axis.mul(Vector4::splat_z(other)));
        res = self.w_axis.add(res);
        res
    }

    /// SIMD variant of [`Self::transform_vector3`]; the w lane of `other`
    /// is ignored (implicitly treated as 0).
    #[inline]
    fn transform_float4_as_vector3(&self, other: v128) -> v128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = res.add(self.y_axis.mul(Vector4::splat_y(other)));
        res = res.add(self.z_axis.mul(Vector4::splat_z(other)));
        res
    }

    /// Transforms a point and performs the perspective divide by the
    /// resulting w lane.
    #[inline]
    fn project_float4_as_point3(&self, other: v128) -> v128 {
        let mut res = self.x_axis.mul(Vector4::splat_x(other));
        res = res.add(self.y_axis.mul(Vector4::splat_y(other)));
        res = res.add(self.z_axis.mul(Vector4::splat_z(other)));
        res = self.w_axis.add(res);
        res = res.mul(res.splat_w().recip());
        res
    }
}
+
// Marker impl: 4x4 matrices provide the projection-matrix constructors.
impl ProjectionMatrix<f32, v128> for Columns4<v128> {}
+
+impl From<Columns3<XYZ<f32>>> for Columns3<v128> {
+ #[inline(always)]
+ fn from(v: Columns3<XYZ<f32>>) -> Columns3<v128> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
+
+impl From<Columns3<v128>> for Columns3<XYZ<f32>> {
+ #[inline(always)]
+ fn from(v: Columns3<v128>) -> Columns3<XYZ<f32>> {
+ Self {
+ x_axis: v.x_axis.into(),
+ y_axis: v.y_axis.into(),
+ z_axis: v.z_axis.into(),
+ }
+ }
+}
diff --git a/src/core/wasm32/mod.rs b/src/core/wasm32/mod.rs
new file mode 100644
index 0000000..23a1dff
--- /dev/null
+++ b/src/core/wasm32/mod.rs
@@ -0,0 +1,4 @@
+// mod float;
+pub mod matrix;
+pub mod quaternion;
+pub mod vector;
diff --git a/src/core/wasm32/quaternion.rs b/src/core/wasm32/quaternion.rs
new file mode 100644
index 0000000..1899ee4
--- /dev/null
+++ b/src/core/wasm32/quaternion.rs
@@ -0,0 +1,130 @@
+use core::arch::wasm32::*;
+
+// use super::float::*;
+use crate::core::{
+ storage::XYZ,
+ traits::{quaternion::Quaternion, scalar::*, vector::*},
+};
+
+impl Quaternion<f32> for v128 {
+ type SIMDVector3 = v128;
+
+ #[inline(always)]
+ fn conjugate(self) -> Self {
+ const SIGN: v128 = const_f32x4!([-1.0, -1.0, -1.0, 1.0]);
+ f32x4_mul(self, SIGN)
+ }
+
+ #[inline]
+ fn lerp(self, end: Self, s: f32) -> Self {
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(end));
+
+ const NEG_ZERO: v128 = const_f32x4!([-0.0; 4]);
+ let start = self;
+ let end = end;
+ let dot = Vector4::dot_into_vec(start, end);
+ // Calculate the bias, if the dot product is positive or zero, there is no bias
+ // but if it is negative, we want to flip the 'end' rotation XYZW components
+ let bias = v128_and(dot, NEG_ZERO);
+ let interpolated = f32x4_add(
+ f32x4_mul(f32x4_sub(v128_xor(end, bias), start), f32x4_splat(s)),
+ start,
+ );
+ FloatVector4::normalize(interpolated)
+ }
+
+ #[inline]
+ fn slerp(self, end: Self, s: f32) -> Self {
+ // http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(end));
+
+ const DOT_THRESHOLD: f32 = 0.9995;
+
+ let dot = Vector4::dot(self, end);
+
+ if dot > DOT_THRESHOLD {
+ // assumes lerp returns a normalized quaternion
+ self.lerp(end, s)
+ } else {
+ // assumes scalar_acos clamps the input to [-1.0, 1.0]
+ let theta = dot.acos_approx();
+
+ // TODO: v128_sin is broken
+ // let x = 1.0 - s;
+ // let y = s;
+ // let z = 1.0;
+ // let w = 0.0;
+ // let tmp = f32x4_mul(f32x4_splat(theta), f32x4(x, y, z, w));
+ // let tmp = v128_sin(tmp);
+ let x = (theta * (1.0 - s)).sin();
+ let y = (theta * s).sin();
+ let z = theta.sin();
+ let w = 0.0;
+ let tmp = f32x4(x, y, z, w);
+
+ let scale1 = i32x4_shuffle::<0, 0, 4, 4>(tmp, tmp);
+ let scale2 = i32x4_shuffle::<1, 1, 5, 5>(tmp, tmp);
+ let theta_sin = i32x4_shuffle::<2, 2, 6, 6>(tmp, tmp);
+
+ self.mul(scale1).add(end.mul(scale2)).div(theta_sin)
+ }
+ }
+
+ #[inline]
+ fn mul_quaternion(self, other: Self) -> Self {
+ glam_assert!(FloatVector4::is_normalized(self));
+ glam_assert!(FloatVector4::is_normalized(other));
+ // Based on https://github.com/nfrechette/rtm `rtm::quat_mul`
+ let lhs = self;
+ let rhs = other;
+
+ const CONTROL_WZYX: v128 = const_f32x4!([1.0, -1.0, 1.0, -1.0]);
+ const CONTROL_ZWXY: v128 = const_f32x4!([1.0, 1.0, -1.0, -1.0]);
+ const CONTROL_YXWZ: v128 = const_f32x4!([-1.0, 1.0, 1.0, -1.0]);
+
+ let r_xxxx = i32x4_shuffle::<0, 0, 4, 4>(lhs, lhs);
+ let r_yyyy = i32x4_shuffle::<1, 1, 5, 5>(lhs, lhs);
+ let r_zzzz = i32x4_shuffle::<2, 2, 6, 6>(lhs, lhs);
+ let r_wwww = i32x4_shuffle::<3, 3, 7, 7>(lhs, lhs);
+
+ let lxrw_lyrw_lzrw_lwrw = f32x4_mul(r_wwww, rhs);
+ let l_wzyx = i32x4_shuffle::<3, 2, 5, 4>(rhs, rhs);
+
+ let lwrx_lzrx_lyrx_lxrx = f32x4_mul(r_xxxx, l_wzyx);
+ let l_zwxy = i32x4_shuffle::<1, 0, 7, 6>(l_wzyx, l_wzyx);
+
+ let lwrx_nlzrx_lyrx_nlxrx = f32x4_mul(lwrx_lzrx_lyrx_lxrx, CONTROL_WZYX);
+
+ let lzry_lwry_lxry_lyry = f32x4_mul(r_yyyy, l_zwxy);
+ let l_yxwz = i32x4_shuffle::<3, 2, 5, 4>(l_zwxy, l_zwxy);
+
+ let lzry_lwry_nlxry_nlyry = f32x4_mul(lzry_lwry_lxry_lyry, CONTROL_ZWXY);
+
+ let lyrz_lxrz_lwrz_lzrz = f32x4_mul(r_zzzz, l_yxwz);
+ let result0 = f32x4_add(lxrw_lyrw_lzrw_lwrw, lwrx_nlzrx_lyrx_nlxrx);
+
+ let nlyrz_lxrz_lwrz_wlzrz = f32x4_mul(lyrz_lxrz_lwrz_lzrz, CONTROL_YXWZ);
+ let result1 = f32x4_add(lzry_lwry_nlxry_nlyry, nlyrz_lxrz_lwrz_wlzrz);
+ f32x4_add(result0, result1)
+ }
+
+ #[inline]
+ fn mul_vector3(self, other: XYZ<f32>) -> XYZ<f32> {
+ self.mul_float4_as_vector3(other.into()).into()
+ }
+
+ #[inline]
+ fn mul_float4_as_vector3(self, other: v128) -> v128 {
+ glam_assert!(FloatVector4::is_normalized(self));
+ const TWO: v128 = const_f32x4!([2.0; 4]);
+ let w = i32x4_shuffle::<3, 3, 7, 7>(self, self);
+ let b = self;
+ let b2 = Vector3::dot_into_vec(b, b);
+ other
+ .mul(w.mul(w).sub(b2))
+ .add(b.mul(Vector3::dot_into_vec(other, b).mul(TWO)))
+ .add(b.cross(other).mul(w.mul(TWO)))
+ }
+}
diff --git a/src/core/wasm32/vector.rs b/src/core/wasm32/vector.rs
new file mode 100644
index 0000000..9ecaa77
--- /dev/null
+++ b/src/core/wasm32/vector.rs
@@ -0,0 +1,812 @@
+use crate::core::{
+ storage::{XY, XYZ, XYZW},
+ traits::{scalar::*, vector::*},
+};
+use core::arch::wasm32::*;
+use core::mem::MaybeUninit;
+
/// Returns a per-lane mask that is all ones where the lane is NaN, using
/// the IEEE-754 property that NaN != NaN.
#[inline(always)]
fn f32x4_isnan(v: v128) -> v128 {
    f32x4_ne(v, v)
}
+
/// Calculates the vector 3 dot product and returns answer in x lane of v128.
/// The remaining lanes contain intermediate values and are don't-care.
#[inline(always)]
fn dot3_in_x(lhs: v128, rhs: v128) -> v128 {
    let x2_y2_z2_w2 = f32x4_mul(lhs, rhs);
    let y2_0_0_0 = i32x4_shuffle::<1, 0, 0, 0>(x2_y2_z2_w2, x2_y2_z2_w2);
    let z2_0_0_0 = i32x4_shuffle::<2, 0, 0, 0>(x2_y2_z2_w2, x2_y2_z2_w2);
    let x2y2_0_0_0 = f32x4_add(x2_y2_z2_w2, y2_0_0_0);
    f32x4_add(x2y2_0_0_0, z2_0_0_0)
}
+
/// Calculates the vector 4 dot product and returns answer in x lane of v128.
/// The remaining lanes contain intermediate values and are don't-care.
#[inline(always)]
fn dot4_in_x(lhs: v128, rhs: v128) -> v128 {
    let x2_y2_z2_w2 = f32x4_mul(lhs, rhs);
    let z2_w2_0_0 = i32x4_shuffle::<2, 3, 0, 0>(x2_y2_z2_w2, x2_y2_z2_w2);
    let x2z2_y2w2_0_0 = f32x4_add(x2_y2_z2_w2, z2_w2_0_0);
    let y2w2_0_0_0 = i32x4_shuffle::<1, 0, 0, 0>(x2z2_y2w2_0_0, x2z2_y2w2_0_0);
    f32x4_add(x2z2_y2w2_0_0, y2w2_0_0_0)
}
+
impl MaskVectorConst for v128 {
    // All-bits-zero mask (every lane false).
    const FALSE: v128 = const_f32x4!([0.0; 4]);
}
+
// Lane-wise boolean operations on masks (lanes are all-ones or all-zeros).
impl MaskVector for v128 {
    #[inline(always)]
    fn bitand(self, other: Self) -> Self {
        v128_and(self, other)
    }

    #[inline(always)]
    fn bitor(self, other: Self) -> Self {
        v128_or(self, other)
    }

    #[inline]
    fn not(self) -> Self {
        v128_not(self)
    }
}
+
impl MaskVector3 for v128 {
    /// Builds a 3-lane mask; each lane is all ones or all zeros and the w
    /// lane is always zero.
    #[inline(always)]
    fn new(x: bool, y: bool, z: bool) -> Self {
        u32x4(
            MaskConst::MASK[x as usize],
            MaskConst::MASK[y as usize],
            MaskConst::MASK[z as usize],
            0,
        )
    }

    /// Packs the top bit of the x/y/z lanes into bits 0..2; the w lane is
    /// masked off.
    #[inline(always)]
    fn bitmask(self) -> u32 {
        (u32x4_bitmask(self) & 0x7) as u32
    }

    #[inline(always)]
    fn any(self) -> bool {
        (u32x4_bitmask(self) & 0x7) != 0
    }

    #[inline(always)]
    fn all(self) -> bool {
        (u32x4_bitmask(self) & 0x7) == 0x7
    }

    #[inline]
    fn into_bool_array(self) -> [bool; 3] {
        let bitmask = MaskVector3::bitmask(self);
        [(bitmask & 1) != 0, (bitmask & 2) != 0, (bitmask & 4) != 0]
    }

    /// Expands each mask bit back to 0 or `!0`.
    #[inline]
    fn into_u32_array(self) -> [u32; 3] {
        let bitmask = MaskVector3::bitmask(self);
        [
            MaskConst::MASK[(bitmask & 1) as usize],
            MaskConst::MASK[((bitmask >> 1) & 1) as usize],
            MaskConst::MASK[((bitmask >> 2) & 1) as usize],
        ]
    }
}
+
impl MaskVector4 for v128 {
    /// Builds a 4-lane mask; each lane is all ones or all zeros.
    #[inline(always)]
    fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
        u32x4(
            MaskConst::MASK[x as usize],
            MaskConst::MASK[y as usize],
            MaskConst::MASK[z as usize],
            MaskConst::MASK[w as usize],
        )
    }

    /// Packs the top bit of each lane into bits 0..3.
    #[inline(always)]
    fn bitmask(self) -> u32 {
        u32x4_bitmask(self) as u32
    }

    #[inline(always)]
    fn any(self) -> bool {
        u32x4_bitmask(self) != 0
    }

    #[inline(always)]
    fn all(self) -> bool {
        u32x4_bitmask(self) == 0xf
    }

    #[inline]
    fn into_bool_array(self) -> [bool; 4] {
        let bitmask = MaskVector4::bitmask(self);
        [
            (bitmask & 1) != 0,
            (bitmask & 2) != 0,
            (bitmask & 4) != 0,
            (bitmask & 8) != 0,
        ]
    }

    /// Expands each mask bit back to 0 or `!0`.
    #[inline]
    fn into_u32_array(self) -> [u32; 4] {
        let bitmask = MaskVector4::bitmask(self);
        [
            MaskConst::MASK[(bitmask & 1) as usize],
            MaskConst::MASK[((bitmask >> 1) & 1) as usize],
            MaskConst::MASK[((bitmask >> 2) & 1) as usize],
            MaskConst::MASK[((bitmask >> 3) & 1) as usize],
        ]
    }
}
+
impl VectorConst for v128 {
    // All lanes 0.0 / all lanes 1.0.
    const ZERO: v128 = const_f32x4!([0.0; 4]);
    const ONE: v128 = const_f32x4!([1.0; 4]);
}
+
impl NanConstEx for v128 {
    // All lanes NaN.
    const NAN: v128 = const_f32x4!([f32::NAN; 4]);
}
+
impl Vector3Const for v128 {
    // Unit basis vectors; the w lane is zero.
    const X: v128 = const_f32x4!([1.0, 0.0, 0.0, 0.0]);
    const Y: v128 = const_f32x4!([0.0, 1.0, 0.0, 0.0]);
    const Z: v128 = const_f32x4!([0.0, 0.0, 1.0, 0.0]);
}
+
impl Vector4Const for v128 {
    // Unit basis vectors.
    const X: v128 = const_f32x4!([1.0, 0.0, 0.0, 0.0]);
    const Y: v128 = const_f32x4!([0.0, 1.0, 0.0, 0.0]);
    const Z: v128 = const_f32x4!([0.0, 0.0, 1.0, 0.0]);
    const W: v128 = const_f32x4!([0.0, 0.0, 0.0, 1.0]);
}
+
// Lane-wise f32 arithmetic and comparisons; comparisons return full-lane
// masks (all ones / all zeros).
impl Vector<f32> for v128 {
    type Mask = v128;

    #[inline(always)]
    fn splat(s: f32) -> Self {
        f32x4_splat(s)
    }

    /// Per-lane select. Note `v128_bitselect(if_true, if_false, mask)`
    /// takes bits from `if_true` where `mask` is set.
    #[inline(always)]
    fn select(mask: Self::Mask, if_true: Self, if_false: Self) -> Self {
        v128_bitselect(if_true, if_false, mask)
    }

    #[inline(always)]
    fn cmpeq(self, other: Self) -> Self::Mask {
        f32x4_eq(self, other)
    }

    #[inline(always)]
    fn cmpne(self, other: Self) -> Self::Mask {
        f32x4_ne(self, other)
    }

    #[inline(always)]
    fn cmpge(self, other: Self) -> Self::Mask {
        f32x4_ge(self, other)
    }

    #[inline(always)]
    fn cmpgt(self, other: Self) -> Self::Mask {
        f32x4_gt(self, other)
    }

    #[inline(always)]
    fn cmple(self, other: Self) -> Self::Mask {
        f32x4_le(self, other)
    }

    #[inline(always)]
    fn cmplt(self, other: Self) -> Self::Mask {
        f32x4_lt(self, other)
    }

    #[inline(always)]
    fn add(self, other: Self) -> Self {
        f32x4_add(self, other)
    }

    #[inline(always)]
    fn div(self, other: Self) -> Self {
        f32x4_div(self, other)
    }

    #[inline(always)]
    fn mul(self, other: Self) -> Self {
        f32x4_mul(self, other)
    }

    #[inline(always)]
    fn sub(self, other: Self) -> Self {
        f32x4_sub(self, other)
    }

    #[inline(always)]
    fn add_scalar(self, other: f32) -> Self {
        f32x4_add(self, f32x4_splat(other))
    }

    #[inline(always)]
    fn sub_scalar(self, other: f32) -> Self {
        f32x4_sub(self, f32x4_splat(other))
    }

    #[inline(always)]
    fn mul_scalar(self, other: f32) -> Self {
        f32x4_mul(self, f32x4_splat(other))
    }

    #[inline(always)]
    fn div_scalar(self, other: f32) -> Self {
        f32x4_div(self, f32x4_splat(other))
    }

    /// Floored-division remainder: `self - floor(self / other) * other`.
    /// This differs from Rust's truncating `%` for mixed signs.
    #[inline(always)]
    fn rem(self, other: Self) -> Self {
        let n = f32x4_floor(f32x4_div(self, other));
        f32x4_sub(self, f32x4_mul(n, other))
    }

    #[inline(always)]
    fn rem_scalar(self, other: f32) -> Self {
        self.rem(f32x4_splat(other))
    }

    /// Per the wasm spec, `f32x4_pmin(a, b)` is `b < a ? b : a`, so its
    /// NaN propagation differs from `f32::min`.
    #[inline(always)]
    fn min(self, other: Self) -> Self {
        f32x4_pmin(self, other)
    }

    /// Per the wasm spec, `f32x4_pmax(a, b)` is `a < b ? b : a`, so its
    /// NaN propagation differs from `f32::max`.
    #[inline(always)]
    fn max(self, other: Self) -> Self {
        f32x4_pmax(self, other)
    }
}
+
impl Vector3<f32> for v128 {
    /// Creates a 3D vector; the w lane is filled with `x` rather than left
    /// undefined, and is a don't-care for 3D operations.
    #[inline(always)]
    fn new(x: f32, y: f32, z: f32) -> Self {
        f32x4(x, y, z, x)
    }

    #[inline(always)]
    fn x(self) -> f32 {
        f32x4_extract_lane::<0>(self)
    }

    #[inline(always)]
    fn y(self) -> f32 {
        f32x4_extract_lane::<1>(self)
    }

    #[inline(always)]
    fn z(self) -> f32 {
        f32x4_extract_lane::<2>(self)
    }

    #[inline(always)]
    fn splat_x(self) -> Self {
        i32x4_shuffle::<0, 0, 0, 0>(self, self)
    }

    #[inline(always)]
    fn splat_y(self) -> Self {
        i32x4_shuffle::<1, 1, 1, 1>(self, self)
    }

    #[inline(always)]
    fn splat_z(self) -> Self {
        i32x4_shuffle::<2, 2, 2, 2>(self, self)
    }

    /// Panics if `slice` has fewer than three elements.
    #[inline(always)]
    fn from_slice_unaligned(slice: &[f32]) -> Self {
        Vector3::new(slice[0], slice[1], slice[2])
    }

    /// Panics if `slice` has fewer than three elements.
    #[inline(always)]
    fn write_to_slice_unaligned(self, slice: &mut [f32]) {
        let xyz = self.as_ref_xyz();
        slice[0] = xyz.x;
        slice[1] = xyz.y;
        slice[2] = xyz.z;
    }

    /// Views the first three lanes in place as an `XYZ`.
    // Relies on v128 having the same layout as four packed f32s.
    #[inline(always)]
    fn as_ref_xyz(&self) -> &XYZ<f32> {
        unsafe { &*(self as *const Self as *const XYZ<f32>) }
    }

    #[inline(always)]
    fn as_mut_xyz(&mut self) -> &mut XYZ<f32> {
        unsafe { &mut *(self as *mut Self as *mut XYZ<f32>) }
    }

    #[inline(always)]
    fn into_xy(self) -> XY<f32> {
        XY {
            x: f32x4_extract_lane::<0>(self),
            y: f32x4_extract_lane::<1>(self),
        }
    }

    /// Returns all four lanes with the w lane replaced by `w`.
    #[inline]
    fn into_xyzw(self, w: f32) -> XYZW<f32> {
        let v = f32x4_replace_lane::<3>(self, w);
        unsafe { *(&v as *const v128 as *const XYZW<f32>) }
    }

    #[inline(always)]
    fn from_array(a: [f32; 3]) -> Self {
        Vector3::new(a[0], a[1], a[2])
    }

    /// Stores the register and reads back the first 12 bytes (three f32).
    #[inline(always)]
    fn into_array(self) -> [f32; 3] {
        let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
        unsafe {
            v128_store(out.as_mut_ptr(), self);
            *(&out.assume_init() as *const v128 as *const [f32; 3])
        }
    }

    #[inline(always)]
    fn from_tuple(t: (f32, f32, f32)) -> Self {
        Vector3::new(t.0, t.1, t.2)
    }

    #[inline(always)]
    fn into_tuple(self) -> (f32, f32, f32) {
        let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
        unsafe {
            v128_store(out.as_mut_ptr(), self);
            *(&out.assume_init() as *const v128 as *const (f32, f32, f32))
        }
    }

    /// Horizontal minimum of the x/y/z lanes.
    #[inline]
    fn min_element(self) -> f32 {
        let v = self;
        let v = f32x4_pmin(v, i32x4_shuffle::<2, 2, 1, 1>(v, v));
        let v = f32x4_pmin(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }

    /// Horizontal maximum of the x/y/z lanes.
    #[inline]
    fn max_element(self) -> f32 {
        let v = self;
        let v = f32x4_pmax(v, i32x4_shuffle::<2, 2, 0, 0>(v, v));
        let v = f32x4_pmax(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }

    #[inline]
    fn dot(self, other: Self) -> f32 {
        f32x4_extract_lane::<0>(dot3_in_x(self, other))
    }

    /// 3-lane dot product broadcast across all four lanes.
    #[inline]
    fn dot_into_vec(self, other: Self) -> Self {
        let dot_in_x = dot3_in_x(self, other);
        i32x4_shuffle::<0, 0, 0, 0>(dot_in_x, dot_in_x)
    }

    #[inline]
    fn cross(self, other: Self) -> Self {
        // x <- a.y*b.z - a.z*b.y
        // y <- a.z*b.x - a.x*b.z
        // z <- a.x*b.y - a.y*b.x
        // We can save a shuffle by grouping it in this wacky order:
        // (self.zxy() * other - self * other.zxy()).zxy()
        let lhszxy = i32x4_shuffle::<2, 0, 1, 1>(self, self);
        let rhszxy = i32x4_shuffle::<2, 0, 1, 1>(other, other);
        let lhszxy_rhs = f32x4_mul(lhszxy, other);
        let rhszxy_lhs = f32x4_mul(rhszxy, self);
        let sub = f32x4_sub(lhszxy_rhs, rhszxy_lhs);
        i32x4_shuffle::<2, 0, 1, 1>(sub, sub)
    }

    /// Lane-wise clamp; asserts `min <= max` lane-wise in debug-style builds.
    #[inline]
    fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(
            MaskVector3::all(min.cmple(max)),
            "clamp: expected min <= max"
        );
        self.max(min).min(max)
    }
}
+
impl Vector4<f32> for v128 {
    #[inline(always)]
    fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        f32x4(x, y, z, w)
    }

    #[inline(always)]
    fn x(self) -> f32 {
        f32x4_extract_lane::<0>(self)
    }

    #[inline(always)]
    fn y(self) -> f32 {
        f32x4_extract_lane::<1>(self)
    }

    #[inline(always)]
    fn z(self) -> f32 {
        f32x4_extract_lane::<2>(self)
    }

    #[inline(always)]
    fn w(self) -> f32 {
        f32x4_extract_lane::<3>(self)
    }

    #[inline(always)]
    fn splat_x(self) -> Self {
        i32x4_shuffle::<0, 0, 0, 0>(self, self)
    }

    #[inline(always)]
    fn splat_y(self) -> Self {
        i32x4_shuffle::<1, 1, 1, 1>(self, self)
    }

    #[inline(always)]
    fn splat_z(self) -> Self {
        i32x4_shuffle::<2, 2, 2, 2>(self, self)
    }

    #[inline(always)]
    fn splat_w(self) -> Self {
        i32x4_shuffle::<3, 3, 3, 3>(self, self)
    }

    /// Panics if `slice` has fewer than four elements.
    #[inline(always)]
    fn from_slice_unaligned(slice: &[f32]) -> Self {
        f32x4(slice[0], slice[1], slice[2], slice[3])
    }

    /// Panics if `slice` has fewer than four elements.
    #[inline(always)]
    fn write_to_slice_unaligned(self, slice: &mut [f32]) {
        let xyzw = self.as_ref_xyzw();
        slice[0] = xyzw.x;
        slice[1] = xyzw.y;
        slice[2] = xyzw.z;
        slice[3] = xyzw.w;
    }

    /// Views the four lanes in place as an `XYZW`.
    // Relies on v128 having the same layout as four packed f32s.
    #[inline(always)]
    fn as_ref_xyzw(&self) -> &XYZW<f32> {
        unsafe { &*(self as *const Self as *const XYZW<f32>) }
    }

    #[inline(always)]
    fn as_mut_xyzw(&mut self) -> &mut XYZW<f32> {
        unsafe { &mut *(self as *mut Self as *mut XYZW<f32>) }
    }

    #[inline(always)]
    fn into_xy(self) -> XY<f32> {
        XY {
            x: f32x4_extract_lane::<0>(self),
            y: f32x4_extract_lane::<1>(self),
        }
    }

    #[inline(always)]
    fn into_xyz(self) -> XYZ<f32> {
        XYZ {
            x: f32x4_extract_lane::<0>(self),
            y: f32x4_extract_lane::<1>(self),
            z: f32x4_extract_lane::<2>(self),
        }
    }

    #[inline(always)]
    fn from_array(a: [f32; 4]) -> Self {
        Vector4::new(a[0], a[1], a[2], a[3])
    }

    /// Stores the register and reinterprets it as four f32.
    #[inline(always)]
    fn into_array(self) -> [f32; 4] {
        let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
        unsafe {
            v128_store(out.as_mut_ptr(), self);
            *(&out.assume_init() as *const v128 as *const [f32; 4])
        }
    }

    #[inline(always)]
    fn from_tuple(t: (f32, f32, f32, f32)) -> Self {
        Vector4::new(t.0, t.1, t.2, t.3)
    }

    #[inline(always)]
    fn into_tuple(self) -> (f32, f32, f32, f32) {
        let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
        unsafe {
            v128_store(out.as_mut_ptr(), self);
            *(&out.assume_init() as *const v128 as *const (f32, f32, f32, f32))
        }
    }

    /// Horizontal minimum of all four lanes.
    #[inline]
    fn min_element(self) -> f32 {
        let v = self;
        let v = f32x4_pmin(v, i32x4_shuffle::<2, 3, 0, 0>(v, v));
        let v = f32x4_pmin(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }

    /// Horizontal maximum of all four lanes.
    #[inline]
    fn max_element(self) -> f32 {
        let v = self;
        let v = f32x4_pmax(v, i32x4_shuffle::<2, 3, 0, 0>(v, v));
        let v = f32x4_pmax(v, i32x4_shuffle::<1, 0, 0, 0>(v, v));
        f32x4_extract_lane::<0>(v)
    }

    #[inline]
    fn dot(self, other: Self) -> f32 {
        f32x4_extract_lane::<0>(dot4_in_x(self, other))
    }

    /// 4-lane dot product broadcast across all four lanes.
    #[inline]
    fn dot_into_vec(self, other: Self) -> Self {
        let dot_in_x = dot4_in_x(self, other);
        i32x4_shuffle::<0, 0, 0, 0>(dot_in_x, dot_in_x)
    }

    /// Lane-wise clamp; asserts `min <= max` lane-wise in debug-style builds.
    #[inline]
    fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(
            MaskVector4::all(min.cmple(max)),
            "clamp: expected min <= max"
        );
        self.max(min).min(max)
    }
}
+
+impl SignedVector<f32> for v128 {
+ #[inline(always)]
+ fn neg(self) -> Self {
+ f32x4_neg(self)
+ }
+}
+
+impl SignedVector3<f32> for v128 {
+ #[inline]
+ fn abs(self) -> Self {
+ f32x4_abs(self)
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ const NEG_ONE: v128 = const_f32x4!([-1.0; 4]);
+ let mask = self.cmpge(Self::ZERO);
+ let result = Self::select(mask, Self::ONE, NEG_ONE);
+ let mask = f32x4_isnan(self);
+ Self::select(mask, self, result)
+ }
+}
+
+impl SignedVector4<f32> for v128 {
+ #[inline]
+ fn abs(self) -> Self {
+ f32x4_abs(self)
+ }
+
+ #[inline]
+ fn signum(self) -> Self {
+ const NEG_ONE: v128 = const_f32x4!([-1.0; 4]);
+ let mask = self.cmpge(Self::ZERO);
+ let result = Self::select(mask, Self::ONE, NEG_ONE);
+ let mask = f32x4_isnan(self);
+ Self::select(mask, self, result)
+ }
+}
+
+impl FloatVector3<f32> for v128 {
+ #[inline]
+ fn is_finite(self) -> bool {
+ let (x, y, z) = Vector3::into_tuple(self);
+ x.is_finite() && y.is_finite() && z.is_finite()
+ }
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ MaskVector3::any(FloatVector3::is_nan_mask(self))
+ }
+
+ #[inline(always)]
+ fn is_nan_mask(self) -> Self::Mask {
+ f32x4_isnan(self)
+ }
+
+ #[inline]
+ fn floor(self) -> Self {
+ f32x4_floor(self)
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ f32x4_ceil(self)
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ // TODO: might differ from m128_round
+ f32x4_nearest(self)
+ }
+
+ #[inline(always)]
+ fn recip(self) -> Self {
+ f32x4_div(Self::ONE, self)
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ let (x, y, z) = Vector3::into_tuple(self);
+ Vector3::new(x.exp(), y.exp(), z.exp())
+ }
+
+ #[inline]
+ fn powf(self, n: f32) -> Self {
+ let (x, y, z) = Vector3::into_tuple(self);
+ Vector3::new(x.powf(n), y.powf(n), z.powf(n))
+ }
+
+ #[inline]
+ fn length(self) -> f32 {
+ let dot = dot3_in_x(self, self);
+ f32x4_extract_lane::<0>(f32x4_sqrt(dot))
+ }
+
+ #[inline]
+ fn length_recip(self) -> f32 {
+ let dot = dot3_in_x(self, self);
+ f32x4_extract_lane::<0>(f32x4_div(Self::ONE, f32x4_sqrt(dot)))
+ }
+
+ #[inline]
+ fn normalize(self) -> Self {
+ let length = f32x4_sqrt(Vector3::dot_into_vec(self, self));
+ #[allow(clippy::let_and_return)]
+ let normalized = f32x4_div(self, length);
+ glam_assert!(FloatVector3::is_finite(normalized));
+ normalized
+ }
+}
+
+impl FloatVector4<f32> for v128 {
+ #[inline]
+ fn is_finite(self) -> bool {
+ let (x, y, z, w) = Vector4::into_tuple(self);
+ x.is_finite() && y.is_finite() && z.is_finite() && w.is_finite()
+ }
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ MaskVector4::any(FloatVector4::is_nan_mask(self))
+ }
+
+ #[inline(always)]
+ fn is_nan_mask(self) -> Self::Mask {
+ f32x4_isnan(self)
+ }
+
+ #[inline]
+ fn floor(self) -> Self {
+ f32x4_floor(self)
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ f32x4_ceil(self)
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ f32x4_nearest(self)
+ }
+
+ #[inline(always)]
+ fn recip(self) -> Self {
+ f32x4_div(Self::ONE, self)
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ let (x, y, z, w) = Vector4::into_tuple(self);
+ f32x4(x.exp(), y.exp(), z.exp(), w.exp())
+ }
+
+ #[inline]
+ fn powf(self, n: f32) -> Self {
+ let (x, y, z, w) = Vector4::into_tuple(self);
+ f32x4(x.powf(n), y.powf(n), z.powf(n), w.powf(n))
+ }
+
+ #[inline]
+ fn length(self) -> f32 {
+ let dot = dot4_in_x(self, self);
+ f32x4_extract_lane::<0>(f32x4_sqrt(dot))
+ }
+
+ #[inline]
+ fn length_recip(self) -> f32 {
+ let dot = dot4_in_x(self, self);
+ f32x4_extract_lane::<0>(f32x4_div(Self::ONE, f32x4_sqrt(dot)))
+ }
+
+ #[inline]
+ fn normalize(self) -> Self {
+ let dot = Vector4::dot_into_vec(self, self);
+ #[allow(clippy::let_and_return)]
+ let normalized = f32x4_div(self, f32x4_sqrt(dot));
+ glam_assert!(FloatVector4::is_finite(normalized));
+ normalized
+ }
+}
+
+impl From<XYZW<f32>> for v128 {
+ #[inline(always)]
+ fn from(v: XYZW<f32>) -> v128 {
+ f32x4(v.x, v.y, v.z, v.w)
+ }
+}
+
+impl From<XYZ<f32>> for v128 {
+ #[inline(always)]
+ fn from(v: XYZ<f32>) -> v128 {
+ f32x4(v.x, v.y, v.z, v.z)
+ }
+}
+
+impl From<XY<f32>> for v128 {
+ #[inline(always)]
+ fn from(v: XY<f32>) -> v128 {
+ f32x4(v.x, v.y, v.y, v.y)
+ }
+}
+
+impl From<v128> for XYZW<f32> {
+ #[inline(always)]
+ fn from(v: v128) -> XYZW<f32> {
+ let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
+ unsafe {
+ v128_store(out.as_mut_ptr(), v);
+ *(&out.assume_init() as *const v128 as *const XYZW<f32>)
+ }
+ }
+}
+
+impl From<v128> for XYZ<f32> {
+ #[inline(always)]
+ fn from(v: v128) -> XYZ<f32> {
+ let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
+ unsafe {
+ v128_store(out.as_mut_ptr(), v);
+ *(&out.assume_init() as *const v128 as *const XYZ<f32>)
+ }
+ }
+}
+
+impl From<v128> for XY<f32> {
+ #[inline(always)]
+ fn from(v: v128) -> XY<f32> {
+ let mut out: MaybeUninit<v128> = MaybeUninit::uninit();
+ unsafe {
+ v128_store(out.as_mut_ptr(), v);
+ *(&out.assume_init() as *const v128 as *const XY<f32>)
+ }
+ }
+}
diff --git a/src/euler.rs b/src/euler.rs
new file mode 100644
index 0000000..558400a
--- /dev/null
+++ b/src/euler.rs
@@ -0,0 +1,261 @@
+/*
+Conversion from quaternions to Euler rotation sequences.
+
+From: http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
+*/
+
+use super::quat::{DQuat, Quat};
+use crate::core::traits::scalar::*;
+
+/// Euler rotation sequences.
+///
+/// The angles are applied starting from the right.
+/// E.g. XYZ will first apply the z-axis rotation.
+///
+/// YXZ can be used for yaw (y-axis), pitch (x-axis), roll (z-axis).
+///
+/// The two-axis rotations (e.g. ZYZ) are not fully tested and have to be treated with caution.
+#[derive(Debug, Clone, Copy)]
+pub enum EulerRot {
+ /// Intrinsic three-axis rotation ZYX
+ ZYX,
+ /// Intrinsic three-axis rotation ZXY
+ ZXY,
+ /// Intrinsic three-axis rotation YXZ
+ YXZ,
+ /// Intrinsic three-axis rotation YZX
+ YZX,
+ /// Intrinsic three-axis rotation XYZ
+ XYZ,
+ /// Intrinsic three-axis rotation XZY
+ XZY,
+
+ /// Intrinsic two-axis rotation ZYZ
+ #[deprecated(note = "Untested! Use at own risk!")]
+ ZYZ,
+ /// Intrinsic two-axis rotation ZXZ
+ #[deprecated(note = "Untested! Use at own risk!")]
+ ZXZ,
+ /// Intrinsic two-axis rotation YXY
+ #[deprecated(note = "Untested! Use at own risk!")]
+ YXY,
+ /// Intrinsic two-axis rotation YZY
+ #[deprecated(note = "Untested! Use at own risk!")]
+ YZY,
+ /// Intrinsic two-axis rotation XYX
+ #[deprecated(note = "Untested! Use at own risk!")]
+ XYX,
+ /// Intrinsic two-axis rotation XZX
+ #[deprecated(note = "Untested! Use at own risk!")]
+ XZX,
+}
+
+impl Default for EulerRot {
+ /// Defaults to `YXZ`: yaw (y-axis), pitch (x-axis), roll (z-axis).
+ fn default() -> Self {
+ Self::YXZ
+ }
+}
+
+/// Conversion from quaternion to euler angles.
+pub trait EulerFromQuaternion<Q: Copy>: Sized + Copy {
+ type Output: FloatEx;
+ /// Compute the angle of the first axis (X-x-x)
+ fn first(self, q: Q) -> Self::Output;
+ /// Compute the angle of the second axis (x-X-x)
+ fn second(self, q: Q) -> Self::Output;
+ /// Compute the angle of the third axis (x-x-X)
+ fn third(self, q: Q) -> Self::Output;
+
+ /// Compute all angles of a rotation in the notation order
+ fn convert_quat(self, q: Q) -> (Self::Output, Self::Output, Self::Output) {
+ (self.first(q), self.second(q), self.third(q))
+ }
+}
+
+/// Conversion from euler angles to quaternion.
+pub trait EulerToQuaternion<T>: Copy {
+ type Output;
+ /// Create the rotation quaternion for the three angles of this euler rotation sequence.
+ fn new_quat(self, u: T, v: T, w: T) -> Self::Output;
+}
+
+/// Adds an atan2 that handles the negative zero case.
+/// Basically forces positive zero in the x-argument for atan2.
+pub trait Atan2Fixed<T = Self> {
+ fn atan2_fixed(self, other: T) -> T;
+}
+
+impl Atan2Fixed for f32 {
+ fn atan2_fixed(self, other: f32) -> f32 {
+ self.atan2(if other == 0.0f32 { 0.0f32 } else { other })
+ }
+}
+impl Atan2Fixed for f64 {
+ fn atan2_fixed(self, other: f64) -> f64 {
+ self.atan2(if other == 0.0f64 { 0.0f64 } else { other })
+ }
+}
+
+macro_rules! impl_from_quat {
+ ($t:ty, $quat:ident) => {
+ impl EulerFromQuaternion<$quat> for EulerRot {
+ type Output = $t;
+ fn first(self, q: $quat) -> $t {
+ use EulerRot::*;
+ match self {
+ ZYX => (Self::Output::TWO * (q.x * q.y + q.w * q.z))
+ .atan2(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z),
+ ZXY => (-Self::Output::TWO * (q.x * q.y - q.w * q.z))
+ .atan2(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z),
+ YXZ => (Self::Output::TWO * (q.x * q.z + q.w * q.y))
+ .atan2(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z),
+ YZX => (-Self::Output::TWO * (q.x * q.z - q.w * q.y))
+ .atan2(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z),
+ XYZ => (-Self::Output::TWO * (q.y * q.z - q.w * q.x))
+ .atan2(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z),
+ XZY => (Self::Output::TWO * (q.y * q.z + q.w * q.x))
+ .atan2(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z),
+ #[allow(deprecated)]
+ ZYZ => (Self::Output::TWO * (q.y * q.z + q.w * q.x))
+ .atan2_fixed(-Self::Output::TWO * (q.x * q.z - q.w * q.y)),
+ #[allow(deprecated)]
+ ZXZ => (Self::Output::TWO * (q.x * q.z - q.w * q.y))
+ .atan2_fixed(Self::Output::TWO * (q.y * q.z + q.w * q.x)),
+ #[allow(deprecated)]
+ YXY => (Self::Output::TWO * (q.x * q.y + q.w * q.z))
+ .atan2_fixed(-Self::Output::TWO * (q.y * q.z - q.w * q.x)),
+ #[allow(deprecated)]
+ YZY => (Self::Output::TWO * (q.y * q.z - q.w * q.x))
+ .atan2_fixed(Self::Output::TWO * (q.x * q.y + q.w * q.z)),
+ #[allow(deprecated)]
+ XYX => (Self::Output::TWO * (q.x * q.y - q.w * q.z))
+ .atan2_fixed(Self::Output::TWO * (q.x * q.z + q.w * q.y)),
+ #[allow(deprecated)]
+ XZX => (Self::Output::TWO * (q.x * q.z + q.w * q.y))
+ .atan2_fixed(-Self::Output::TWO * (q.x * q.y - q.w * q.z)),
+ }
+ }
+
+ fn second(self, q: $quat) -> $t {
+ use EulerRot::*;
+
+ /// Clamp number to the range [-1, 1] for asin() and acos(), else NaN is possible.
+ #[inline(always)]
+ fn arc_clamp<T: FloatEx>(val: T) -> T {
+ NumEx::min(NumEx::max(val, T::NEG_ONE), T::ONE)
+ }
+
+ match self {
+ ZYX => arc_clamp(-Self::Output::TWO * (q.x * q.z - q.w * q.y)).asin(),
+ ZXY => arc_clamp(Self::Output::TWO * (q.y * q.z + q.w * q.x)).asin(),
+ YXZ => arc_clamp(-Self::Output::TWO * (q.y * q.z - q.w * q.x)).asin(),
+ YZX => arc_clamp(Self::Output::TWO * (q.x * q.y + q.w * q.z)).asin(),
+ XYZ => arc_clamp(Self::Output::TWO * (q.x * q.z + q.w * q.y)).asin(),
+ XZY => arc_clamp(-Self::Output::TWO * (q.x * q.y - q.w * q.z)).asin(),
+ #[allow(deprecated)]
+ ZYZ => arc_clamp(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z).acos(),
+ #[allow(deprecated)]
+ ZXZ => arc_clamp(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z).acos(),
+ #[allow(deprecated)]
+ YXY => arc_clamp(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z).acos(),
+ #[allow(deprecated)]
+ YZY => arc_clamp(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z).acos(),
+ #[allow(deprecated)]
+ XYX => arc_clamp(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z).acos(),
+ #[allow(deprecated)]
+ XZX => arc_clamp(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z).acos(),
+ }
+ }
+
+ fn third(self, q: $quat) -> $t {
+ use EulerRot::*;
+ #[allow(deprecated)]
+ match self {
+ ZYX => (Self::Output::TWO * (q.y * q.z + q.w * q.x))
+ .atan2(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z),
+ ZXY => (-Self::Output::TWO * (q.x * q.z - q.w * q.y))
+ .atan2(q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z),
+ YXZ => (Self::Output::TWO * (q.x * q.y + q.w * q.z))
+ .atan2(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z),
+ YZX => (-Self::Output::TWO * (q.y * q.z - q.w * q.x))
+ .atan2(q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z),
+ XYZ => (-Self::Output::TWO * (q.x * q.y - q.w * q.z))
+ .atan2(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z),
+ XZY => (Self::Output::TWO * (q.x * q.z + q.w * q.y))
+ .atan2(q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z),
+ #[allow(deprecated)]
+ ZYZ => (Self::Output::TWO * (q.y * q.z - q.w * q.x))
+ .atan2_fixed(Self::Output::TWO * (q.x * q.z + q.w * q.y)),
+ #[allow(deprecated)]
+ ZXZ => (Self::Output::TWO * (q.x * q.z + q.w * q.y))
+ .atan2_fixed(-Self::Output::TWO * (q.y * q.z - q.w * q.x)),
+ #[allow(deprecated)]
+ YXY => (Self::Output::TWO * (q.x * q.y - q.w * q.z))
+ .atan2_fixed(Self::Output::TWO * (q.y * q.z + q.w * q.x)),
+ #[allow(deprecated)]
+ YZY => (Self::Output::TWO * (q.y * q.z + q.w * q.x))
+ .atan2_fixed(-Self::Output::TWO * (q.x * q.y - q.w * q.z)),
+ #[allow(deprecated)]
+ XYX => (Self::Output::TWO * (q.x * q.y + q.w * q.z))
+ .atan2_fixed(-Self::Output::TWO * (q.x * q.z - q.w * q.y)),
+ #[allow(deprecated)]
+ XZX => (Self::Output::TWO * (q.x * q.z - q.w * q.y))
+ .atan2_fixed(Self::Output::TWO * (q.x * q.y + q.w * q.z)),
+ }
+ }
+ }
+ // End - impl EulerFromQuaternion
+ };
+}
+
+macro_rules! impl_to_quat {
+ ($t:ty, $quat:ident) => {
+ impl EulerToQuaternion<$t> for EulerRot {
+ type Output = $quat;
+ #[inline(always)]
+ fn new_quat(self, u: $t, v: $t, w: $t) -> $quat {
+ use EulerRot::*;
+ #[inline(always)]
+ fn rot_x(a: $t) -> $quat {
+ $quat::from_rotation_x(a)
+ }
+ #[inline(always)]
+ fn rot_y(a: $t) -> $quat {
+ $quat::from_rotation_y(a)
+ }
+ #[inline(always)]
+ fn rot_z(a: $t) -> $quat {
+ $quat::from_rotation_z(a)
+ }
+ match self {
+ ZYX => rot_z(u) * rot_y(v) * rot_x(w),
+ ZXY => rot_z(u) * rot_x(v) * rot_y(w),
+ YXZ => rot_y(u) * rot_x(v) * rot_z(w),
+ YZX => rot_y(u) * rot_z(v) * rot_x(w),
+ XYZ => rot_x(u) * rot_y(v) * rot_z(w),
+ XZY => rot_x(u) * rot_z(v) * rot_y(w),
+ #[allow(deprecated)]
+ ZYZ => rot_z(u) * rot_y(v) * rot_z(w),
+ #[allow(deprecated)]
+ ZXZ => rot_z(u) * rot_x(v) * rot_z(w),
+ #[allow(deprecated)]
+ YXY => rot_y(u) * rot_x(v) * rot_y(w),
+ #[allow(deprecated)]
+ YZY => rot_y(u) * rot_z(v) * rot_y(w),
+ #[allow(deprecated)]
+ XYX => rot_x(u) * rot_y(v) * rot_x(w),
+ #[allow(deprecated)]
+ XZX => rot_x(u) * rot_z(v) * rot_x(w),
+ }
+ .normalize()
+ }
+ }
+ // End - impl EulerToQuaternion
+ };
+}
+
+impl_from_quat!(f32, Quat);
+impl_from_quat!(f64, DQuat);
+impl_to_quat!(f32, Quat);
+impl_to_quat!(f64, DQuat);
diff --git a/src/features/impl_approx.rs b/src/features/impl_approx.rs
new file mode 100644
index 0000000..49c3db6
--- /dev/null
+++ b/src/features/impl_approx.rs
@@ -0,0 +1,212 @@
+use crate::{
+ Affine2, Affine3A, DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, Mat2,
+ Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4,
+};
+use approx::{AbsDiffEq, RelativeEq, UlpsEq};
+
+macro_rules! impl_approx_as_ref {
+ ($prim:ident, $type:ty) => {
+ impl AbsDiffEq for $type {
+ type Epsilon = <$prim as AbsDiffEq>::Epsilon;
+ fn default_epsilon() -> Self::Epsilon {
+ $prim::default_epsilon()
+ }
+ fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
+ self.as_ref().abs_diff_eq(other.as_ref(), epsilon)
+ }
+ }
+
+ impl RelativeEq for $type {
+ fn default_max_relative() -> Self::Epsilon {
+ $prim::default_max_relative()
+ }
+ fn relative_eq(
+ &self,
+ other: &Self,
+ epsilon: Self::Epsilon,
+ max_relative: Self::Epsilon,
+ ) -> bool {
+ self.as_ref()
+ .relative_eq(other.as_ref(), epsilon, max_relative)
+ }
+ }
+
+ impl UlpsEq for $type {
+ fn default_max_ulps() -> u32 {
+ $prim::default_max_ulps()
+ }
+ fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
+ self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps)
+ }
+ }
+ };
+}
+
+macro_rules! impl_approx_xzy_axes {
+ ($prim:ident, $type:ty) => {
+ impl AbsDiffEq for $type {
+ type Epsilon = <$prim as AbsDiffEq>::Epsilon;
+ fn default_epsilon() -> Self::Epsilon {
+ $prim::default_epsilon()
+ }
+ fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
+ AbsDiffEq::abs_diff_eq(&self.x_axis, &other.x_axis, epsilon)
+ && AbsDiffEq::abs_diff_eq(&self.y_axis, &other.y_axis, epsilon)
+ && AbsDiffEq::abs_diff_eq(&self.z_axis, &other.z_axis, epsilon)
+ }
+ }
+
+ impl RelativeEq for $type {
+ fn default_max_relative() -> Self::Epsilon {
+ $prim::default_max_relative()
+ }
+ fn relative_eq(
+ &self,
+ other: &Self,
+ epsilon: Self::Epsilon,
+ max_relative: Self::Epsilon,
+ ) -> bool {
+ RelativeEq::relative_eq(&self.x_axis, &other.x_axis, epsilon, max_relative)
+ && RelativeEq::relative_eq(&self.y_axis, &other.y_axis, epsilon, max_relative)
+ && RelativeEq::relative_eq(&self.z_axis, &other.z_axis, epsilon, max_relative)
+ }
+ }
+
+ impl UlpsEq for $type {
+ fn default_max_ulps() -> u32 {
+ $prim::default_max_ulps()
+ }
+ fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
+ UlpsEq::ulps_eq(&self.x_axis, &other.x_axis, epsilon, max_ulps)
+ && UlpsEq::ulps_eq(&self.y_axis, &other.y_axis, epsilon, max_ulps)
+ && UlpsEq::ulps_eq(&self.z_axis, &other.z_axis, epsilon, max_ulps)
+ }
+ }
+ };
+}
+
+macro_rules! impl_approx_xzyw_axes {
+ ($prim:ident, $type:ty) => {
+ impl AbsDiffEq for $type {
+ type Epsilon = <$prim as AbsDiffEq>::Epsilon;
+ fn default_epsilon() -> Self::Epsilon {
+ $prim::default_epsilon()
+ }
+ fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
+ AbsDiffEq::abs_diff_eq(&self.x_axis, &other.x_axis, epsilon)
+ && AbsDiffEq::abs_diff_eq(&self.y_axis, &other.y_axis, epsilon)
+ && AbsDiffEq::abs_diff_eq(&self.z_axis, &other.z_axis, epsilon)
+ && AbsDiffEq::abs_diff_eq(&self.w_axis, &other.w_axis, epsilon)
+ }
+ }
+
+ impl RelativeEq for $type {
+ fn default_max_relative() -> Self::Epsilon {
+ $prim::default_max_relative()
+ }
+ fn relative_eq(
+ &self,
+ other: &Self,
+ epsilon: Self::Epsilon,
+ max_relative: Self::Epsilon,
+ ) -> bool {
+ RelativeEq::relative_eq(&self.x_axis, &other.x_axis, epsilon, max_relative)
+ && RelativeEq::relative_eq(&self.y_axis, &other.y_axis, epsilon, max_relative)
+ && RelativeEq::relative_eq(&self.z_axis, &other.z_axis, epsilon, max_relative)
+ && RelativeEq::relative_eq(&self.w_axis, &other.w_axis, epsilon, max_relative)
+ }
+ }
+
+ impl UlpsEq for $type {
+ fn default_max_ulps() -> u32 {
+ $prim::default_max_ulps()
+ }
+ fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
+ UlpsEq::ulps_eq(&self.x_axis, &other.x_axis, epsilon, max_ulps)
+ && UlpsEq::ulps_eq(&self.y_axis, &other.y_axis, epsilon, max_ulps)
+ && UlpsEq::ulps_eq(&self.z_axis, &other.z_axis, epsilon, max_ulps)
+ && UlpsEq::ulps_eq(&self.w_axis, &other.w_axis, epsilon, max_ulps)
+ }
+ }
+ };
+}
+
+impl_approx_as_ref!(f32, Mat2);
+impl_approx_as_ref!(f32, Mat3);
+impl_approx_as_ref!(f32, Mat4);
+impl_approx_as_ref!(f32, Quat);
+impl_approx_as_ref!(f32, Vec2);
+impl_approx_as_ref!(f32, Vec3);
+impl_approx_as_ref!(f32, Vec4);
+impl_approx_as_ref!(f32, Vec3A);
+
+impl_approx_xzy_axes!(f32, Affine2);
+impl_approx_xzyw_axes!(f32, Affine3A);
+impl_approx_xzy_axes!(f32, Mat3A);
+
+impl_approx_xzy_axes!(f64, DAffine2);
+impl_approx_xzyw_axes!(f64, DAffine3);
+impl_approx_as_ref!(f64, DMat2);
+impl_approx_as_ref!(f64, DMat3);
+impl_approx_as_ref!(f64, DMat4);
+impl_approx_as_ref!(f64, DQuat);
+impl_approx_as_ref!(f64, DVec2);
+impl_approx_as_ref!(f64, DVec3);
+impl_approx_as_ref!(f64, DVec4);
+
+#[cfg(test)]
+mod test {
+ use crate::*;
+ use approx::*;
+
+ macro_rules! impl_approx_test {
+ ($prim:ident, $type:ident, $ones:expr) => {
+ let one_eps = $ones * $type::default_epsilon();
+ let two_eps = one_eps + one_eps;
+
+ let one_ulp = $ones * $prim::from_bits($prim::to_bits(1.0) + 1);
+ let four_ulp = $ones * $prim::from_bits($prim::to_bits(1.0) + 16);
+
+ approx::assert_abs_diff_eq!($ones, $ones);
+ approx::assert_abs_diff_eq!($ones, $ones + one_eps);
+ approx::assert_abs_diff_eq!($ones, $ones - one_eps);
+
+ approx::assert_abs_diff_ne!($ones, $ones + two_eps);
+ approx::assert_abs_diff_ne!($ones, $ones - two_eps);
+
+ approx::assert_relative_eq!($ones, $ones);
+ approx::assert_relative_ne!($ones, $ones - $ones);
+
+ // assert_ulps_eq! defaults to a tolerance of 4 ULPs; this macro does not forward other tolerance parameters.
+ approx::assert_ulps_eq!($ones, one_ulp);
+ approx::assert_ulps_ne!($ones, four_ulp);
+ };
+ ($prim:ident, $type:ident) => {
+ impl_approx_test!($prim, $type, $type::ONE)
+ };
+ }
+
+ #[test]
+ fn test_approx() {
+ const ONESF32: [f32; 16] = [1.0; 16];
+
+ impl_approx_test!(f32, Vec2);
+ impl_approx_test!(f32, Vec3);
+ impl_approx_test!(f32, Vec3A);
+ impl_approx_test!(f32, Vec4);
+ impl_approx_test!(f32, Quat, Quat::from_slice(&ONESF32));
+ impl_approx_test!(f32, Mat2, Mat2::from_cols_slice(&ONESF32));
+ impl_approx_test!(f32, Mat3, Mat3::from_cols_slice(&ONESF32));
+ impl_approx_test!(f32, Mat3A, Mat3A::from_cols_slice(&ONESF32));
+ impl_approx_test!(f32, Mat4, Mat4::from_cols_slice(&ONESF32));
+
+ const ONESF64: [f64; 16] = [1.0; 16];
+ impl_approx_test!(f64, DVec2);
+ impl_approx_test!(f64, DVec3);
+ impl_approx_test!(f64, DVec4);
+ impl_approx_test!(f64, DQuat, DQuat::from_slice(&ONESF64));
+ impl_approx_test!(f64, DMat2, DMat2::from_cols_slice(&ONESF64));
+ impl_approx_test!(f64, DMat3, DMat3::from_cols_slice(&ONESF64));
+ impl_approx_test!(f64, DMat4, DMat4::from_cols_slice(&ONESF64));
+ }
+}
diff --git a/src/features/impl_bytemuck.rs b/src/features/impl_bytemuck.rs
new file mode 100644
index 0000000..bed7ecd
--- /dev/null
+++ b/src/features/impl_bytemuck.rs
@@ -0,0 +1,98 @@
+use crate::{
+ DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4, Quat,
+ UVec2, UVec3, UVec4, Vec2, Vec3, Vec4,
+};
+use bytemuck::{Pod, Zeroable};
+
+unsafe impl Pod for Mat2 {}
+unsafe impl Zeroable for Mat2 {}
+unsafe impl Pod for Mat3 {}
+unsafe impl Zeroable for Mat3 {}
+unsafe impl Pod for Mat4 {}
+unsafe impl Zeroable for Mat4 {}
+
+unsafe impl Pod for Quat {}
+unsafe impl Zeroable for Quat {}
+
+unsafe impl Pod for Vec2 {}
+unsafe impl Zeroable for Vec2 {}
+unsafe impl Pod for Vec3 {}
+unsafe impl Zeroable for Vec3 {}
+unsafe impl Pod for Vec4 {}
+unsafe impl Zeroable for Vec4 {}
+
+unsafe impl Pod for DMat2 {}
+unsafe impl Zeroable for DMat2 {}
+unsafe impl Pod for DMat3 {}
+unsafe impl Zeroable for DMat3 {}
+unsafe impl Pod for DMat4 {}
+unsafe impl Zeroable for DMat4 {}
+
+unsafe impl Pod for DQuat {}
+unsafe impl Zeroable for DQuat {}
+
+unsafe impl Pod for DVec2 {}
+unsafe impl Zeroable for DVec2 {}
+unsafe impl Pod for DVec3 {}
+unsafe impl Zeroable for DVec3 {}
+unsafe impl Pod for DVec4 {}
+unsafe impl Zeroable for DVec4 {}
+
+unsafe impl Pod for IVec2 {}
+unsafe impl Zeroable for IVec2 {}
+unsafe impl Pod for IVec3 {}
+unsafe impl Zeroable for IVec3 {}
+unsafe impl Pod for IVec4 {}
+unsafe impl Zeroable for IVec4 {}
+
+unsafe impl Pod for UVec2 {}
+unsafe impl Zeroable for UVec2 {}
+unsafe impl Pod for UVec3 {}
+unsafe impl Zeroable for UVec3 {}
+unsafe impl Pod for UVec4 {}
+unsafe impl Zeroable for UVec4 {}
+
+#[cfg(test)]
+mod test {
+ use crate::{
+ DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4,
+ Quat, UVec2, UVec3, UVec4, Vec2, Vec3, Vec4,
+ };
+ use core::mem;
+
+ macro_rules! test_t {
+ ($name:ident, $t:ty) => {
+ #[test]
+ fn $name() {
+ let t = <$t>::default();
+ let b = bytemuck::bytes_of(&t);
+ assert_eq!(t.as_ref().as_ptr() as usize, b.as_ptr() as usize);
+ assert_eq!(b.len(), mem::size_of_val(&t));
+ }
+ };
+ }
+
+ test_t!(mat2, Mat2);
+ test_t!(mat3, Mat3);
+ test_t!(mat4, Mat4);
+ test_t!(quat, Quat);
+ test_t!(vec2, Vec2);
+ test_t!(vec3, Vec3);
+ test_t!(vec4, Vec4);
+
+ test_t!(dmat2, DMat2);
+ test_t!(dmat3, DMat3);
+ test_t!(dmat4, DMat4);
+ test_t!(dquat, DQuat);
+ test_t!(dvec2, DVec2);
+ test_t!(dvec3, DVec3);
+ test_t!(dvec4, DVec4);
+
+ test_t!(ivec2, IVec2);
+ test_t!(ivec3, IVec3);
+ test_t!(ivec4, IVec4);
+
+ test_t!(uvec2, UVec2);
+ test_t!(uvec3, UVec3);
+ test_t!(uvec4, UVec4);
+}
diff --git a/src/features/impl_mint.rs b/src/features/impl_mint.rs
new file mode 100644
index 0000000..5c7670b
--- /dev/null
+++ b/src/features/impl_mint.rs
@@ -0,0 +1,528 @@
+use mint::IntoMint;
+
+use crate::{
+ DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat3A, Mat4,
+ Quat, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4,
+};
+
+macro_rules! impl_vec_types {
+ ($t:ty, $vec2:ty, $vec3:ty, $vec4:ty) => {
+ impl From<mint::Point2<$t>> for $vec2 {
+ fn from(v: mint::Point2<$t>) -> Self {
+ Self::new(v.x, v.y)
+ }
+ }
+
+ impl From<$vec2> for mint::Point2<$t> {
+ fn from(v: $vec2) -> Self {
+ Self { x: v.x, y: v.y }
+ }
+ }
+
+ impl From<mint::Vector2<$t>> for $vec2 {
+ fn from(v: mint::Vector2<$t>) -> Self {
+ Self::new(v.x, v.y)
+ }
+ }
+
+ impl From<$vec2> for mint::Vector2<$t> {
+ fn from(v: $vec2) -> Self {
+ Self { x: v.x, y: v.y }
+ }
+ }
+
+ impl IntoMint for $vec2 {
+ type MintType = mint::Vector2<$t>;
+ }
+
+ impl From<mint::Point3<$t>> for $vec3 {
+ fn from(v: mint::Point3<$t>) -> Self {
+ Self::new(v.x, v.y, v.z)
+ }
+ }
+
+ impl From<$vec3> for mint::Point3<$t> {
+ fn from(v: $vec3) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ }
+ }
+ }
+
+ impl From<mint::Vector3<$t>> for $vec3 {
+ fn from(v: mint::Vector3<$t>) -> Self {
+ Self::new(v.x, v.y, v.z)
+ }
+ }
+
+ impl From<$vec3> for mint::Vector3<$t> {
+ fn from(v: $vec3) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ }
+ }
+ }
+
+ impl IntoMint for $vec3 {
+ type MintType = mint::Vector3<$t>;
+ }
+
+ impl From<mint::Vector4<$t>> for $vec4 {
+ fn from(v: mint::Vector4<$t>) -> Self {
+ Self::new(v.x, v.y, v.z, v.w)
+ }
+ }
+
+ impl From<$vec4> for mint::Vector4<$t> {
+ fn from(v: $vec4) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ w: v.w,
+ }
+ }
+ }
+
+ impl IntoMint for $vec4 {
+ type MintType = mint::Vector4<$t>;
+ }
+ };
+}
+
+macro_rules! impl_float_types {
+ ($t:ty, $mat2:ty, $mat3:ty, $mat4:ty, $quat:ty, $vec2:ty, $vec3:ty, $vec4:ty) => {
+ impl_vec_types!($t, $vec2, $vec3, $vec4);
+
+ impl From<mint::Quaternion<$t>> for $quat {
+ fn from(q: mint::Quaternion<$t>) -> Self {
+ Self::from_xyzw(q.v.x, q.v.y, q.v.z, q.s)
+ }
+ }
+
+ impl From<$quat> for mint::Quaternion<$t> {
+ fn from(q: $quat) -> Self {
+ Self {
+ s: q.w,
+ v: mint::Vector3 {
+ x: q.x,
+ y: q.y,
+ z: q.z,
+ },
+ }
+ }
+ }
+
+ impl IntoMint for $quat {
+ type MintType = mint::Quaternion<$t>;
+ }
+
+ impl From<mint::RowMatrix2<$t>> for $mat2 {
+ fn from(m: mint::RowMatrix2<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into()).transpose()
+ }
+ }
+
+ impl From<$mat2> for mint::RowMatrix2<$t> {
+ fn from(m: $mat2) -> Self {
+ let mt = m.transpose();
+ Self {
+ x: mt.x_axis.into(),
+ y: mt.y_axis.into(),
+ }
+ }
+ }
+
+ impl From<mint::ColumnMatrix2<$t>> for $mat2 {
+ fn from(m: mint::ColumnMatrix2<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into())
+ }
+ }
+
+ impl From<$mat2> for mint::ColumnMatrix2<$t> {
+ fn from(m: $mat2) -> Self {
+ Self {
+ x: m.x_axis.into(),
+ y: m.y_axis.into(),
+ }
+ }
+ }
+
+ impl IntoMint for $mat2 {
+ type MintType = mint::ColumnMatrix2<$t>;
+ }
+
+ impl From<mint::RowMatrix3<$t>> for $mat3 {
+ fn from(m: mint::RowMatrix3<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into()).transpose()
+ }
+ }
+
+ impl From<$mat3> for mint::RowMatrix3<$t> {
+ fn from(m: $mat3) -> Self {
+ let mt = m.transpose();
+ Self {
+ x: mt.x_axis.into(),
+ y: mt.y_axis.into(),
+ z: mt.z_axis.into(),
+ }
+ }
+ }
+
+ impl From<mint::ColumnMatrix3<$t>> for $mat3 {
+ fn from(m: mint::ColumnMatrix3<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into())
+ }
+ }
+
+ impl From<$mat3> for mint::ColumnMatrix3<$t> {
+ fn from(m: $mat3) -> Self {
+ Self {
+ x: m.x_axis.into(),
+ y: m.y_axis.into(),
+ z: m.z_axis.into(),
+ }
+ }
+ }
+
+ impl IntoMint for $mat3 {
+ type MintType = mint::ColumnMatrix3<$t>;
+ }
+
+ impl From<mint::RowMatrix4<$t>> for $mat4 {
+ fn from(m: mint::RowMatrix4<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into(), m.w.into()).transpose()
+ }
+ }
+
+ impl From<$mat4> for mint::RowMatrix4<$t> {
+ fn from(m: $mat4) -> Self {
+ let mt = m.transpose();
+ Self {
+ x: mt.x_axis.into(),
+ y: mt.y_axis.into(),
+ z: mt.z_axis.into(),
+ w: mt.w_axis.into(),
+ }
+ }
+ }
+
+ impl From<mint::ColumnMatrix4<$t>> for $mat4 {
+ fn from(m: mint::ColumnMatrix4<$t>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into(), m.w.into())
+ }
+ }
+
+ impl From<$mat4> for mint::ColumnMatrix4<$t> {
+ fn from(m: $mat4) -> Self {
+ Self {
+ x: m.x_axis.into(),
+ y: m.y_axis.into(),
+ z: m.z_axis.into(),
+ w: m.w_axis.into(),
+ }
+ }
+ }
+
+ impl IntoMint for $mat4 {
+ type MintType = mint::ColumnMatrix4<$t>;
+ }
+ };
+}
+
+impl From<mint::Point3<f32>> for Vec3A {
+ fn from(v: mint::Point3<f32>) -> Self {
+ Self::new(v.x, v.y, v.z)
+ }
+}
+
+impl From<Vec3A> for mint::Point3<f32> {
+ fn from(v: Vec3A) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ }
+ }
+}
+
+impl From<mint::Vector3<f32>> for Vec3A {
+ fn from(v: mint::Vector3<f32>) -> Self {
+ Self::new(v.x, v.y, v.z)
+ }
+}
+
+impl From<Vec3A> for mint::Vector3<f32> {
+ fn from(v: Vec3A) -> Self {
+ Self {
+ x: v.x,
+ y: v.y,
+ z: v.z,
+ }
+ }
+}
+
+impl IntoMint for Vec3A {
+ type MintType = mint::Vector3<f32>;
+}
+
+impl From<mint::RowMatrix3<f32>> for Mat3A {
+ fn from(m: mint::RowMatrix3<f32>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into()).transpose()
+ }
+}
+
+impl From<Mat3A> for mint::RowMatrix3<f32> {
+ fn from(m: Mat3A) -> Self {
+ let mt = m.transpose();
+ Self {
+ x: mt.x_axis.into(),
+ y: mt.y_axis.into(),
+ z: mt.z_axis.into(),
+ }
+ }
+}
+
+impl From<mint::ColumnMatrix3<f32>> for Mat3A {
+ fn from(m: mint::ColumnMatrix3<f32>) -> Self {
+ Self::from_cols(m.x.into(), m.y.into(), m.z.into())
+ }
+}
+
+impl From<Mat3A> for mint::ColumnMatrix3<f32> {
+ fn from(m: Mat3A) -> Self {
+ Self {
+ x: m.x_axis.into(),
+ y: m.y_axis.into(),
+ z: m.z_axis.into(),
+ }
+ }
+}
+
+impl IntoMint for Mat3A {
+ type MintType = mint::ColumnMatrix3<f32>;
+}
+
+impl_float_types!(f32, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec4);
+impl_float_types!(f64, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4);
+impl_vec_types!(i32, IVec2, IVec3, IVec4);
+impl_vec_types!(u32, UVec2, UVec3, UVec4);
+
+#[cfg(test)]
+mod test {
+ macro_rules! impl_vec_tests {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ use crate::{$vec2, $vec3, $vec4};
+ use mint;
+
+ #[test]
+ fn test_point2() {
+ let m = mint::Point2 {
+ x: 1 as $t,
+ y: 2 as $t,
+ };
+ let g = $vec2::from(m);
+ assert_eq!(g, $vec2::new(1 as $t, 2 as $t));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_point3() {
+ let m = mint::Point3 {
+ x: 1 as $t,
+ y: 2 as $t,
+ z: 3 as $t,
+ };
+ let g = $vec3::from(m);
+ assert_eq!(g, $vec3::new(1 as $t, 2 as $t, 3 as $t));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_vector2() {
+ let m = mint::Vector2 {
+ x: 1 as $t,
+ y: 2 as $t,
+ };
+ let g = $vec2::from(m);
+ assert_eq!(g, $vec2::new(1 as $t, 2 as $t));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_vector3() {
+ let m = mint::Vector3 {
+ x: 1 as $t,
+ y: 2 as $t,
+ z: 3 as $t,
+ };
+ let g = $vec3::from(m);
+ assert_eq!(g, $vec3::new(1 as $t, 2 as $t, 3 as $t));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_vector4() {
+ let m = mint::Vector4 {
+ x: 1 as $t,
+ y: 2 as $t,
+ z: 3 as $t,
+ w: 4 as $t,
+ };
+ let g = $vec4::from(m);
+ assert_eq!(g, $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t));
+ assert_eq!(m, g.into());
+ }
+ };
+ }
+
+ macro_rules! impl_float_tests {
+ ($t:ty, $mat2:ident, $mat3:ident, $mat4:ident, $quat:ident, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ impl_vec_tests!($t, $vec2, $vec3, $vec4);
+
+ use crate::{$mat2, $mat3, $mat4, $quat};
+
+ #[test]
+ fn test_quaternion() {
+ let m = mint::Quaternion {
+ v: mint::Vector3 {
+ x: 1.0,
+ y: 2.0,
+ z: 3.0,
+ },
+ s: 4.0,
+ };
+ let g = $quat::from(m);
+ assert_eq!(g, $quat::from_xyzw(1.0, 2.0, 3.0, 4.0));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_matrix2() {
+ let g = $mat2::from_cols_array_2d(&[[1.0, 2.0], [3.0, 4.0]]);
+ let m = mint::ColumnMatrix2::from(g);
+ assert_eq!(g, $mat2::from(m));
+ let mt = mint::RowMatrix2::from(g);
+ assert_eq!(mt, mint::RowMatrix2::from([[1.0, 3.0], [2.0, 4.0]]));
+ assert_eq!(g, $mat2::from(mt));
+ }
+
+ #[test]
+ fn test_matrix3() {
+ let g =
+ $mat3::from_cols_array_2d(&[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]);
+ let m = mint::ColumnMatrix3::from(g);
+ assert_eq!(g, $mat3::from(m));
+ let mt = mint::RowMatrix3::from(g);
+ assert_eq!(
+ mt,
+ mint::RowMatrix3::from([[1.0, 4.0, 7.0], [2.0, 5.0, 8.0], [3.0, 6.0, 9.0]])
+ );
+ assert_eq!(g, $mat3::from(mt));
+ }
+
+ #[test]
+ fn test_matrix4() {
+ let g = $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0],
+ ]);
+ let m = mint::ColumnMatrix4::from(g);
+ assert_eq!(g, $mat4::from(m));
+ let mt = mint::RowMatrix4::from(g);
+ assert_eq!(
+ mt,
+ mint::RowMatrix4::from([
+ [1.0, 5.0, 9.0, 13.0],
+ [2.0, 6.0, 10.0, 14.0],
+ [3.0, 7.0, 11.0, 15.0],
+ [4.0, 8.0, 12.0, 16.0]
+ ])
+ );
+ assert_eq!(g, $mat4::from(mt));
+ }
+ };
+ }
+
+ mod f32 {
+ impl_float_tests!(f32, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec4);
+
+ #[test]
+ fn test_point3a() {
+ use crate::Vec3A;
+ let m = mint::Point3 {
+ x: 1.0,
+ y: 2.0,
+ z: 3.0,
+ };
+ let g = Vec3A::from(m);
+ assert_eq!(g, Vec3A::new(1.0, 2.0, 3.0));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_vector3a() {
+ use crate::Vec3A;
+ let m = mint::Vector3 {
+ x: 1.0,
+ y: 2.0,
+ z: 3.0,
+ };
+ let g = Vec3A::from(m);
+ assert_eq!(g, Vec3A::new(1.0, 2.0, 3.0));
+ assert_eq!(m, g.into());
+ }
+
+ #[test]
+ fn test_mat3a_col_major() {
+ use crate::Mat3A;
+ let m = mint::ColumnMatrix3 {
+ x: [0.0, 1.0, 2.0].into(),
+ y: [3.0, 4.0, 5.0].into(),
+ z: [6.0, 7.0, 8.0].into(),
+ };
+ let expected = Mat3A::from_cols(
+ [0.0, 1.0, 2.0].into(),
+ [3.0, 4.0, 5.0].into(),
+ [6.0, 7.0, 8.0].into(),
+ );
+ assert_eq!(expected, m.into());
+ assert_eq!(m, expected.into());
+ }
+
+ #[test]
+ fn test_mat3a_row_major() {
+ use crate::Mat3A;
+ let m = mint::RowMatrix3 {
+ x: [0.0, 1.0, 2.0].into(),
+ y: [3.0, 4.0, 5.0].into(),
+ z: [6.0, 7.0, 8.0].into(),
+ };
+ let expected = Mat3A::from_cols(
+ [0.0, 3.0, 6.0].into(),
+ [1.0, 4.0, 7.0].into(),
+ [2.0, 5.0, 8.0].into(),
+ );
+ assert_eq!(expected, m.into());
+ assert_eq!(m, expected.into());
+ }
+ }
+
+ mod f64 {
+ impl_float_tests!(f64, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4);
+ }
+
+ mod i32 {
+ impl_vec_tests!(i32, IVec2, IVec3, IVec4);
+ }
+
+ mod u32 {
+ impl_vec_tests!(u32, UVec2, UVec3, UVec4);
+ }
+}
diff --git a/src/features/impl_rand.rs b/src/features/impl_rand.rs
new file mode 100644
index 0000000..d3f33c2
--- /dev/null
+++ b/src/features/impl_rand.rs
@@ -0,0 +1,199 @@
+macro_rules! impl_vec_types {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ impl Distribution<$vec2> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $vec2 {
+ rng.gen::<[$t; 2]>().into()
+ }
+ }
+
+ impl Distribution<$vec3> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $vec3 {
+ rng.gen::<[$t; 3]>().into()
+ }
+ }
+
+ impl Distribution<$vec4> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $vec4 {
+ rng.gen::<[$t; 4]>().into()
+ }
+ }
+
+ #[test]
+ fn test_vec2_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a: ($t, $t) = rng1.gen();
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b: $vec2 = rng2.gen();
+ assert_eq!(a, b.into());
+ }
+
+ #[test]
+ fn test_vec3_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a: ($t, $t, $t) = rng1.gen();
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b: $vec3 = rng2.gen();
+ assert_eq!(a, b.into());
+ }
+
+ #[test]
+ fn test_vec4_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a: ($t, $t, $t, $t) = rng1.gen();
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b: $vec4 = rng2.gen();
+ assert_eq!(a, b.into());
+ }
+ };
+}
+
+macro_rules! impl_float_types {
+ ($t:ident, $mat2:ident, $mat3:ident, $mat4:ident, $quat:ident, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ impl_vec_types!($t, $vec2, $vec3, $vec4);
+
+ impl Distribution<$mat2> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $mat2 {
+ $mat2::from_cols_array(&rng.gen())
+ }
+ }
+
+ impl Distribution<$mat3> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $mat3 {
+ $mat3::from_cols_array(&rng.gen())
+ }
+ }
+
+ impl Distribution<$mat4> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $mat4 {
+ $mat4::from_cols_array(&rng.gen())
+ }
+ }
+
+ impl Distribution<$quat> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $quat {
+ let yaw = -PI + rng.gen::<$t>() * 2.0 * PI;
+ let pitch = -PI + rng.gen::<$t>() * 2.0 * PI;
+ let roll = -PI + rng.gen::<$t>() * 2.0 * PI;
+ $quat::from_euler(crate::EulerRot::YXZ, yaw, pitch, roll)
+ }
+ }
+
+ #[test]
+ fn test_mat2_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a = $mat2::from_cols_array(&rng1.gen::<[$t; 4]>());
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b = rng2.gen::<$mat2>();
+ assert_eq!(a, b);
+ }
+
+ #[test]
+ fn test_mat3_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a = $mat3::from_cols_array(&rng1.gen::<[$t; 9]>());
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b = rng2.gen::<$mat3>();
+ assert_eq!(a, b);
+ }
+
+ #[test]
+ fn test_mat4_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a = $mat4::from_cols_array(&rng1.gen::<[$t; 16]>());
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b = rng2.gen::<$mat4>();
+ assert_eq!(a, b);
+ }
+
+ #[test]
+ fn test_quat_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a: $quat = rng1.gen();
+ assert!(a.is_normalized());
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b: $quat = rng2.gen();
+ assert_eq!(a, b);
+ }
+ };
+}
+
+mod f32 {
+ use crate::{Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+ use core::f32::consts::PI;
+ use rand::{
+ distributions::{Distribution, Standard},
+ Rng,
+ };
+
+ impl_float_types!(f32, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec4);
+
+ impl Distribution<Vec3A> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec3A {
+ rng.gen::<[f32; 3]>().into()
+ }
+ }
+
+ #[test]
+ fn test_vec3a_rand() {
+ use rand::{Rng, SeedableRng};
+ use rand_xoshiro::Xoshiro256Plus;
+ let mut rng1 = Xoshiro256Plus::seed_from_u64(0);
+ let a: (f32, f32, f32) = rng1.gen();
+ let mut rng2 = Xoshiro256Plus::seed_from_u64(0);
+ let b: Vec3A = rng2.gen();
+ assert_eq!(a, b.into());
+ }
+}
+
+mod f64 {
+ use crate::{DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4};
+ use core::f64::consts::PI;
+ use rand::{
+ distributions::{Distribution, Standard},
+ Rng,
+ };
+
+ impl_float_types!(f64, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4);
+}
+
+mod i32 {
+ use crate::{IVec2, IVec3, IVec4};
+ use rand::{
+ distributions::{Distribution, Standard},
+ Rng,
+ };
+
+ impl_vec_types!(i32, IVec2, IVec3, IVec4);
+}
+
+mod u32 {
+ use crate::{UVec2, UVec3, UVec4};
+ use rand::{
+ distributions::{Distribution, Standard},
+ Rng,
+ };
+
+ impl_vec_types!(u32, UVec2, UVec3, UVec4);
+}
diff --git a/src/features/impl_rkyv.rs b/src/features/impl_rkyv.rs
new file mode 100644
index 0000000..5c6efdc
--- /dev/null
+++ b/src/features/impl_rkyv.rs
@@ -0,0 +1,188 @@
+#[cfg(feature = "bytecheck")]
+macro_rules! impl_rkyv {
+ (@bytecheck $type:ty) => {
+ impl<C: ?Sized> bytecheck::CheckBytes<C> for $type {
+ type Error = core::convert::Infallible;
+
+ #[inline]
+ unsafe fn check_bytes<'a>(
+ value: *const Self,
+ _: &mut C,
+ ) -> Result<&'a Self, Self::Error> {
+ Ok(&*value)
+ }
+ }
+ };
+
+ ($type:ty) => {
+ impl_rkyv_derive!(@serialize $type);
+ impl_rkyv_derive!(@archive_deserialize $type);
+ impl_rkyv!(@bytecheck $type);
+ };
+}
+
+#[cfg(not(feature = "bytecheck"))]
+macro_rules! impl_rkyv {
+ ($type:ty) => {
+ impl_rkyv_derive!(@serialize $type);
+ impl_rkyv_derive!(@archive_deserialize $type);
+ };
+}
+
+macro_rules! impl_rkyv_derive {
+ (@serialize $type:ty) => {
+ impl<S: Fallible + ?Sized> Serialize<S> for $type {
+ #[inline]
+ fn serialize(&self, _: &mut S) -> Result<Self::Resolver, S::Error> {
+ Ok(())
+ }
+ }
+ };
+
+ (@archive_deserialize $type:ty) => {
+ impl Archive for $type {
+ type Archived = $type;
+ type Resolver = ();
+
+ #[inline]
+ unsafe fn resolve(&self, _: usize, _: Self::Resolver, out: *mut Self::Archived) {
+ out.write(to_archived!(*self as Self));
+ }
+ }
+
+ impl<D: Fallible + ?Sized> Deserialize<$type, D> for $type {
+ #[inline]
+ fn deserialize(&self, _: &mut D) -> Result<$type, D::Error> {
+ Ok(from_archived!(*self))
+ }
+ }
+ };
+}
+
+mod f32 {
+ use crate::{Affine2, Affine3A, Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+ use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize};
+ impl_rkyv!(Affine2);
+ impl_rkyv!(Affine3A);
+ impl_rkyv!(Mat2);
+ impl_rkyv!(Mat3);
+ impl_rkyv!(Mat3A);
+ impl_rkyv!(Mat4);
+ impl_rkyv!(Quat);
+ impl_rkyv!(Vec2);
+ impl_rkyv!(Vec3);
+ impl_rkyv!(Vec3A);
+ impl_rkyv!(Vec4);
+}
+
+mod f64 {
+ use crate::{DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4};
+ use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize};
+
+ impl_rkyv!(DAffine2);
+ impl_rkyv!(DAffine3);
+ impl_rkyv!(DMat2);
+ impl_rkyv!(DMat3);
+ impl_rkyv!(DMat4);
+ impl_rkyv!(DQuat);
+ impl_rkyv!(DVec2);
+ impl_rkyv!(DVec3);
+ impl_rkyv!(DVec4);
+}
+
+mod i32 {
+ use crate::{IVec2, IVec3, IVec4};
+ use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize};
+
+ impl_rkyv!(IVec2);
+ impl_rkyv!(IVec3);
+ impl_rkyv!(IVec4);
+}
+
+mod u32 {
+ use crate::{UVec2, UVec3, UVec4};
+ use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize};
+
+ impl_rkyv!(UVec2);
+ impl_rkyv!(UVec3);
+ impl_rkyv!(UVec4);
+}
+
+#[cfg(test)]
+mod test {
+ pub type DefaultSerializer = rkyv::ser::serializers::CoreSerializer<256, 256>;
+ pub type DefaultDeserializer = rkyv::Infallible;
+ use rkyv::ser::Serializer;
+ use rkyv::*;
+ pub fn test_archive<T>(value: &T)
+ where
+ T: core::fmt::Debug + PartialEq + rkyv::Serialize<DefaultSerializer>,
+ T::Archived: core::fmt::Debug + PartialEq<T> + rkyv::Deserialize<T, DefaultDeserializer>,
+ {
+ let mut serializer = DefaultSerializer::default();
+ serializer
+ .serialize_value(value)
+ .expect("failed to archive value");
+ let len = serializer.pos();
+ let buffer = serializer.into_serializer().into_inner();
+
+ let archived_value = unsafe { rkyv::archived_root::<T>(&buffer[0..len]) };
+ assert_eq!(archived_value, value);
+ let mut deserializer = DefaultDeserializer::default();
+ assert_eq!(
+ &archived_value.deserialize(&mut deserializer).unwrap(),
+ value
+ );
+ }
+
+ #[test]
+ fn test_rkyv() {
+ use crate::{Affine2, Affine3A, Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+ test_archive(&Affine2::from_cols_array(&[1.0, 0.0, 2.0, 0.0, 3.0, 4.0]));
+ test_archive(&Affine3A::from_cols_array(&[
+ 1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0, 4.0, 5.0, 6.0,
+ ]));
+ test_archive(&Mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]));
+ test_archive(&Mat3::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ test_archive(&Mat3A::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ test_archive(&Mat4::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ ]));
+ test_archive(&Quat::from_xyzw(1.0, 2.0, 3.0, 4.0));
+ test_archive(&Vec2::new(1.0, 2.0));
+ test_archive(&Vec3::new(1.0, 2.0, 3.0));
+ test_archive(&Vec3A::new(1.0, 2.0, 3.0));
+ test_archive(&Vec4::new(1.0, 2.0, 3.0, 4.0));
+
+ use crate::{DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4};
+ test_archive(&DAffine2::from_cols_array(&[1.0, 0.0, 2.0, 0.0, 3.0, 4.0]));
+ test_archive(&DAffine3::from_cols_array(&[
+ 1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0, 4.0, 5.0, 6.0,
+ ]));
+ test_archive(&DMat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]));
+ test_archive(&DMat3::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ test_archive(&DMat4::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ ]));
+ test_archive(&DQuat::from_xyzw(1.0, 2.0, 3.0, 4.0));
+ test_archive(&DVec2::new(1.0, 2.0));
+ test_archive(&DVec3::new(1.0, 2.0, 3.0));
+ test_archive(&DVec4::new(1.0, 2.0, 3.0, 4.0));
+
+ use crate::{IVec2, IVec3, IVec4};
+ test_archive(&IVec2::new(-1, 2));
+ test_archive(&IVec3::new(-1, 2, 3));
+ test_archive(&IVec4::new(-1, 2, 3, 4));
+
+ use crate::{UVec2, UVec3, UVec4};
+ test_archive(&UVec2::new(1, 2));
+ test_archive(&UVec3::new(1, 2, 3));
+ test_archive(&UVec4::new(1, 2, 3, 4));
+ }
+}
diff --git a/src/features/impl_serde.rs b/src/features/impl_serde.rs
new file mode 100644
index 0000000..c1f4a94
--- /dev/null
+++ b/src/features/impl_serde.rs
@@ -0,0 +1,787 @@
+macro_rules! impl_serde_vec2 {
+ ($t:ty, $vec2:ident) => {
+ impl Serialize for $vec2 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_tuple_struct(stringify!($vec2), 2)?;
+ state.serialize_field(&self.x)?;
+ state.serialize_field(&self.y)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $vec2 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Vec2Visitor;
+
+ impl<'de> Visitor<'de> for Vec2Visitor {
+ type Value = $vec2;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($vec2)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$vec2, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let x = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(0, &self))?;
+ let y = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(1, &self))?;
+ Ok($vec2::new(x, y))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($vec2), 2, Vec2Visitor)
+ }
+ }
+
+ #[test]
+ fn test_vec2_serde() {
+ let a = $vec2::new(1 as $t, 2 as $t);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(SX2, serialized);
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$vec2>(SX0);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec2>(SX1);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec2>(SX3);
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_vec3 {
+ ($t:ty, $vec3:ident) => {
+ impl_serde_vec3!($t, $vec3, test_vec3_serde);
+ };
+ ($t:ty, $vec3:ident, $test_name:ident) => {
+ impl Serialize for $vec3 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_tuple_struct(stringify!($vec3), 3)?;
+ state.serialize_field(&self.x)?;
+ state.serialize_field(&self.y)?;
+ state.serialize_field(&self.z)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $vec3 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Vec3Visitor;
+
+ impl<'de> Visitor<'de> for Vec3Visitor {
+ type Value = $vec3;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($vec3)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$vec3, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let x = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(0, &self))?;
+ let y = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(1, &self))?;
+ let z = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(2, &self))?;
+ Ok($vec3::new(x, y, z))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($vec3), 3, Vec3Visitor)
+ }
+ }
+
+ #[test]
+ fn $test_name() {
+ let a = $vec3::new(1 as $t, 2 as $t, 3 as $t);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(SX3, serialized);
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$vec3>(SX0);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec3>(SX1);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec3>(SX2);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec3>(SX4);
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_vec4 {
+ ($t:ty, $vec4:ident) => {
+ impl Serialize for $vec4 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_tuple_struct(stringify!($vec4), 4)?;
+ state.serialize_field(&self.x)?;
+ state.serialize_field(&self.y)?;
+ state.serialize_field(&self.z)?;
+ state.serialize_field(&self.w)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $vec4 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Vec4Visitor;
+
+ impl<'de> Visitor<'de> for Vec4Visitor {
+ type Value = $vec4;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($vec4)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$vec4, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let x = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(0, &self))?;
+ let y = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(1, &self))?;
+ let z = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(2, &self))?;
+ let w = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(3, &self))?;
+ Ok($vec4::new(x, y, z, w))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($vec4), 4, Vec4Visitor)
+ }
+ }
+
+ #[test]
+ fn test_vec4_serde() {
+ let a = $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(SX4, serialized);
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$vec4>(SX0);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec4>(SX1);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec4>(SX2);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec4>(SX3);
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$vec4>(SX5);
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_quat {
+ ($t:ty, $quat:ident) => {
+ impl Serialize for $quat {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_tuple_struct(stringify!($quat), 4)?;
+ state.serialize_field(&self.x)?;
+ state.serialize_field(&self.y)?;
+ state.serialize_field(&self.z)?;
+ state.serialize_field(&self.w)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $quat {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct QuatVisitor;
+
+ impl<'de> Visitor<'de> for QuatVisitor {
+ type Value = $quat;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($quat)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$quat, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let x = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(0, &self))?;
+ let y = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(1, &self))?;
+ let z = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(2, &self))?;
+ let w = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(3, &self))?;
+ Ok($quat::from_xyzw(x, y, z, w))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($quat), 4, QuatVisitor)
+ }
+ }
+
+ #[test]
+ fn test_quat_serde() {
+ let a = $quat::from_xyzw(1.0, 2.0, 3.0, 4.0);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(serialized, "[1.0,2.0,3.0,4.0]");
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$quat>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$quat>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$quat>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$quat>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$quat>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_mat2 {
+ ($t:ty, $mat2:ident) => {
+ impl Serialize for $mat2 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let f: &[_; 4] = self.as_ref();
+ let mut state = serializer.serialize_tuple_struct(stringify!($mat2), 4)?;
+ state.serialize_field(&f[0])?;
+ state.serialize_field(&f[1])?;
+ state.serialize_field(&f[2])?;
+ state.serialize_field(&f[3])?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $mat2 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Mat2Visitor;
+
+ impl<'de> Visitor<'de> for Mat2Visitor {
+ type Value = $mat2;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($mat2)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$mat2, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let mut f = { [0.0; 4] };
+ for i in 0..4 {
+ f[i] = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(i, &self))?;
+ }
+ Ok($mat2::from_cols_array(&f))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($mat2), 4, Mat2Visitor)
+ }
+ }
+
+ #[test]
+ fn test_mat2_serde() {
+ let a = $mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(serialized, "[1.0,2.0,3.0,4.0]");
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$mat2>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat2>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat2>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat2>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat2>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat2>("[[1.0,2.0],[3.0,4.0]]");
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_mat3 {
+ ($t:ty, $mat3:ident) => {
+ impl_serde_mat3!($t, $mat3, test_mat3_serde);
+ };
+ ($t:ty, $mat3:ident, $test_name:ident) => {
+ impl Serialize for $mat3 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let (m00, m01, m02) = self.x_axis.into();
+ let (m10, m11, m12) = self.y_axis.into();
+ let (m20, m21, m22) = self.z_axis.into();
+
+ let mut state = serializer.serialize_tuple_struct(stringify!($mat3), 9)?;
+ state.serialize_field(&m00)?;
+ state.serialize_field(&m01)?;
+ state.serialize_field(&m02)?;
+ state.serialize_field(&m10)?;
+ state.serialize_field(&m11)?;
+ state.serialize_field(&m12)?;
+ state.serialize_field(&m20)?;
+ state.serialize_field(&m21)?;
+ state.serialize_field(&m22)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $mat3 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Mat3Visitor;
+
+ impl<'de> Visitor<'de> for Mat3Visitor {
+ type Value = $mat3;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($mat3)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$mat3, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let mut f = { [0.0; 9] };
+ for i in 0..9 {
+ f[i] = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(i, &self))?;
+ }
+ Ok($mat3::from_cols_array(&f))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($mat3), 9, Mat3Visitor)
+ }
+ }
+
+ #[test]
+ fn $test_name() {
+ let a = $mat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(serialized, "[1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0]");
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$mat3>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat3>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat3>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat3>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat3>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ let deserialized =
+ serde_json::from_str::<$mat3>("[[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]");
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_mat4 {
+ ($t:ty, $mat4:ident) => {
+ impl Serialize for $mat4 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_tuple_struct(stringify!($mat4), 16)?;
+ for f in self.as_ref() {
+ state.serialize_field(f)?;
+ }
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $mat4 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Mat4Visitor;
+
+ impl<'de> Visitor<'de> for Mat4Visitor {
+ type Value = $mat4;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str(concat!("struct ", stringify!($mat4)))
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$mat4, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let mut f = { [0.0; 16] };
+ for i in 0..16 {
+ f[i] = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(i, &self))?;
+ }
+ Ok($mat4::from_cols_array(&f))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($mat4), 16, Mat4Visitor)
+ }
+ }
+
+ #[test]
+ fn test_mat4_serde() {
+ let a = $mat4::from_cols_array(&[
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
+ 16.0,
+ ]);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(
+ serialized,
+ "[1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0]"
+ );
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+ let deserialized = serde_json::from_str::<$mat4>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat4>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat4>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat4>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat4>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ let deserialized =
+ serde_json::from_str::<$mat4>("[[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$mat4>(
+ "[[1.0,2.0,3.0,4.0],[5.0,6.0,7.0,8.0],[9.0,10.0,11.0,12.0][13.0,14.0,15.0,16.0]]",
+ );
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_affine2 {
+ ($t:ty, $affine2:ident) => {
+ impl Serialize for $affine2 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Serialize column-wise as 3x4 matrix:
+ let mut state = serializer.serialize_tuple_struct(stringify!($affine2), 6)?;
+ state.serialize_field(&self.x_axis.x)?;
+ state.serialize_field(&self.x_axis.y)?;
+ state.serialize_field(&self.y_axis.x)?;
+ state.serialize_field(&self.y_axis.y)?;
+ state.serialize_field(&self.z_axis.x)?;
+ state.serialize_field(&self.z_axis.y)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $affine2 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Affine2Visitor;
+
+ impl<'de> Visitor<'de> for Affine2Visitor {
+ type Value = $affine2;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("struct $affine2")
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$affine2, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let mut f = [0.0; 6];
+ for (i, v) in f.iter_mut().enumerate() {
+ *v = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(i, &self))?;
+ }
+ Ok($affine2::from_cols_array(&f))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($affine2), 6, Affine2Visitor)
+ }
+ }
+
+ #[test]
+ fn test_affine2_serde() {
+ let a = $affine2::from_cols_array(&[1.0, 0.0, 2.0, 0.0, 3.0, 4.0]);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(serialized, "[1.0,0.0,2.0,0.0,3.0,4.0]");
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+
+ let deserialized = serde_json::from_str::<$affine2>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>("[[1.0,2.0],[3.0,4.0],[5.0,6.0]]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine2>(
+ "[[1.0,2.0,3.0,4.0],[5.0,6.0,7.0,8.0],[9.0,10.0,11.0,12.0][13.0,14.0,15.0,16.0]]",
+ );
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_affine3 {
+ ($t:ty, $affine3:ident) => {
+ impl Serialize for $affine3 {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Serialize column-wise as 3x4 matrix:
+ let mut state = serializer.serialize_tuple_struct(stringify!($affine3), 12)?;
+ state.serialize_field(&self.x_axis.x)?;
+ state.serialize_field(&self.x_axis.y)?;
+ state.serialize_field(&self.x_axis.z)?;
+ state.serialize_field(&self.y_axis.x)?;
+ state.serialize_field(&self.y_axis.y)?;
+ state.serialize_field(&self.y_axis.z)?;
+ state.serialize_field(&self.z_axis.x)?;
+ state.serialize_field(&self.z_axis.y)?;
+ state.serialize_field(&self.z_axis.z)?;
+ state.serialize_field(&self.w_axis.x)?;
+ state.serialize_field(&self.w_axis.y)?;
+ state.serialize_field(&self.w_axis.z)?;
+ state.end()
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $affine3 {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Affine3Visitor;
+
+ impl<'de> Visitor<'de> for Affine3Visitor {
+ type Value = $affine3;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("struct $affine3")
+ }
+
+ fn visit_seq<V>(self, mut seq: V) -> Result<$affine3, V::Error>
+ where
+ V: SeqAccess<'de>,
+ {
+ let mut f = [0.0; 12];
+ for (i, v) in f.iter_mut().enumerate() {
+ *v = seq
+ .next_element()?
+ .ok_or_else(|| de::Error::invalid_length(i, &self))?;
+ }
+ Ok($affine3::from_cols_array(&f))
+ }
+ }
+
+ deserializer.deserialize_tuple_struct(stringify!($affine3), 12, Affine3Visitor)
+ }
+ }
+
+ #[test]
+ fn test_affine3_serde() {
+ let a = $affine3::from_cols_array(&[
+ 1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0, 4.0, 5.0, 6.0,
+ ]);
+ let serialized = serde_json::to_string(&a).unwrap();
+ assert_eq!(
+ serialized,
+ "[1.0,0.0,0.0,0.0,2.0,0.0,0.0,0.0,3.0,4.0,5.0,6.0]"
+ );
+ let deserialized = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(a, deserialized);
+
+ let deserialized = serde_json::from_str::<$affine3>("[]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine3>("[1.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine3>("[1.0,2.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine3>("[1.0,2.0,3.0]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine3>("[1.0,2.0,3.0,4.0,5.0]");
+ assert!(deserialized.is_err());
+ let deserialized =
+ serde_json::from_str::<$affine3>("[[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]");
+ assert!(deserialized.is_err());
+ let deserialized = serde_json::from_str::<$affine3>(
+ "[[1.0,2.0,3.0,4.0],[5.0,6.0,7.0,8.0],[9.0,10.0,11.0,12.0][13.0,14.0,15.0,16.0]]",
+ );
+ assert!(deserialized.is_err());
+ }
+ };
+}
+
+macro_rules! impl_serde_vec_types {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ impl_serde_vec2!($t, $vec2);
+ impl_serde_vec3!($t, $vec3);
+ impl_serde_vec4!($t, $vec4);
+ };
+}
+
+macro_rules! impl_serde_float_types {
+ ($t:ty, $affine2:ident, $affine3:ident, $mat2:ident, $mat3:ident, $mat4:ident, $quat:ident, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ impl_serde_affine2!($t, $affine2);
+ impl_serde_affine3!($t, $affine3);
+ impl_serde_mat2!($t, $mat2);
+ impl_serde_mat3!($t, $mat3);
+ impl_serde_mat4!($t, $mat4);
+ impl_serde_quat!($t, $quat);
+ impl_serde_vec_types!($t, $vec2, $vec3, $vec4);
+ };
+}
+
+#[cfg(test)]
+mod test_float {
+ pub const SX0: &str = "[]";
+ pub const SX1: &str = "[1.0]";
+ pub const SX2: &str = "[1.0,2.0]";
+ pub const SX3: &str = "[1.0,2.0,3.0]";
+ pub const SX4: &str = "[1.0,2.0,3.0,4.0]";
+ pub const SX5: &str = "[1.0,2.0,3.0,4.0,5.0]";
+}
+
+#[cfg(test)]
+mod test_int {
+ pub const SX0: &str = "[]";
+ pub const SX1: &str = "[1]";
+ pub const SX2: &str = "[1,2]";
+ pub const SX3: &str = "[1,2,3]";
+ pub const SX4: &str = "[1,2,3,4]";
+ pub const SX5: &str = "[1,2,3,4,5]";
+}
+
+mod f32 {
+ #[cfg(test)]
+ use super::test_float::*;
+ use crate::{Affine2, Affine3A, Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+ use core::fmt;
+ use serde::{
+ de::{self, Deserialize, Deserializer, SeqAccess, Visitor},
+ ser::{Serialize, SerializeTupleStruct, Serializer},
+ };
+
+ impl_serde_float_types!(f32, Affine2, Affine3A, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec4);
+ impl_serde_mat3!(f32, Mat3A, test_mat3a_serde);
+ impl_serde_vec3!(f32, Vec3A, test_vec3a_serde);
+}
+
+mod f64 {
+ #[cfg(test)]
+ use super::test_float::*;
+ use crate::{DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4};
+ use core::fmt;
+ use serde::{
+ de::{self, Deserialize, Deserializer, SeqAccess, Visitor},
+ ser::{Serialize, SerializeTupleStruct, Serializer},
+ };
+
+ impl_serde_float_types!(
+ f64, DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4
+ );
+}
+
+mod i32 {
+ #[cfg(test)]
+ use super::test_int::*;
+ use crate::{IVec2, IVec3, IVec4};
+ use core::fmt;
+ use serde::{
+ de::{self, Deserialize, Deserializer, SeqAccess, Visitor},
+ ser::{Serialize, SerializeTupleStruct, Serializer},
+ };
+
+ impl_serde_vec_types!(i32, IVec2, IVec3, IVec4);
+}
+
+mod u32 {
+ #[cfg(test)]
+ use super::test_int::*;
+ use crate::{UVec2, UVec3, UVec4};
+ use core::fmt;
+ use serde::{
+ de::{self, Deserialize, Deserializer, SeqAccess, Visitor},
+ ser::{Serialize, SerializeTupleStruct, Serializer},
+ };
+
+ impl_serde_vec_types!(u32, UVec2, UVec3, UVec4);
+}
diff --git a/src/features/mod.rs b/src/features/mod.rs
new file mode 100644
index 0000000..6d6436d
--- /dev/null
+++ b/src/features/mod.rs
@@ -0,0 +1,17 @@
+#[cfg(feature = "approx")]
+pub mod impl_approx;
+
+#[cfg(feature = "bytemuck")]
+pub mod impl_bytemuck;
+
+#[cfg(feature = "mint")]
+pub mod impl_mint;
+
+#[cfg(feature = "rand")]
+pub mod impl_rand;
+
+#[cfg(feature = "serde")]
+pub mod impl_serde;
+
+#[cfg(feature = "rkyv")]
+pub mod impl_rkyv;
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..1e92a63
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,344 @@
+/*!
+# glam
+
+`glam` is a simple and fast linear algebra library for games and graphics.
+
+## Features
+
+* [`f32`](mod@f32) types
+ * vectors: [`Vec2`], [`Vec3`], [`Vec3A`] and [`Vec4`]
+ * square matrices: [`Mat2`], [`Mat3`], [`Mat3A`] and [`Mat4`]
+ * a quaternion type: [`Quat`]
+ * affine transformation types: [`Affine2`] and [`Affine3A`]
+* [`f64`](mod@f64) types
+ * vectors: [`DVec2`], [`DVec3`] and [`DVec4`]
+ * square matrices: [`DMat2`], [`DMat3`] and [`DMat4`]
+ * a quaternion type: [`DQuat`]
+ * affine transformation types: [`DAffine2`] and [`DAffine3`]
+* [`i32`](mod@i32) types
+ * vectors: [`IVec2`], [`IVec3`] and [`IVec4`]
+* [`u32`](mod@u32) types
+ * vectors: [`UVec2`], [`UVec3`] and [`UVec4`]
+* [`bool`](mod@bool) types
+ * vectors: [`BVec2`], [`BVec3`] and [`BVec4`]
+
+## SIMD
+
+`glam` is built with SIMD in mind. Many `f32` types use 128-bit SIMD vector types for storage
+and/or implementation. The use of SIMD generally enables better performance than using primitive
+numeric types such as `f32`.
+
+Some `glam` types use SIMD for storage meaning they are 16 byte aligned, these types include
+`Mat2`, `Mat3A`, `Mat4`, `Quat`, `Vec3A`, `Vec4`, `Affine2` and `Affine3A`. Types
+with an `A` suffix are a SIMD alternative to a scalar type, e.g. `Vec3` uses `f32` storage and
+`Vec3A` uses SIMD storage.
+
+When SIMD is not available on the target the types will maintain 16 byte alignment and internal
+padding so that object sizes and layouts will not change between architectures. There are scalar
+math fallback implementations exist when SIMD is not available. It is intended to add support for
+other SIMD architectures once they appear in stable Rust.
+
+Currently only SSE2 on x86/x86_64 is supported as this is what stable Rust supports.
+
+## Vec3A and Mat3A
+
+`Vec3A` is a SIMD optimized version of the `Vec3` type, which due to 16 byte alignment results
+in `Vec3A` containing 4 bytes of padding making it 16 bytes in size in total. `Mat3A` is composed
+of three `Vec3A` columns.
+
+| Type | `f32` bytes | Align bytes | Size bytes | Padding |
+|:-----------|------------:|------------:|-----------:|--------:|
+|[`Vec3`] | 12| 4| 12| 0|
+|[`Vec3A`] | 12| 16| 16| 4|
+|[`Mat3`] | 36| 4| 36| 0|
+|[`Mat3A`] | 36| 16| 48| 12|
+
+Despite this wasted space the SIMD implementations tend to outperform `f32` implementations in
+[**mathbench**](https://github.com/bitshifter/mathbench-rs) benchmarks.
+
+`glam` treats [`Vec3`] as the default 3D vector type and [`Vec3A`] a special case for optimization.
+When methods need to return a 3D vector they will generally return [`Vec3`].
+
+There are [`From`] trait implementations for converting from [`Vec4`] to a [`Vec3A`] and between
+[`Vec3`] and [`Vec3A`] (and vice versa).
+
+```
+use glam::{Vec3, Vec3A, Vec4};
+
+let v4 = Vec4::new(1.0, 2.0, 3.0, 4.0);
+
+// Convert from `Vec4` to `Vec3A`, this is a no-op if SIMD is supported.
+let v3a = Vec3A::from(v4);
+assert_eq!(Vec3A::new(1.0, 2.0, 3.0), v3a);
+
+// Convert from `Vec3A` to `Vec3`.
+let v3 = Vec3::from(v3a);
+assert_eq!(Vec3::new(1.0, 2.0, 3.0), v3);
+
+// Convert from `Vec3` to `Vec3A`.
+let v3a = Vec3A::from(v3);
+assert_eq!(Vec3A::new(1.0, 2.0, 3.0), v3a);
+```
+
+## Affine2 and Affine3A
+
+`Affine2` and `Affine3A` are composed of a linear transform matrix and a vector translation. They
+represent 2D and 3D affine transformations which are commonly used in games.
+
+The table below shows the performance advantage of `Affine2` over `Mat3A` and `Mat3A` over `Mat3`.
+
+| operation | `Mat3` | `Mat3A` | `Affine2` |
+|--------------------|-------------|------------|------------|
+| inverse | 11.4±0.09ns | 7.1±0.09ns | 5.4±0.06ns |
+| mul self | 10.5±0.04ns | 5.2±0.05ns | 4.0±0.05ns |
+| transform point2 | 2.7±0.02ns | 2.7±0.03ns | 2.8±0.04ns |
+| transform vector2 | 2.6±0.01ns | 2.6±0.03ns | 2.3±0.02ns |
+
+Performance is much closer between `Mat4` and `Affine3A` with the affine type being faster to
+invert.
+
+| operation | `Mat4` | `Affine3A` |
+|--------------------|-------------|-------------|
+| inverse | 15.9±0.11ns | 10.8±0.06ns |
+| mul self | 7.3±0.05ns | 7.0±0.06ns |
+| transform point3 | 3.6±0.02ns | 4.3±0.04ns |
+| transform point3a | 3.0±0.02ns | 3.0±0.04ns |
+| transform vector3 | 4.1±0.02ns | 3.9±0.04ns |
+| transform vector3a | 2.8±0.02ns | 2.8±0.02ns |
+
+Benchmarks were taken on an Intel Core i7-4710HQ.
+
+## Linear algebra conventions
+
+`glam` interprets vectors as column matrices (also known as column vectors) meaning when
+transforming a vector with a matrix the matrix goes on the left.
+
+```
+use glam::{Mat3, Vec3};
+let m = Mat3::IDENTITY;
+let x = Vec3::X;
+let v = m * x;
+assert_eq!(v, x);
+```
+
+Matrices are stored in memory in column-major order.
+
+All angles are in radians. Rust provides the `f32::to_radians()` and `f64::to_radians()` methods to
+convert from degrees.
+
+## Direct element access
+
+Because some types may internally be implemented using SIMD types, direct access to vector elements
+is supported by implementing the [`Deref`] and [`DerefMut`] traits.
+
+```
+use glam::Vec3A;
+let mut v = Vec3A::new(1.0, 2.0, 3.0);
+assert_eq!(3.0, v.z);
+v.z += 1.0;
+assert_eq!(4.0, v.z);
+```
+
+[`Deref`]: https://doc.rust-lang.org/std/ops/trait.Deref.html
+[`DerefMut`]: https://doc.rust-lang.org/std/ops/trait.DerefMut.html
+
+## glam assertions
+
+`glam` does not enforce validity checks on method parameters at runtime. For example methods that
+require normalized vectors as input such as `Quat::from_axis_angle(axis, angle)` will not check
+that axis is a valid normalized vector. To help catch unintended misuse of `glam` the
+`debug-glam-assert` or `glam-assert` features can be enabled to add checks to ensure that inputs
+are valid.
+
+## Vector swizzles
+
+`glam` vector types have functions allowing elements of vectors to be reordered, this includes
+creating a vector of a different size from the vector's elements.
+
+The swizzle functions are implemented using traits to add them to each vector type. This is
+primarily because there are a lot of swizzle functions which can obfuscate the other vector
+functions in documentation and so on. The traits are [`Vec2Swizzles`], [`Vec3Swizzles`] and
+[`Vec4Swizzles`].
+
+Note that the [`Vec3Swizzles`] implementation for [`Vec3A`] will return a [`Vec3A`] for 3 element
+swizzles, all other implementations will return [`Vec3`].
+
+```
+use glam::{swizzles::*, Vec2, Vec3, Vec3A, Vec4};
+
+let v = Vec4::new(1.0, 2.0, 3.0, 4.0);
+
+// Reverse elements of `v`, if SIMD is supported this will use a vector shuffle.
+let wzyx = v.wzyx();
+assert_eq!(Vec4::new(4.0, 3.0, 2.0, 1.0), wzyx);
+
+// Swizzle the yzw elements of `v` into a `Vec3`
+let yzw = v.yzw();
+assert_eq!(Vec3::new(2.0, 3.0, 4.0), yzw);
+
+// To swizzle a `Vec4` into a `Vec3A` swizzle the `Vec4` first then convert to
+// `Vec3A`. If SIMD is supported this will use a vector shuffle. The last
+// element of the shuffled `Vec4` is ignored by the `Vec3A`.
+let yzw = Vec3A::from(v.yzwx());
+assert_eq!(Vec3A::new(2.0, 3.0, 4.0), yzw);
+
+// You can swizzle from a `Vec4` to a `Vec2`
+let xy = v.xy();
+assert_eq!(Vec2::new(1.0, 2.0), xy);
+
+// And back again
+let yyxx = xy.yyxx();
+assert_eq!(Vec4::new(2.0, 2.0, 1.0, 1.0), yyxx);
+```
+
+## SIMD and scalar consistency
+
+`glam` types implement `serde` `Serialize` and `Deserialize` traits to ensure
+that they will serialize and deserialize exactly the same whether or not
+SIMD support is being used.
+
+The SIMD versions implement the `core::fmt::Debug` and `core::fmt::Display`
+traits so they print the same as the scalar version.
+
+```
+use glam::Vec4;
+let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
+assert_eq!(format!("{}", a), "[1, 2, 3, 4]");
+```
+
+## Feature gates
+
+All `glam` dependencies are optional, however some are required for tests
+and benchmarks.
+
+* `std` - the default feature, has no dependencies.
+* `approx` - traits and macros for approximate float comparisons
+* `bytemuck` - for casting into slices of bytes
+* `libm` - required to compile with `no_std`
+* `mint` - for interoperating with other 3D math libraries
+* `num-traits` - required to compile `no_std`, will be included when enabling
+ the `libm` feature
+* `rand` - implementations of `Distribution` trait for all `glam` types.
+* `serde` - implementations of `Serialize` and `Deserialize` for all `glam`
+ types. Note that serialization should work between builds of `glam` with and without SIMD enabled
+* `scalar-math` - disables SIMD support and uses native alignment for all types.
+* `debug-glam-assert` - adds assertions in debug builds which check the validity of parameters
+ passed to `glam` to help catch runtime errors.
+* `glam-assert` - adds assertions to all builds which check the validity of parameters passed to
+ `glam` to help catch runtime errors.
+
+## Minimum Supported Rust Version (MSRV)
+
+The minimum supported Rust version is `1.52.1`.
+
+*/
+#![doc(html_root_url = "https://docs.rs/glam/0.20.3")]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(target_arch = "spirv", feature(asm, register_attr, repr_simd))]
+#![deny(
+ rust_2018_compatibility,
+ rust_2018_idioms,
+ future_incompatible,
+ nonstandard_style
+)]
+// This would require renaming a lot of stuff, disabling for now.
+#![allow(clippy::upper_case_acronyms)]
+// clippy doesn't like `to_array(&self)`
+#![allow(clippy::wrong_self_convention)]
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+mod mat;
+
+#[macro_use]
+mod vec;
+
+#[doc(hidden)]
+pub mod cast;
+
+mod affine2;
+mod affine3;
+mod core;
+mod euler;
+mod features;
+mod mat2;
+mod mat3;
+mod mat4;
+mod quat;
+mod vec2;
+mod vec3;
+mod vec4;
+mod vec_mask;
+
+#[cfg(target_arch = "spirv")]
+mod spirv;
+
+#[cfg(feature = "transform-types")]
+mod transform;
+
+#[doc(hidden)]
+pub use self::core::storage::{XY, XYZ, XYZW};
+
+/** `bool` vector mask types. */
+pub mod bool {
+ pub use super::vec_mask::{BVec2, BVec3, BVec3A, BVec4, BVec4A};
+}
+pub use self::bool::*;
+
+/** `f32` vector, quaternion and matrix types. */
+pub mod f32 {
+ pub use super::affine2::Affine2;
+ pub use super::affine3::Affine3A;
+ pub use super::mat2::{mat2, Mat2};
+ pub use super::mat3::{mat3, mat3a, Mat3, Mat3A};
+ pub use super::mat4::{mat4, Mat4};
+ pub use super::quat::{quat, Quat};
+ pub use super::vec2::{vec2, Vec2};
+ pub use super::vec3::{vec3, vec3a, Vec3, Vec3A};
+ pub use super::vec4::{vec4, Vec4};
+
+ #[cfg(feature = "transform-types")]
+ #[allow(deprecated)]
+ pub use super::transform::{TransformRT, TransformSRT};
+}
+pub use self::f32::*;
+
+/** `f64` vector, quaternion and matrix types. */
+pub mod f64 {
+ pub use super::affine2::DAffine2;
+ pub use super::affine3::DAffine3;
+ pub use super::mat2::{dmat2, DMat2};
+ pub use super::mat3::{dmat3, DMat3};
+ pub use super::mat4::{dmat4, DMat4};
+ pub use super::quat::{dquat, DQuat};
+ pub use super::vec2::{dvec2, DVec2};
+ pub use super::vec3::{dvec3, DVec3};
+ pub use super::vec4::{dvec4, DVec4};
+}
+pub use self::f64::*;
+
+/** `i32` vector types. */
+pub mod i32 {
+ pub use super::vec2::{ivec2, IVec2};
+ pub use super::vec3::{ivec3, IVec3};
+ pub use super::vec4::{ivec4, IVec4};
+}
+pub use self::i32::*;
+
+/** `u32` vector types. */
+pub mod u32 {
+ pub use super::vec2::{uvec2, UVec2};
+ pub use super::vec3::{uvec3, UVec3};
+ pub use super::vec4::{uvec4, UVec4};
+}
+pub use self::u32::*;
+
+/** Traits adding swizzle methods to all vector types. */
+pub mod swizzles;
+
+pub use self::swizzles::{Vec2Swizzles, Vec3Swizzles, Vec4Swizzles};
+
+/** Rotation Helper */
+pub use euler::EulerRot;
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..721face
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,484 @@
+#[cfg(any(
+ all(debug_assertions, feature = "debug-glam-assert"),
+ feature = "glam-assert"
+))]
+macro_rules! glam_assert {
+ ($($arg:tt)*) => ( assert!($($arg)*); )
+}
+#[cfg(not(any(
+ all(debug_assertions, feature = "debug-glam-assert"),
+ feature = "glam-assert"
+)))]
+macro_rules! glam_assert {
+ ($($arg:tt)*) => {};
+}
+
+macro_rules! const_assert {
+ ($x:expr $(,)?) => {
+ // FIXME: everything is align 16 on spirv - ignore for now
+ #[cfg(not(target_arch = "spirv"))]
+ #[allow(unknown_lints, clippy::eq_op)]
+ const _: [(); 0 - !{
+ const ASSERT: bool = $x;
+ ASSERT
+ } as usize] = [];
+ };
+}
+
+macro_rules! const_assert_eq {
+ ($x:expr, $y:expr $(,)?) => {
+ const_assert!($x == $y);
+ };
+}
+
+#[macro_export]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+macro_rules! const_m128 {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::Vec4Cast { fx4: $fx4 }.m128 }
+ };
+}
+
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+macro_rules! const_f32x4 {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::Vec4Cast { fx4: $fx4 }.m128 }
+ };
+}
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+macro_rules! const_f32x4 {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::Vec4Cast { fx4: $fx4 }.v128 }
+ };
+}
+
+/// Creates a `Vec2` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_vec2, Vec2};
+/// const ONE: Vec2 = const_vec2!([1.0; 2]);
+/// const X: Vec2 = const_vec2!([1.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_vec2 {
+ ($fx2:expr) => {
+ unsafe { $crate::cast::Vec2Cast { fx2: $fx2 }.v2 }
+ };
+}
+
+/// Creates a `Vec3` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_vec3, Vec3};
+/// const ONE: Vec3 = const_vec3!([1.0; 3]);
+/// const X: Vec3 = const_vec3!([1.0, 0.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_vec3 {
+ ($fx3:expr) => {
+ unsafe { $crate::cast::Vec3Cast { fx3: $fx3 }.v3 }
+ };
+}
+
+/// Creates a `Vec3A` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_vec3a, Vec3A};
+/// const ONE: Vec3A = const_vec3a!([1.0; 3]);
+/// const X: Vec3A = const_vec3a!([1.0, 0.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_vec3a {
+ ($fx3:expr) => {
+ unsafe {
+ $crate::cast::Vec4Cast {
+ fx4: [$fx3[0], $fx3[1], $fx3[2], 0.0],
+ }
+ .v3a
+ }
+ };
+}
+
+/// Creates a `Vec4` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_vec4, Vec4};
+/// const ONE: Vec4 = const_vec4!([1.0; 4]);
+/// const X: Vec4 = const_vec4!([1.0, 0.0, 0.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_vec4 {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::Vec4Cast { fx4: $fx4 }.v4 }
+ };
+}
+
+/// Creates a `Mat2` from two column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_mat2, Mat2};
+/// const ZERO: Mat2 = const_mat2!([0.0; 4]);
+/// const IDENTITY: Mat2 = const_mat2!([1.0, 0.0], [0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_mat2 {
+ ($col0:expr, $col1:expr) => {
+ unsafe {
+ $crate::cast::Mat2Cast {
+ v2x2: [$crate::const_vec2!($col0), $crate::const_vec2!($col1)],
+ }
+ .m2
+ }
+ };
+ ($fx4:expr) => {
+ $crate::const_mat2!(
+ $crate::cast::Vec4Cast { fx4: $fx4 }.fx2x2[0],
+ $crate::cast::Vec4Cast { fx4: $fx4 }.fx2x2[1]
+ )
+ };
+}
+
+/// Creates a `Mat3` from three column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_mat3, Mat3};
+/// const ZERO: Mat3 = const_mat3!([0.0; 9]);
+/// const IDENTITY: Mat3 = const_mat3!([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_mat3 {
+ ($col0:expr, $col1:expr, $col2:expr) => {
+ unsafe {
+ $crate::cast::Mat3Cast {
+ v3x3: [
+ $crate::const_vec3!($col0),
+ $crate::const_vec3!($col1),
+ $crate::const_vec3!($col2),
+ ],
+ }
+ .m3
+ }
+ };
+ ($fx9:expr) => {
+ $crate::const_mat3!(
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[0],
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[1],
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[2]
+ )
+ };
+}
+
+/// Creates a `Mat3A` from three column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_mat3a, Mat3A};
+/// const ZERO: Mat3A = const_mat3a!([0.0; 9]);
+/// const IDENTITY: Mat3A = const_mat3a!([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_mat3a {
+ ($col0:expr, $col1:expr, $col2:expr) => {
+ unsafe {
+ $crate::cast::Mat3ACast {
+ v3x3: [
+ $crate::const_vec3a!($col0),
+ $crate::const_vec3a!($col1),
+ $crate::const_vec3a!($col2),
+ ],
+ }
+ .m3
+ }
+ };
+ ($fx9:expr) => {
+ $crate::const_mat3a!(
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[0],
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[1],
+ $crate::cast::F32x9Cast { fx9: $fx9 }.fx3x3[2]
+ )
+ };
+}
+
+/// Creates a `Mat4` from four column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_mat4, Mat4};
+/// const ZERO: Mat4 = const_mat4!([0.0; 16]);
+/// const IDENTITY: Mat4 = const_mat4!(
+/// [1.0, 0.0, 0.0, 0.0],
+/// [0.0, 1.0, 0.0, 0.0],
+/// [0.0, 0.0, 1.0, 0.0],
+/// [0.0, 0.0, 0.0, 1.0]
+/// );
+/// ```
+#[macro_export]
+macro_rules! const_mat4 {
+ ($col0:expr, $col1:expr, $col2:expr, $col3:expr) => {
+ unsafe {
+ $crate::cast::Mat4Cast {
+ v4x4: [
+ $crate::const_vec4!($col0),
+ $crate::const_vec4!($col1),
+ $crate::const_vec4!($col2),
+ $crate::const_vec4!($col3),
+ ],
+ }
+ .m4
+ }
+ };
+ ($fx16:expr) => {
+ $crate::const_mat4!(
+ $crate::cast::F32x16Cast { fx16: $fx16 }.fx4x4[0],
+ $crate::cast::F32x16Cast { fx16: $fx16 }.fx4x4[1],
+ $crate::cast::F32x16Cast { fx16: $fx16 }.fx4x4[2],
+ $crate::cast::F32x16Cast { fx16: $fx16 }.fx4x4[3]
+ )
+ };
+}
+
+/// Creates a `Quat` from `x`, `y`, `z` and `w` values that can be used to initialize a constant
+/// value.
+///
+/// ```
+/// use glam::{const_quat, Quat};
+/// const IDENTITY: Quat = const_quat!([0.0, 0.0, 0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_quat {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::Vec4Cast { fx4: $fx4 }.q }
+ };
+}
+
+/// Creates a `DVec2` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_dvec2, DVec2};
+/// const ONE: DVec2 = const_dvec2!([1.0; 2]);
+/// const X: DVec2 = const_dvec2!([1.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dvec2 {
+ ($fx2:expr) => {
+ unsafe { $crate::cast::DVec2Cast { fx2: $fx2 }.v2 }
+ };
+}
+
+/// Creates a `DVec3` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_dvec3, DVec3};
+/// const ONE: DVec3 = const_dvec3!([1.0; 3]);
+/// const X: DVec3 = const_dvec3!([1.0, 0.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dvec3 {
+ ($fx3:expr) => {
+ unsafe { $crate::cast::DVec3Cast { fx3: $fx3 }.v3 }
+ };
+}
+
+/// Creates a `DVec4` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_dvec4, DVec4};
+/// const ONE: DVec4 = const_dvec4!([1.0; 4]);
+/// const X: DVec4 = const_dvec4!([1.0, 0.0, 0.0, 0.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dvec4 {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::DVec4Cast { fx4: $fx4 }.v4 }
+ };
+}
+
+/// Creates a `DMat2` from two column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_dmat2, DMat2};
+/// const ZERO: DMat2 = const_dmat2!([0.0; 4]);
+/// const IDENTITY: DMat2 = const_dmat2!([1.0, 0.0], [0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dmat2 {
+ ($col0:expr, $col1:expr) => {
+ unsafe {
+ $crate::cast::DMat2Cast {
+ v2x2: [$crate::const_dvec2!($col0), $crate::const_dvec2!($col1)],
+ }
+ .m2
+ }
+ };
+ ($fx4:expr) => {
+ $crate::const_dmat2!(
+ $crate::cast::DVec4Cast { fx4: $fx4 }.fx2x2[0],
+ $crate::cast::DVec4Cast { fx4: $fx4 }.fx2x2[1]
+ )
+ };
+}
+
+/// Creates a `DMat3` from three column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// # #[macro_use] extern crate glam;
+/// use glam::{const_dmat3, DMat3};
+/// const ZERO: DMat3 = const_dmat3!([0.0; 9]);
+/// const IDENTITY: DMat3 = const_dmat3!([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dmat3 {
+ ($col0:expr, $col1:expr, $col2:expr) => {
+ unsafe {
+ $crate::cast::DMat3Cast {
+ v3x3: [
+ $crate::const_dvec3!($col0),
+ $crate::const_dvec3!($col1),
+ $crate::const_dvec3!($col2),
+ ],
+ }
+ .m3
+ }
+ };
+ ($fx9:expr) => {
+ $crate::const_dmat3!(
+ $crate::cast::F64x9Cast { fx9: $fx9 }.fx3x3[0],
+ $crate::cast::F64x9Cast { fx9: $fx9 }.fx3x3[1],
+ $crate::cast::F64x9Cast { fx9: $fx9 }.fx3x3[2]
+ )
+ };
+}
+
+/// Creates a `DMat4` from four column vectors that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_dmat4, DMat4};
+/// const ZERO: DMat4 = const_dmat4!([0.0; 16]);
+/// const IDENTITY: DMat4 = const_dmat4!(
+/// [1.0, 0.0, 0.0, 0.0],
+/// [0.0, 1.0, 0.0, 0.0],
+/// [0.0, 0.0, 1.0, 0.0],
+/// [0.0, 0.0, 0.0, 1.0]
+/// );
+/// ```
+#[macro_export]
+macro_rules! const_dmat4 {
+ ($col0:expr, $col1:expr, $col2:expr, $col3:expr) => {
+ unsafe {
+ $crate::cast::DMat4Cast {
+ v4x4: [
+ $crate::const_dvec4!($col0),
+ $crate::const_dvec4!($col1),
+ $crate::const_dvec4!($col2),
+ $crate::const_dvec4!($col3),
+ ],
+ }
+ .m4
+ }
+ };
+ ($fx16:expr) => {
+ $crate::const_dmat4!(
+ $crate::cast::F64x16Cast { fx16: $fx16 }.fx4x4[0],
+ $crate::cast::F64x16Cast { fx16: $fx16 }.fx4x4[1],
+ $crate::cast::F64x16Cast { fx16: $fx16 }.fx4x4[2],
+ $crate::cast::F64x16Cast { fx16: $fx16 }.fx4x4[3]
+ )
+ };
+}
+
+/// Creates a `DQuat` from `x`, `y`, `z` and `w` values that can be used to initialize a constant
+/// value.
+///
+/// ```
+/// use glam::{const_dquat, DQuat};
+/// const IDENTITY: DQuat = const_dquat!([0.0, 0.0, 0.0, 1.0]);
+/// ```
+#[macro_export]
+macro_rules! const_dquat {
+ ($fx4:expr) => {
+ unsafe { $crate::cast::DVec4Cast { fx4: $fx4 }.q }
+ };
+}
+
+/// Creates an `IVec2` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_ivec2, IVec2};
+/// const ONE: IVec2 = const_ivec2!([1; 2]);
+/// const X: IVec2 = const_ivec2!([1, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_ivec2 {
+ ($ix2:expr) => {
+ unsafe { $crate::cast::IVec2Cast { ix2: $ix2 }.v2 }
+ };
+}
+
+/// Creates an `IVec3` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_ivec3, IVec3};
+/// const ONE: IVec3 = const_ivec3!([1; 3]);
+/// const X: IVec3 = const_ivec3!([1, 0, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_ivec3 {
+ ($ix3:expr) => {
+ unsafe { $crate::cast::IVec3Cast { ix3: $ix3 }.v3 }
+ };
+}
+
+/// Creates an `IVec4` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_ivec4, IVec4};
+/// const ONE: IVec4 = const_ivec4!([1; 4]);
+/// const X: IVec4 = const_ivec4!([1, 0, 0, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_ivec4 {
+ ($ix4:expr) => {
+ unsafe { $crate::cast::IVec4Cast { ix4: $ix4 }.v4 }
+ };
+}
+
+/// Creates a `UVec2` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_uvec2, UVec2};
+/// const ONE: UVec2 = const_uvec2!([1; 2]);
+/// const X: UVec2 = const_uvec2!([1, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_uvec2 {
+ ($ux2:expr) => {
+ unsafe { $crate::cast::UVec2Cast { ux2: $ux2 }.v2 }
+ };
+}
+
+/// Creates a `UVec3` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_uvec3, UVec3};
+/// const ONE: UVec3 = const_uvec3!([1; 3]);
+/// const X: UVec3 = const_uvec3!([1, 0, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_uvec3 {
+ ($ux3:expr) => {
+ unsafe { $crate::cast::UVec3Cast { ux3: $ux3 }.v3 }
+ };
+}
+
+/// Creates a `UVec4` that can be used to initialize a constant value.
+///
+/// ```
+/// use glam::{const_uvec4, UVec4};
+/// const ONE: UVec4 = const_uvec4!([1; 4]);
+/// const X: UVec4 = const_uvec4!([1, 0, 0, 0]);
+/// ```
+#[macro_export]
+macro_rules! const_uvec4 {
+ ($ux4:expr) => {
+ unsafe { $crate::cast::UVec4Cast { ux4: $ux4 }.v4 }
+ };
+}
diff --git a/src/mat.rs b/src/mat.rs
new file mode 100644
index 0000000..b97e8ed
--- /dev/null
+++ b/src/mat.rs
@@ -0,0 +1,114 @@
+// Adds common vector trait implementations.
+// The traits here should be supported for all types of $t and all sizes of vector.
+macro_rules! impl_matn_common_traits {
+ ($t:ty, $matn:ident, $vecn:ident) => {
+ impl Default for $matn {
+ #[inline(always)]
+ fn default() -> Self {
+ Self::IDENTITY
+ }
+ }
+
+ impl Add<$matn> for $matn {
+ type Output = Self;
+ #[inline(always)]
+ fn add(self, other: Self) -> Self::Output {
+ Self(self.0.add_matrix(&other.0))
+ }
+ }
+
+ impl AddAssign<$matn> for $matn {
+ #[inline(always)]
+ fn add_assign(&mut self, other: Self) {
+ self.0 = self.0.add_matrix(&other.0);
+ }
+ }
+
+ impl Sub<$matn> for $matn {
+ type Output = Self;
+ #[inline(always)]
+ fn sub(self, other: Self) -> Self::Output {
+ Self(self.0.sub_matrix(&other.0))
+ }
+ }
+
+ impl SubAssign<$matn> for $matn {
+ #[inline(always)]
+ fn sub_assign(&mut self, other: Self) {
+ self.0 = self.0.sub_matrix(&other.0);
+ }
+ }
+
+ impl Neg for $matn {
+ type Output = Self;
+ #[inline(always)]
+ fn neg(self) -> Self::Output {
+ Self(self.0.neg_matrix())
+ }
+ }
+
+ impl Mul<$matn> for $matn {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: Self) -> Self::Output {
+ Self(self.0.mul_matrix(&other.0))
+ }
+ }
+
+ impl MulAssign<$matn> for $matn {
+ #[inline(always)]
+ fn mul_assign(&mut self, other: Self) {
+ self.0 = self.0.mul_matrix(&other.0);
+ }
+ }
+
+ impl Mul<$vecn> for $matn {
+ type Output = $vecn;
+ #[inline(always)]
+ fn mul(self, other: $vecn) -> Self::Output {
+ $vecn(self.0.mul_vector(other.0))
+ }
+ }
+
+ impl Mul<$matn> for $t {
+ type Output = $matn;
+ #[inline(always)]
+ fn mul(self, other: $matn) -> Self::Output {
+ $matn(other.0.mul_scalar(self))
+ }
+ }
+
+ impl Mul<$t> for $matn {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: $t) -> Self::Output {
+ Self(self.0.mul_scalar(other))
+ }
+ }
+
+ impl MulAssign<$t> for $matn {
+ #[inline(always)]
+ fn mul_assign(&mut self, other: $t) {
+ self.0 = self.0.mul_scalar(other);
+ }
+ }
+
+ impl<'a> Sum<&'a Self> for $matn {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
+ }
+ }
+
+ impl<'a> Product<&'a Self> for $matn {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::IDENTITY, |a, &b| Self::mul(a, b))
+ }
+ }
+ };
+}
diff --git a/src/mat2.rs b/src/mat2.rs
new file mode 100644
index 0000000..36ced76
--- /dev/null
+++ b/src/mat2.rs
@@ -0,0 +1,398 @@
+use crate::core::{
+ storage::{Columns2, XY},
+ traits::matrix::{FloatMatrix2x2, Matrix2x2, MatrixConst},
+};
+use crate::{DMat3, DVec2, Mat3, Vec2};
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::ops::{Add, AddAssign, Deref, DerefMut, Mul, MulAssign, Neg, Sub, SubAssign};
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(target_feature = "simd128")]
+use core::arch::wasm32::v128;
+
+macro_rules! impl_mat2_methods {
+ ($t:ty, $vec2:ident, $mat3:ident, $inner:ident) => {
+ /// A 2x2 matrix with all elements set to `0.0`.
+ pub const ZERO: Self = Self($inner::ZERO);
+
+ /// A 2x2 identity matrix, where all diagonal elements are `1`, and all off-diagonal elements are `0`.
+ pub const IDENTITY: Self = Self($inner::IDENTITY);
+
+ /// All NaNs.
+ pub const NAN: Self = Self(<$inner as crate::core::traits::scalar::NanConstEx>::NAN);
+
+ /// Creates a 2x2 matrix from two column vectors.
+ #[inline(always)]
+ pub fn from_cols(x_axis: $vec2, y_axis: $vec2) -> Self {
+ Self($inner::from_cols(x_axis.0, y_axis.0))
+ }
+
+ /// Creates a 2x2 matrix from a `[S; 4]` array stored in column major order.
+ /// If your data is stored in row major you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array(m: &[$t; 4]) -> Self {
+ Self($inner::from_cols_array(m))
+ }
+
+ /// Creates a `[S; 4]` array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array(&self) -> [$t; 4] {
+ self.0.to_cols_array()
+ }
+
+ /// Creates a 2x2 matrix from a `[[S; 2]; 2]` 2D array stored in column major order.
+ /// If your data is in row major order you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array_2d(m: &[[$t; 2]; 2]) -> Self {
+ Self($inner::from_cols_array_2d(m))
+ }
+
+ /// Creates a `[[S; 2]; 2]` 2D array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array_2d(&self) -> [[$t; 2]; 2] {
+ self.0.to_cols_array_2d()
+ }
+
+ /// Creates a 2x2 matrix with its diagonal set to `diagonal` and all other entries set to 0.
+ #[doc(alias = "scale")]
+ #[inline(always)]
+ pub fn from_diagonal(diagonal: $vec2) -> Self {
+ Self($inner::from_diagonal(diagonal.0))
+ }
+
+ /// Creates a 2x2 matrix containing the combined non-uniform `scale` and rotation of
+ /// `angle` (in radians).
+ #[inline(always)]
+ pub fn from_scale_angle(scale: $vec2, angle: $t) -> Self {
+ Self($inner::from_scale_angle(scale.0, angle))
+ }
+
+ /// Creates a 2x2 matrix containing a rotation of `angle` (in radians).
+ #[inline(always)]
+ pub fn from_angle(angle: $t) -> Self {
+ Self($inner::from_angle(angle))
+ }
+
+ /// Creates a 2x2 matrix from a 3x3 matrix, discarding the 2nd row and column.
+ #[inline(always)]
+ pub fn from_mat3(m: $mat3) -> Self {
+ Self::from_cols($vec2(m.x_axis.0.into()), $vec2(m.y_axis.0.into()))
+ }
+
+ /// Creates a 2x2 matrix from the first 4 values in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 4 elements long.
+ #[inline(always)]
+ pub fn from_cols_slice(slice: &[$t]) -> Self {
+ Self(Matrix2x2::from_cols_slice(slice))
+ }
+
+ /// Writes the columns of `self` to the first 4 elements in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 4 elements long.
+ #[inline(always)]
+ pub fn write_cols_to_slice(self, slice: &mut [$t]) {
+ Matrix2x2::write_cols_to_slice(&self.0, slice)
+ }
+
+ /// Returns the matrix column for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 1.
+ #[inline]
+ pub fn col(&self, index: usize) -> $vec2 {
+ match index {
+ 0 => self.x_axis,
+ 1 => self.y_axis,
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns a mutable reference to the matrix column for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 1.
+ #[inline]
+ pub fn col_mut(&mut self, index: usize) -> &mut $vec2 {
+ match index {
+ 0 => &mut self.x_axis,
+ 1 => &mut self.y_axis,
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns the matrix row for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 1.
+ #[inline]
+ pub fn row(&self, index: usize) -> $vec2 {
+ match index {
+ 0 => $vec2::new(self.x_axis.x, self.y_axis.x),
+ 1 => $vec2::new(self.x_axis.y, self.y_axis.y),
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns `true` if, and only if, all elements are finite.
+ /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
+ #[inline]
+ pub fn is_finite(&self) -> bool {
+ // TODO
+ self.x_axis.is_finite() && self.y_axis.is_finite()
+ }
+
+ /// Returns `true` if any elements are `NaN`.
+ #[inline]
+ pub fn is_nan(&self) -> bool {
+ self.x_axis.is_nan() || self.y_axis.is_nan()
+ }
+
+ /// Returns the transpose of `self`.
+ #[must_use]
+ #[inline(always)]
+ pub fn transpose(&self) -> Self {
+ Self(self.0.transpose())
+ }
+
+ /// Returns the determinant of `self`.
+ #[inline(always)]
+ pub fn determinant(&self) -> $t {
+ self.0.determinant()
+ }
+
+ /// Returns the inverse of `self`.
+ ///
+ /// If the matrix is not invertible the returned matrix will be invalid.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if the determinant of `self` is zero when `glam_assert` is enabled.
+ #[must_use]
+ #[inline(always)]
+ pub fn inverse(&self) -> Self {
+ Self(self.0.inverse())
+ }
+
+ /// Transforms a 2D vector.
+ #[inline(always)]
+ pub fn mul_vec2(&self, other: $vec2) -> $vec2 {
+ $vec2(self.0.mul_vector(other.0))
+ }
+
+ /// Multiplies two 2x2 matrices.
+ #[inline(always)]
+ pub fn mul_mat2(&self, other: &Self) -> Self {
+ Self(self.0.mul_matrix(&other.0))
+ }
+
+ /// Adds two 2x2 matrices.
+ #[inline(always)]
+ pub fn add_mat2(&self, other: &Self) -> Self {
+ Self(self.0.add_matrix(&other.0))
+ }
+
+ /// Subtracts two 2x2 matrices.
+ #[inline(always)]
+ pub fn sub_mat2(&self, other: &Self) -> Self {
+ Self(self.0.sub_matrix(&other.0))
+ }
+
+ /// Multiplies a 2x2 matrix by a scalar.
+ #[inline(always)]
+ pub fn mul_scalar(&self, other: $t) -> Self {
+ Self(self.0.mul_scalar(other))
+ }
+
+ /// Returns true if the absolute difference of all elements between `self` and `other`
+ /// is less than or equal to `max_abs_diff`.
+ ///
+ /// This can be used to compare if two matrices contain similar elements. It works best
+ /// when comparing with a known value. The `max_abs_diff` that should be used
+ /// depends on the values being compared against.
+ ///
+ /// For more see
+ /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+ #[inline(always)]
+ pub fn abs_diff_eq(&self, other: &Self, max_abs_diff: $t) -> bool {
+ self.0.abs_diff_eq(&other.0, max_abs_diff)
+ }
+ };
+}
+
+// Generates the free constructor fn plus operator/formatting trait impls shared
+// by Mat2 and DMat2.
+macro_rules! impl_mat2_traits {
+ ($t:ty, $new:ident, $mat2:ident, $vec2:ident) => {
+ /// Creates a 2x2 matrix from two column vectors.
+ #[inline(always)]
+ pub fn $new(x_axis: $vec2, y_axis: $vec2) -> $mat2 {
+ $mat2::from_cols(x_axis, y_axis)
+ }
+
+ // Arithmetic operators, Sum and Product, shared with the other matrix sizes.
+ impl_matn_common_traits!($t, $mat2, $vec2);
+
+ impl PartialEq for $mat2 {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.x_axis.eq(&other.x_axis) && self.y_axis.eq(&other.y_axis)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; 4]> for $mat2 {
+ #[inline(always)]
+ fn as_ref(&self) -> &[$t; 4] {
+ // SAFETY: every storage variant of $mat2 is exactly four contiguous
+ // $t values (size asserted by the const_test_* modules below), so the
+ // cast reads in-bounds, initialized memory.
+ unsafe { &*(self as *const Self as *const [$t; 4]) }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsMut<[$t; 4]> for $mat2 {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut [$t; 4] {
+ // SAFETY: as for AsRef above; exclusive access via &mut self.
+ unsafe { &mut *(self as *mut Self as *mut [$t; 4]) }
+ }
+ }
+
+ // Deref to Columns2 provides the `.x_axis` / `.y_axis` field access used
+ // throughout this file.
+ impl Deref for $mat2 {
+ type Target = Columns2<$vec2>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: NOTE(review): assumes $mat2's inner storage is layout-
+ // compatible with Columns2<$vec2>; size/alignment are asserted by
+ // the const_test_* modules below.
+ unsafe { &*(self as *const Self as *const Self::Target) }
+ }
+ }
+
+ impl DerefMut for $mat2 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: as for Deref above; exclusive access via &mut self.
+ unsafe { &mut *(self as *mut Self as *mut Self::Target) }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $mat2 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // stringify! expands the metavariable to the concrete type name.
+ fmt.debug_struct(stringify!($mat2))
+ .field("x_axis", &self.x_axis)
+ .field("y_axis", &self.y_axis)
+ .finish()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $mat2 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{}, {}]", self.x_axis, self.y_axis)
+ }
+ }
+ };
+}
+
+// Storage for `Mat2` (f32): a single 128-bit SIMD vector holds all four
+// elements when SSE2 (x86) or simd128 (wasm) is available and not disabled.
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type InnerF32 = __m128;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type InnerF32 = v128;
+
+// Scalar fallback: two column vectors of XY<f32>.
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+type InnerF32 = crate::core::storage::Columns2<XY<f32>>;
+
+/// A 2x2 column major matrix.
+#[derive(Clone, Copy)]
+#[cfg_attr(
+ not(any(
+ feature = "scalar-math",
+ target_arch = "spirv",
+ target_feature = "sse2",
+ target_feature = "simd128"
+ )),
+ repr(C, align(16))
+)]
+#[cfg_attr(feature = "cuda", repr(C, align(8)))]
+#[cfg_attr(
+ all(
+ any(
+ feature = "scalar-math",
+ target_arch = "spirv",
+ target_feature = "sse2",
+ target_feature = "simd128"
+ ),
+ not(feature = "cuda"),
+ ),
+ repr(transparent)
+)]
+pub struct Mat2(pub(crate) InnerF32);
+
+impl Mat2 {
+ impl_mat2_methods!(f32, Vec2, Mat3, InnerF32);
+
+ /// Casts all elements of `self` to `f64`, returning a `DMat2`.
+ #[inline(always)]
+ pub fn as_dmat2(&self) -> DMat2 {
+ DMat2::from_cols(self.x_axis.as_dvec2(), self.y_axis.as_dvec2())
+ }
+}
+impl_mat2_traits!(f32, mat2, Mat2, Vec2);
+
+type InnerF64 = crate::core::storage::Columns2<XY<f64>>;
+
+/// A 2x2 column major matrix.
+#[derive(Clone, Copy)]
+#[cfg_attr(feature = "cuda", repr(C, align(16)))]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+pub struct DMat2(pub(crate) InnerF64);
+
+impl DMat2 {
+ impl_mat2_methods!(f64, DVec2, DMat3, InnerF64);
+
+ /// Casts all elements of `self` to `f32`, returning a `Mat2`.
+ ///
+ /// NOTE: lossy for values outside `f32` precision/range.
+ #[inline(always)]
+ pub fn as_mat2(&self) -> Mat2 {
+ Mat2::from_cols(self.x_axis.as_vec2(), self.y_axis.as_vec2())
+ }
+}
+impl_mat2_traits!(f64, dmat2, DMat2, DVec2);
+
+mod const_test_mat2 {
+ // Compile-time layout checks: outside scalar-math/spirv builds Mat2 is
+ // 16-byte aligned (SIMD vector, or forced repr align(16) on the struct);
+ // scalar/spirv builds only inherit Vec2's alignment.
+ #[cfg(any(feature = "scalar-math", target_arch = "spirv"))]
+ const_assert_eq!(
+ core::mem::align_of::<super::Vec2>(),
+ core::mem::align_of::<super::Mat2>()
+ );
+ #[cfg(not(any(feature = "scalar-math", target_arch = "spirv")))]
+ const_assert_eq!(16, core::mem::align_of::<super::Mat2>());
+ // 4 x f32 = 16 bytes in every configuration.
+ const_assert_eq!(16, core::mem::size_of::<super::Mat2>());
+}
+
+mod const_test_dmat2 {
+ // DMat2 uses scalar storage (Columns2<XY<f64>>): alignment must match
+ // DVec2's, and size is 4 x f64 = 32 bytes.
+ const_assert_eq!(
+ core::mem::align_of::<super::DVec2>(),
+ core::mem::align_of::<super::DMat2>()
+ );
+ const_assert_eq!(32, core::mem::size_of::<super::DMat2>());
+}
diff --git a/src/mat3.rs b/src/mat3.rs
new file mode 100644
index 0000000..564832a
--- /dev/null
+++ b/src/mat3.rs
@@ -0,0 +1,596 @@
+use crate::core::{
+ storage::{Columns3, XYZ},
+ traits::matrix::{FloatMatrix3x3, Matrix3x3, MatrixConst},
+};
+use crate::{DMat2, DMat4, DQuat, DVec2, DVec3, EulerRot, Mat2, Mat4, Quat, Vec2, Vec3, Vec3A};
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::ops::{Add, AddAssign, Deref, DerefMut, Mul, MulAssign, Neg, Sub, SubAssign};
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(target_feature = "simd128")]
+use core::arch::wasm32::v128;
+
+macro_rules! define_mat3_struct {
+ ($mat3:ident, $inner:ident) => {
+ /// A 3x3 column major matrix.
+ ///
+ /// This 3x3 matrix type features convenience methods for creating and using linear and
+ /// affine transformations. If you are primarily dealing with 2D affine transformations the
+ /// [`Affine2`](crate::Affine2) type is much faster and more space efficient than using a
+ /// 3x3 matrix.
+ ///
+ /// Linear transformations including 3D rotation and scale can be created using methods
+ /// such as [`Self::from_diagonal()`], [`Self::from_quat()`], [`Self::from_axis_angle()`],
+ /// [`Self::from_rotation_x()`], [`Self::from_rotation_y()`], or
+ /// [`Self::from_rotation_z()`].
+ ///
+ /// The resulting matrices can be used to transform 3D vectors using regular vector
+ /// multiplication.
+ ///
+ /// Affine transformations including 2D translation, rotation and scale can be created
+ /// using methods such as [`Self::from_translation()`], [`Self::from_angle()`],
+ /// [`Self::from_scale()`] and [`Self::from_scale_angle_translation()`].
+ ///
+ /// The [`Self::transform_point2()`] and [`Self::transform_vector2()`] convenience methods
+ /// are provided for performing affine transforms on 2D vectors and points. These multiply
+ /// 2D inputs as 3D vectors with an implicit `z` value of `1` for points and `0` for
+ /// vectors respectively. These methods assume that `Self` contains a valid affine
+ /// transform.
+ #[derive(Clone, Copy)]
+ #[cfg_attr(not(target_arch = "spirv"), repr(C))]
+ pub struct $mat3(pub(crate) $inner);
+ };
+}
+
+macro_rules! impl_mat3_methods {
+ ($t:ty, $vec3:ident, $vec3a:ident, $vec2:ident, $quat:ident, $mat2:ident, $mat4:ident, $inner:ident) => {
+ /// A 3x3 matrix with all elements set to `0.0`.
+ pub const ZERO: Self = Self($inner::ZERO);
+
+ /// A 3x3 identity matrix, where all diagonal elements are `1`, and all off-diagonal
+ /// elements are `0`.
+ pub const IDENTITY: Self = Self($inner::IDENTITY);
+
+ /// All NaNs.
+ pub const NAN: Self = Self(<$inner as crate::core::traits::scalar::NanConstEx>::NAN);
+
+ /// Creates a 3x3 matrix from three column vectors.
+ #[inline(always)]
+ pub fn from_cols(x_axis: $vec3a, y_axis: $vec3a, z_axis: $vec3a) -> Self {
+ Self(Matrix3x3::from_cols(x_axis.0, y_axis.0, z_axis.0))
+ }
+
+ /// Creates a 3x3 matrix from a `[S; 9]` array stored in column major order.
+ /// If your data is stored in row major you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array(m: &[$t; 9]) -> Self {
+ Self(Matrix3x3::from_cols_array(m))
+ }
+
+ /// Creates a `[S; 9]` array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array(&self) -> [$t; 9] {
+ self.0.to_cols_array()
+ }
+
+ /// Creates a 3x3 matrix from a `[[S; 3]; 3]` 2D array stored in column major order.
+ /// If your data is in row major order you will need to `transpose` the returned
+ /// matrix.
+ #[inline(always)]
+ pub fn from_cols_array_2d(m: &[[$t; 3]; 3]) -> Self {
+ Self(Matrix3x3::from_cols_array_2d(m))
+ }
+
+ /// Creates a `[[S; 3]; 3]` 2D array storing data in column major order.
+ /// If you require data in row major order `transpose` the matrix first.
+ #[inline(always)]
+ pub fn to_cols_array_2d(&self) -> [[$t; 3]; 3] {
+ self.0.to_cols_array_2d()
+ }
+
+ /// Creates a 3x3 matrix with its diagonal set to `diagonal` and all other entries set to 0.
+ /// The resulting matrix is a 3D scale transform.
+ #[doc(alias = "scale")]
+ #[inline(always)]
+ pub fn from_diagonal(diagonal: $vec3) -> Self {
+ Self($inner::from_diagonal(diagonal.0))
+ }
+
+ /// Creates a 3x3 matrix from a 4x4 matrix, discarding the 3rd row and column.
+ pub fn from_mat4(m: $mat4) -> Self {
+ Self::from_cols(
+ $vec3a(m.x_axis.0.into()),
+ $vec3a(m.y_axis.0.into()),
+ $vec3a(m.z_axis.0.into()),
+ )
+ }
+
+ /// Creates a 3D rotation matrix from the given quaternion.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `rotation` is not normalized when `glam_assert` is enabled.
+ #[inline(always)]
+ pub fn from_quat(rotation: $quat) -> Self {
+ // TODO: SIMD?
+ Self($inner::from_quaternion(rotation.0.into()))
+ }
+
+ /// Creates a 3D rotation matrix from a normalized rotation `axis` and `angle` (in
+ /// radians).
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `axis` is not normalized when `glam_assert` is enabled.
+ #[inline(always)]
+ pub fn from_axis_angle(axis: $vec3, angle: $t) -> Self {
+ Self(FloatMatrix3x3::from_axis_angle(axis.0, angle))
+ }
+
+ #[inline(always)]
+ /// Creates a 3D rotation matrix from the given euler rotation sequence and the angles (in
+ /// radians).
+ pub fn from_euler(order: EulerRot, a: $t, b: $t, c: $t) -> Self {
+ let quat = $quat::from_euler(order, a, b, c);
+ Self::from_quat(quat)
+ }
+
+ /// Creates a 3D rotation matrix from `angle` (in radians) around the x axis.
+ #[inline(always)]
+ pub fn from_rotation_x(angle: $t) -> Self {
+ Self($inner::from_rotation_x(angle))
+ }
+
+ /// Creates a 3D rotation matrix from `angle` (in radians) around the y axis.
+ #[inline(always)]
+ pub fn from_rotation_y(angle: $t) -> Self {
+ Self($inner::from_rotation_y(angle))
+ }
+
+ /// Creates a 3D rotation matrix from `angle` (in radians) around the z axis.
+ #[inline(always)]
+ pub fn from_rotation_z(angle: $t) -> Self {
+ Self($inner::from_rotation_z(angle))
+ }
+
+ /// Creates an affine transformation matrix from the given 2D `translation`.
+ ///
+ /// The resulting matrix can be used to transform 2D points and vectors. See
+ /// [`Self::transform_point2()`] and [`Self::transform_vector2()`].
+ #[inline(always)]
+ pub fn from_translation(translation: $vec2) -> Self {
+ Self(Matrix3x3::from_translation(translation.0))
+ }
+
+ /// Creates an affine transformation matrix from the given 2D rotation `angle` (in
+ /// radians).
+ ///
+ /// The resulting matrix can be used to transform 2D points and vectors. See
+ /// [`Self::transform_point2()`] and [`Self::transform_vector2()`].
+ #[inline(always)]
+ pub fn from_angle(angle: $t) -> Self {
+ Self(FloatMatrix3x3::from_angle(angle))
+ }
+
+ /// Creates an affine transformation matrix from the given 2D `scale`, rotation `angle` (in
+ /// radians) and `translation`.
+ ///
+ /// The resulting matrix can be used to transform 2D points and vectors. See
+ /// [`Self::transform_point2()`] and [`Self::transform_vector2()`].
+ #[inline(always)]
+ pub fn from_scale_angle_translation(scale: $vec2, angle: $t, translation: $vec2) -> Self {
+ Self(FloatMatrix3x3::from_scale_angle_translation(
+ scale.0,
+ angle,
+ translation.0,
+ ))
+ }
+
+ /// Creates an affine transformation matrix from the given non-uniform 2D `scale`.
+ ///
+ /// The resulting matrix can be used to transform 2D points and vectors. See
+ /// [`Self::transform_point2()`] and [`Self::transform_vector2()`].
+ ///
+ /// # Panics
+ ///
+ /// Will panic if all elements of `scale` are zero when `glam_assert` is enabled.
+ #[inline(always)]
+ pub fn from_scale(scale: $vec2) -> Self {
+ Self(Matrix3x3::from_scale(scale.0))
+ }
+
+ /// Creates an affine transformation matrix from the given 2x2 matrix.
+ ///
+ /// The resulting matrix can be used to transform 2D points and vectors. See
+ /// [`Self::transform_point2()`] and [`Self::transform_vector2()`].
+ #[inline(always)]
+ pub fn from_mat2(m: $mat2) -> Self {
+ Self::from_cols((m.x_axis, 0.0).into(), (m.y_axis, 0.0).into(), $vec3a::Z)
+ }
+
+ /// Creates a 3x3 matrix from the first 9 values in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 9 elements long.
+ #[inline(always)]
+ pub fn from_cols_slice(slice: &[$t]) -> Self {
+ Self(Matrix3x3::from_cols_slice(slice))
+ }
+
+ /// Writes the columns of `self` to the first 9 elements in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than 9 elements long.
+ #[inline(always)]
+ pub fn write_cols_to_slice(self, slice: &mut [$t]) {
+ Matrix3x3::write_cols_to_slice(&self.0, slice)
+ }
+
+ /// Returns the matrix column for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 2.
+ #[inline]
+ pub fn col(&self, index: usize) -> $vec3a {
+ match index {
+ 0 => self.x_axis,
+ 1 => self.y_axis,
+ 2 => self.z_axis,
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns a mutable reference to the matrix column for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 2.
+ #[inline]
+ pub fn col_mut(&mut self, index: usize) -> &mut $vec3a {
+ match index {
+ 0 => &mut self.x_axis,
+ 1 => &mut self.y_axis,
+ 2 => &mut self.z_axis,
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns the matrix row for the given `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than 2.
+ #[inline]
+ pub fn row(&self, index: usize) -> $vec3a {
+ match index {
+ 0 => $vec3a::new(self.x_axis.x, self.y_axis.x, self.z_axis.x),
+ 1 => $vec3a::new(self.x_axis.y, self.y_axis.y, self.z_axis.y),
+ 2 => $vec3a::new(self.x_axis.z, self.y_axis.z, self.z_axis.z),
+ _ => panic!("index out of bounds"),
+ }
+ }
+
+ /// Returns `true` if, and only if, all elements are finite.
+ /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
+ #[inline]
+ pub fn is_finite(&self) -> bool {
+ self.x_axis.is_finite() && self.y_axis.is_finite() && self.z_axis.is_finite()
+ }
+
+ /// Returns `true` if any elements are `NaN`.
+ #[inline]
+ pub fn is_nan(&self) -> bool {
+ self.x_axis.is_nan() || self.y_axis.is_nan() || self.z_axis.is_nan()
+ }
+
+ /// Returns the transpose of `self`.
+ #[must_use]
+ #[inline(always)]
+ pub fn transpose(&self) -> Self {
+ Self(self.0.transpose())
+ }
+
+ /// Returns the determinant of `self`.
+ #[inline(always)]
+ pub fn determinant(&self) -> $t {
+ self.0.determinant()
+ }
+
+ /// Returns the inverse of `self`.
+ ///
+ /// If the matrix is not invertible the returned matrix will be invalid.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if the determinant of `self` is zero when `glam_assert` is enabled.
+ #[must_use]
+ #[inline(always)]
+ pub fn inverse(&self) -> Self {
+ Self(self.0.inverse())
+ }
+
+ /// Transforms a 3D vector.
+ #[inline(always)]
+ pub fn mul_vec3(&self, other: $vec3) -> $vec3 {
+ $vec3(self.0.mul_vector(other.0.into()).into())
+ }
+
+ /// Multiplies two 3x3 matrices.
+ #[inline]
+ pub fn mul_mat3(&self, other: &Self) -> Self {
+ Self(self.0.mul_matrix(&other.0))
+ }
+
+ /// Adds two 3x3 matrices.
+ #[inline(always)]
+ pub fn add_mat3(&self, other: &Self) -> Self {
+ Self(self.0.add_matrix(&other.0))
+ }
+
+ /// Subtracts two 3x3 matrices.
+ #[inline(always)]
+ pub fn sub_mat3(&self, other: &Self) -> Self {
+ Self(self.0.sub_matrix(&other.0))
+ }
+
+ /// Multiplies a 3x3 matrix by a scalar.
+ #[inline(always)]
+ pub fn mul_scalar(&self, other: $t) -> Self {
+ Self(self.0.mul_scalar(other))
+ }
+
+ /// Transforms the given 2D vector as a point.
+ ///
+ /// This is the equivalent of multiplying `other` as a 3D vector where `z` is `1`.
+ ///
+ /// This method assumes that `self` contains a valid affine transform.
+ #[inline(always)]
+ pub fn transform_point2(&self, other: $vec2) -> $vec2 {
+ $mat2::from_cols($vec2(self.x_axis.0.into()), $vec2(self.y_axis.0.into())) * other
+ + $vec2(self.z_axis.0.into())
+ }
+
+ /// Rotates the given 2D vector.
+ ///
+ /// This is the equivalent of multiplying `other` as a 3D vector where `z` is `0`.
+ ///
+ /// This method assumes that `self` contains a valid affine transform.
+ #[inline(always)]
+ pub fn transform_vector2(&self, other: $vec2) -> $vec2 {
+ $mat2::from_cols($vec2(self.x_axis.0.into()), $vec2(self.y_axis.0.into())) * other
+ }
+
+ /// Returns true if the absolute difference of all elements between `self` and `other`
+ /// is less than or equal to `max_abs_diff`.
+ ///
+ /// This can be used to compare if two matrices contain similar elements. It works best
+ /// when comparing with a known value. The `max_abs_diff` that should be used
+ /// depends on the values being compared against.
+ ///
+ /// For more see
+ /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+ #[inline(always)]
+ pub fn abs_diff_eq(&self, other: Self, max_abs_diff: $t) -> bool {
+ self.0.abs_diff_eq(&other.0, max_abs_diff)
+ }
+ };
+}
+
+// Generates the free constructor fn plus operator/formatting trait impls shared
+// by Mat3, Mat3A and DMat3.
+macro_rules! impl_mat3_traits {
+ ($t:ty, $new:ident, $mat3:ident, $vec3:ident, $vec3a:ident) => {
+ /// Creates a 3x3 matrix from three column vectors.
+ #[inline(always)]
+ pub fn $new(x_axis: $vec3a, y_axis: $vec3a, z_axis: $vec3a) -> $mat3 {
+ $mat3::from_cols(x_axis, y_axis, z_axis)
+ }
+
+ impl_matn_common_traits!($t, $mat3, $vec3a);
+
+ impl PartialEq for $mat3 {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.x_axis.eq(&other.x_axis)
+ && self.y_axis.eq(&other.y_axis)
+ && self.z_axis.eq(&other.z_axis)
+ }
+ }
+
+ impl Deref for $mat3 {
+ type Target = Columns3<$vec3a>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*(self as *const Self as *const Self::Target) }
+ }
+ }
+
+ impl DerefMut for $mat3 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *(self as *mut Self as *mut Self::Target) }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $mat3 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{}, {}, {}]", self.x_axis, self.y_axis, self.z_axis)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $mat3 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // BUG FIX: was `fmt.debug_struct("$mat3")`, which printed the
+ // literal string "$mat3" instead of the concrete type name.
+ // Use stringify! to expand the metavariable, matching the
+ // correct usage in impl_mat2_traits.
+ fmt.debug_struct(stringify!($mat3))
+ .field("x_axis", &self.x_axis)
+ .field("y_axis", &self.y_axis)
+ .field("z_axis", &self.z_axis)
+ .finish()
+ }
+ }
+ };
+}
+
+// AsRef/AsMut to a flat `[$t; 9]` array. Only invoked for the scalar-storage
+// types Mat3 and DMat3; Mat3A's 16-byte columns contain padding and so are not
+// nine contiguous elements, which is why it does not get these impls.
+macro_rules! impl_mat3_traits_unsafe {
+ ($t:ty, $mat3:ident) => {
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; 9]> for $mat3 {
+ #[inline(always)]
+ fn as_ref(&self) -> &[$t; 9] {
+ // SAFETY: $mat3 is repr(C) (see define_mat3_struct) over nine
+ // contiguous $t values; size is asserted by the const_test_*
+ // modules below.
+ unsafe { &*(self as *const Self as *const [$t; 9]) }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsMut<[$t; 9]> for $mat3 {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut [$t; 9] {
+ // SAFETY: as for AsRef above; exclusive access via &mut self.
+ unsafe { &mut *(self as *mut Self as *mut [$t; 9]) }
+ }
+ }
+ };
+}
+
+type InnerF32 = Columns3<XYZ<f32>>;
+define_mat3_struct!(Mat3, InnerF32);
+
+impl Mat3 {
+ impl_mat3_methods!(f32, Vec3, Vec3, Vec2, Quat, Mat2, Mat4, InnerF32);
+
+ /// Transforms a `Vec3A`.
+ ///
+ /// Converts to `Vec3` and back, since `Mat3` stores unaligned scalar columns.
+ #[inline]
+ pub fn mul_vec3a(&self, other: Vec3A) -> Vec3A {
+ self.mul_vec3(other.into()).into()
+ }
+
+ /// Casts all elements of `self` to `f64`, returning a `DMat3`.
+ #[inline(always)]
+ pub fn as_dmat3(&self) -> DMat3 {
+ DMat3::from_cols(
+ self.x_axis.as_dvec3(),
+ self.y_axis.as_dvec3(),
+ self.z_axis.as_dvec3(),
+ )
+ }
+}
+impl_mat3_traits!(f32, mat3, Mat3, Vec3, Vec3);
+impl_mat3_traits_unsafe!(f32, Mat3);
+
+// Operator form of `Mat3::mul_vec3a` (`Mat3 * Vec3A`).
+impl Mul<Vec3A> for Mat3 {
+ type Output = Vec3A;
+ #[inline(always)]
+ fn mul(self, other: Vec3A) -> Vec3A {
+ self.mul_vec3a(other)
+ }
+}
+
+// Storage for `Mat3A`: three 16-byte columns — SIMD vectors where available,
+// otherwise the 16-byte-aligned scalar XYZF32A16.
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type InnerF32A = Columns3<__m128>;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type InnerF32A = Columns3<v128>;
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+type InnerF32A = Columns3<crate::core::storage::XYZF32A16>;
+define_mat3_struct!(Mat3A, InnerF32A);
+
+impl Mat3A {
+ impl_mat3_methods!(f32, Vec3, Vec3A, Vec2, Quat, Mat2, Mat4, InnerF32A);
+
+ /// Transforms a `Vec3A`.
+ ///
+ /// Operates directly on the aligned column storage — no conversion, unlike
+ /// `Mat3::mul_vec3a`.
+ #[inline]
+ pub fn mul_vec3a(&self, other: Vec3A) -> Vec3A {
+ Vec3A(self.0.mul_vector(other.0))
+ }
+
+ /// Casts all elements of `self` to `f64`, returning a `DMat3`.
+ #[inline(always)]
+ pub fn as_dmat3(&self) -> DMat3 {
+ DMat3::from_cols(
+ self.x_axis.as_dvec3(),
+ self.y_axis.as_dvec3(),
+ self.z_axis.as_dvec3(),
+ )
+ }
+}
+impl_mat3_traits!(f32, mat3a, Mat3A, Vec3, Vec3A);
+
+// Operator form of `Mat3A::mul_vec3` (`Mat3A * Vec3`; the method is generated
+// by impl_mat3_methods and converts through the aligned storage).
+impl Mul<Vec3> for Mat3A {
+ type Output = Vec3;
+ #[inline(always)]
+ fn mul(self, other: Vec3) -> Vec3 {
+ self.mul_vec3(other)
+ }
+}
+
+// Widening conversion: copies the unaligned `Mat3` columns into `Mat3A`'s
+// 16-byte-aligned storage via the inner storage's From impl.
+impl From<Mat3> for Mat3A {
+ #[inline(always)]
+ fn from(m: Mat3) -> Self {
+ Self(m.0.into())
+ }
+}
+
+// Narrowing conversion: drops the alignment padding via the inner storage's
+// From impl; element values are carried over unchanged.
+impl From<Mat3A> for Mat3 {
+ #[inline(always)]
+ fn from(m: Mat3A) -> Self {
+ Self(m.0.into())
+ }
+}
+
+type InnerF64 = Columns3<XYZ<f64>>;
+define_mat3_struct!(DMat3, InnerF64);
+
+impl DMat3 {
+ impl_mat3_methods!(f64, DVec3, DVec3, DVec2, DQuat, DMat2, DMat4, InnerF64);
+
+ /// Casts all elements of `self` to `f32`, returning a `Mat3`.
+ ///
+ /// NOTE: lossy for values outside `f32` precision/range.
+ #[inline(always)]
+ pub fn as_mat3(&self) -> Mat3 {
+ Mat3::from_cols(
+ self.x_axis.as_vec3(),
+ self.y_axis.as_vec3(),
+ self.z_axis.as_vec3(),
+ )
+ }
+}
+impl_mat3_traits!(f64, dmat3, DMat3, DVec3, DVec3);
+impl_mat3_traits_unsafe!(f64, DMat3);
+
+mod const_test_mat3 {
+ // Mat3 uses scalar storage (Columns3<XYZ<f32>>): f32 alignment and
+ // 9 x 4 = 36 bytes.
+ const_assert_eq!(
+ core::mem::align_of::<f32>(),
+ core::mem::align_of::<super::Mat3>()
+ );
+ const_assert_eq!(36, core::mem::size_of::<super::Mat3>());
+}
+
+mod const_test_mat3a {
+ // Mat3A stores three 16-byte-aligned columns: 3 x 16 = 48 bytes.
+ const_assert_eq!(16, core::mem::align_of::<super::Mat3A>());
+ const_assert_eq!(48, core::mem::size_of::<super::Mat3A>());
+}
+
+mod const_test_dmat3 {
+ // DMat3 uses scalar storage (Columns3<XYZ<f64>>): f64 alignment and
+ // 9 x 8 = 72 bytes.
+ const_assert_eq!(
+ core::mem::align_of::<f64>(),
+ core::mem::align_of::<super::DMat3>()
+ );
+ const_assert_eq!(72, core::mem::size_of::<super::DMat3>());
+}
diff --git a/src/mat4.rs b/src/mat4.rs
new file mode 100644
index 0000000..fab1b73
--- /dev/null
+++ b/src/mat4.rs
@@ -0,0 +1,890 @@
+use crate::core::{
+ storage::{Columns4, XYZW},
+ traits::{
+ matrix::{FloatMatrix4x4, Matrix4x4, MatrixConst},
+ projection::ProjectionMatrix,
+ },
+};
+use crate::{DMat3, DQuat, DVec3, DVec4, EulerRot, Mat3, Quat, Vec3, Vec3A, Vec4};
+
+#[cfg(all(
+ target_feature = "sse2",
+ not(feature = "scalar-math"),
+ target_arch = "x86"
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_feature = "sse2",
+ not(feature = "scalar-math"),
+ target_arch = "x86_64"
+))]
+use core::arch::x86_64::*;
+
+#[cfg(target_feature = "simd128")]
+use core::arch::wasm32::v128;
+
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::ops::{Add, AddAssign, Deref, DerefMut, Mul, MulAssign, Neg, Sub, SubAssign};
+
+//macro_rules! define_mat4_struct {
+// ($mat4:ident, $inner:ident) => {
+// /// A 4x4 column major matrix.
+// ///
+// /// This 4x4 matrix type features convenience methods for creating and using affine
+// /// transforms and perspective projections.
+// ///
+// /// Affine transformations including 3D translation, rotation and scale can be created
+// /// using methods such as [`Self::from_translation()`], [`Self::from_quat()`],
+// /// [`Self::from_scale()`] and [`Self::from_scale_rotation_translation()`].
+// ///
+// /// Othographic projections can be created using the methods [`Self::orthographic_lh()`] for
+// /// left-handed coordinate systems and [`Self::orthographic_rh()`] for right-handed
+// /// systems. The resulting matrix is also an affine transformation.
+// ///
+// /// The [`Self::transform_point3()`] and [`Self::transform_vector3()`] convenience methods
+// /// are provided for performing affine transformations on 3D vectors and points. These
+// /// multiply 3D inputs as 4D vectors with an implicit `w` value of `1` for points and `0`
+// /// for vectors respectively. These methods assume that `Self` contains a valid affine
+// /// transform.
+// ///
+// /// Perspective projections can be created using methods such as
+// /// [`Self::perspective_lh()`], [`Self::perspective_infinite_lh()`] and
+// /// [`Self::perspective_infinite_reverse_lh()`] for left-handed co-ordinate systems and
+// /// [`Self::perspective_rh()`], [`Self::perspective_infinite_rh()`] and
+// /// [`Self::perspective_infinite_reverse_rh()`] for right-handed co-ordinate systems.
+// ///
+// /// The resulting perspective project can be use to transform 3D vectors as points with
+// /// perspective correction using the [`Self::project_point3()`] convenience method.
+// #[derive(Clone, Copy)]
+// #[repr(transparent)]
+// pub struct $mat4(pub(crate) $inner);
+// };
+//}
+
// Generates the shared inherent methods for `Mat4` and `DMat4`. The macro body
// is pasted inside an `impl` block, delegating to the backend `$inner` matrix
// type (SIMD or scalar depending on cfg).
macro_rules! impl_mat4_methods {
    ($t:ident, $vec4:ident, $vec3:ident, $mat3:ident, $quat:ident, $inner:ident) => {
        /// A 4x4 matrix with all elements set to `0.0`.
        pub const ZERO: Self = Self($inner::ZERO);

        /// A 4x4 identity matrix, where all diagonal elements are `1`, and all off-diagonal elements are `0`.
        pub const IDENTITY: Self = Self($inner::IDENTITY);

        /// A 4x4 matrix with all elements set to `NaN`.
        pub const NAN: Self = Self(<$inner as crate::core::traits::scalar::NanConstEx>::NAN);

        /// Creates a 4x4 matrix from four column vectors.
        #[inline(always)]
        pub fn from_cols(x_axis: $vec4, y_axis: $vec4, z_axis: $vec4, w_axis: $vec4) -> Self {
            Self($inner::from_cols(x_axis.0, y_axis.0, z_axis.0, w_axis.0))
        }

        /// Creates a 4x4 matrix from a `[S; 16]` array stored in column major order.
        /// If your data is stored in row major you will need to `transpose` the returned
        /// matrix.
        #[inline(always)]
        pub fn from_cols_array(m: &[$t; 16]) -> Self {
            Self($inner::from_cols_array(m))
        }

        /// Creates a `[S; 16]` array storing data in column major order.
        /// If you require data in row major order `transpose` the matrix first.
        #[inline(always)]
        pub fn to_cols_array(&self) -> [$t; 16] {
            self.0.to_cols_array()
        }

        /// Creates a 4x4 matrix from a `[[S; 4]; 4]` 2D array stored in column major order.
        /// If your data is in row major order you will need to `transpose` the returned
        /// matrix.
        #[inline(always)]
        pub fn from_cols_array_2d(m: &[[$t; 4]; 4]) -> Self {
            Self($inner::from_cols_array_2d(m))
        }

        /// Creates a `[[S; 4]; 4]` 2D array storing data in column major order.
        /// If you require data in row major order `transpose` the matrix first.
        #[inline(always)]
        pub fn to_cols_array_2d(&self) -> [[$t; 4]; 4] {
            self.0.to_cols_array_2d()
        }

        /// Creates a 4x4 matrix with its diagonal set to `diagonal` and all other entries set to 0.
        #[doc(alias = "scale")]
        #[inline(always)]
        pub fn from_diagonal(diagonal: $vec4) -> Self {
            Self($inner::from_diagonal(diagonal.0.into()))
        }

        /// Creates an affine transformation matrix from the given 3D `scale`, `rotation` and
        /// `translation`.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        ///
        /// # Panics
        ///
        /// Will panic if `rotation` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn from_scale_rotation_translation(
            scale: $vec3,
            rotation: $quat,
            translation: $vec3,
        ) -> Self {
            Self($inner::from_scale_quaternion_translation(
                scale.0,
                rotation.0,
                translation.0,
            ))
        }

        /// Creates an affine transformation matrix from the given `rotation` quaternion and
        /// 3D `translation`.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        ///
        /// # Panics
        ///
        /// Will panic if `rotation` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn from_rotation_translation(rotation: $quat, translation: $vec3) -> Self {
            Self($inner::from_quaternion_translation(
                rotation.0,
                translation.0,
            ))
        }

        /// Extracts `scale`, `rotation` and `translation` from `self`. The input matrix is
        /// expected to be a 3D affine transformation matrix otherwise the output will be invalid.
        ///
        /// # Panics
        ///
        /// Will panic if the determinant of `self` is zero or if the resulting scale vector
        /// contains any zero elements when `glam_assert` is enabled.
        #[inline(always)]
        pub fn to_scale_rotation_translation(&self) -> ($vec3, $quat, $vec3) {
            let (scale, rotation, translation) = self.0.to_scale_quaternion_translation();
            ($vec3(scale), $quat(rotation), $vec3(translation))
        }

        /// Creates an affine transformation matrix from the given `rotation` quaternion.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        ///
        /// # Panics
        ///
        /// Will panic if `rotation` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn from_quat(rotation: $quat) -> Self {
            Self($inner::from_quaternion(rotation.0))
        }

        /// Creates an affine transformation matrix from the given 3x3 linear transformation
        /// matrix.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_mat3(m: $mat3) -> Self {
            // Columns are extended with w = 0 and the translation column is the unit w axis.
            Self::from_cols(
                (m.x_axis, 0.0).into(),
                (m.y_axis, 0.0).into(),
                (m.z_axis, 0.0).into(),
                $vec4::W,
            )
        }

        /// Creates an affine transformation matrix from the given 3D `translation`.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_translation(translation: $vec3) -> Self {
            Self($inner::from_translation(translation.0))
        }

        /// Creates an affine transformation matrix containing a 3D rotation around a normalized
        /// rotation `axis` of `angle` (in radians).
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        ///
        /// # Panics
        ///
        /// Will panic if `axis` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn from_axis_angle(axis: $vec3, angle: $t) -> Self {
            Self($inner::from_axis_angle(axis.0, angle))
        }

        /// Creates an affine transformation matrix containing a rotation from the given euler
        /// rotation sequence and angles (in radians).
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_euler(order: EulerRot, a: $t, b: $t, c: $t) -> Self {
            let quat = $quat::from_euler(order, a, b, c);
            Self::from_quat(quat)
        }

        /// Creates an affine transformation matrix containing a 3D rotation around the x axis of
        /// `angle` (in radians).
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_rotation_x(angle: $t) -> Self {
            Self($inner::from_rotation_x(angle))
        }

        /// Creates an affine transformation matrix containing a 3D rotation around the y axis of
        /// `angle` (in radians).
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_rotation_y(angle: $t) -> Self {
            Self($inner::from_rotation_y(angle))
        }

        /// Creates an affine transformation matrix containing a 3D rotation around the z axis of
        /// `angle` (in radians).
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        #[inline(always)]
        pub fn from_rotation_z(angle: $t) -> Self {
            Self($inner::from_rotation_z(angle))
        }

        /// Creates an affine transformation matrix containing the given 3D non-uniform `scale`.
        ///
        /// The resulting matrix can be used to transform 3D points and vectors. See
        /// [`Self::transform_point3()`] and [`Self::transform_vector3()`].
        ///
        /// # Panics
        ///
        /// Will panic if all elements of `scale` are zero when `glam_assert` is enabled.
        #[inline(always)]
        pub fn from_scale(scale: $vec3) -> Self {
            Self($inner::from_scale(scale.0))
        }

        /// Creates a 4x4 matrix from the first 16 values in `slice`.
        ///
        /// # Panics
        ///
        /// Panics if `slice` is less than 16 elements long.
        #[inline(always)]
        pub fn from_cols_slice(slice: &[$t]) -> Self {
            Self(Matrix4x4::from_cols_slice(slice))
        }

        /// Writes the columns of `self` to the first 16 elements in `slice`.
        ///
        /// # Panics
        ///
        /// Panics if `slice` is less than 16 elements long.
        #[inline(always)]
        pub fn write_cols_to_slice(self, slice: &mut [$t]) {
            Matrix4x4::write_cols_to_slice(&self.0, slice)
        }

        /// Returns the matrix column for the given `index`.
        ///
        /// # Panics
        ///
        /// Panics if `index` is greater than 3.
        #[inline]
        pub fn col(&self, index: usize) -> $vec4 {
            match index {
                0 => self.x_axis,
                1 => self.y_axis,
                2 => self.z_axis,
                3 => self.w_axis,
                _ => panic!("index out of bounds"),
            }
        }

        /// Returns a mutable reference to the matrix column for the given `index`.
        ///
        /// # Panics
        ///
        /// Panics if `index` is greater than 3.
        #[inline]
        pub fn col_mut(&mut self, index: usize) -> &mut $vec4 {
            match index {
                0 => &mut self.x_axis,
                1 => &mut self.y_axis,
                2 => &mut self.z_axis,
                3 => &mut self.w_axis,
                _ => panic!("index out of bounds"),
            }
        }

        /// Returns the matrix row for the given `index`.
        ///
        /// # Panics
        ///
        /// Panics if `index` is greater than 3.
        #[inline]
        pub fn row(&self, index: usize) -> $vec4 {
            // Rows are gathered element-wise from the column vectors.
            match index {
                0 => $vec4::new(self.x_axis.x, self.y_axis.x, self.z_axis.x, self.w_axis.x),
                1 => $vec4::new(self.x_axis.y, self.y_axis.y, self.z_axis.y, self.w_axis.y),
                2 => $vec4::new(self.x_axis.z, self.y_axis.z, self.z_axis.z, self.w_axis.z),
                3 => $vec4::new(self.x_axis.w, self.y_axis.w, self.z_axis.w, self.w_axis.w),
                _ => panic!("index out of bounds"),
            }
        }

        /// Returns `true` if, and only if, all elements are finite.
        /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
        #[inline]
        pub fn is_finite(&self) -> bool {
            self.x_axis.is_finite()
                && self.y_axis.is_finite()
                && self.z_axis.is_finite()
                && self.w_axis.is_finite()
        }

        /// Returns `true` if any elements are `NaN`.
        #[inline]
        pub fn is_nan(&self) -> bool {
            self.x_axis.is_nan()
                || self.y_axis.is_nan()
                || self.z_axis.is_nan()
                || self.w_axis.is_nan()
        }

        /// Returns the transpose of `self`.
        #[must_use]
        #[inline(always)]
        pub fn transpose(&self) -> Self {
            Self(self.0.transpose())
        }

        /// Returns the determinant of `self`.
        #[inline(always)]
        pub fn determinant(&self) -> $t {
            self.0.determinant()
        }

        /// Returns the inverse of `self`.
        ///
        /// If the matrix is not invertible the returned matrix will be invalid.
        ///
        /// # Panics
        ///
        /// Will panic if the determinant of `self` is zero when `glam_assert` is enabled.
        #[must_use]
        #[inline(always)]
        pub fn inverse(&self) -> Self {
            Self(self.0.inverse())
        }

        /// Creates a left-handed view matrix using a camera position, an up direction, and a focal
        /// point.
        /// For a view coordinate system with `+X=right`, `+Y=up` and `+Z=forward`.
        ///
        /// # Panics
        ///
        /// Will panic if `up` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn look_at_lh(eye: $vec3, center: $vec3, up: $vec3) -> Self {
            Self($inner::look_at_lh(eye.0, center.0, up.0))
        }

        /// Creates a right-handed view matrix using a camera position, an up direction, and a focal
        /// point.
        /// For a view coordinate system with `+X=right`, `+Y=up` and `+Z=back`.
        ///
        /// # Panics
        ///
        /// Will panic if `up` is not normalized when `glam_assert` is enabled.
        #[inline(always)]
        pub fn look_at_rh(eye: $vec3, center: $vec3, up: $vec3) -> Self {
            Self($inner::look_at_rh(eye.0, center.0, up.0))
        }

        /// Creates a right-handed perspective projection matrix with [-1,1] depth range.
        /// This is the same as the OpenGL `gluPerspective` function.
        /// See <https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPerspective.xml>
        #[inline(always)]
        pub fn perspective_rh_gl(
            fov_y_radians: $t,
            aspect_ratio: $t,
            z_near: $t,
            z_far: $t,
        ) -> Self {
            Self($inner::perspective_rh_gl(
                fov_y_radians,
                aspect_ratio,
                z_near,
                z_far,
            ))
        }

        /// Creates a left-handed perspective projection matrix with `[0,1]` depth range.
        ///
        /// # Panics
        ///
        /// Will panic if `z_near` or `z_far` are less than or equal to zero when `glam_assert` is
        /// enabled.
        #[inline(always)]
        pub fn perspective_lh(fov_y_radians: $t, aspect_ratio: $t, z_near: $t, z_far: $t) -> Self {
            Self($inner::perspective_lh(
                fov_y_radians,
                aspect_ratio,
                z_near,
                z_far,
            ))
        }

        /// Creates a right-handed perspective projection matrix with `[0,1]` depth range.
        ///
        /// # Panics
        ///
        /// Will panic if `z_near` or `z_far` are less than or equal to zero when `glam_assert` is
        /// enabled.
        #[inline(always)]
        pub fn perspective_rh(fov_y_radians: $t, aspect_ratio: $t, z_near: $t, z_far: $t) -> Self {
            Self($inner::perspective_rh(
                fov_y_radians,
                aspect_ratio,
                z_near,
                z_far,
            ))
        }

        /// Creates an infinite left-handed perspective projection matrix with `[0,1]` depth range.
        ///
        /// # Panics
        ///
        /// Will panic if `z_near` is less than or equal to zero when `glam_assert` is enabled.
        #[inline(always)]
        pub fn perspective_infinite_lh(fov_y_radians: $t, aspect_ratio: $t, z_near: $t) -> Self {
            Self($inner::perspective_infinite_lh(
                fov_y_radians,
                aspect_ratio,
                z_near,
            ))
        }

        /// Creates an infinite reverse left-handed perspective projection matrix with `[0,1]`
        /// depth range.
        ///
        /// # Panics
        ///
        /// Will panic if `z_near` is less than or equal to zero when `glam_assert` is enabled.
        #[inline(always)]
        pub fn perspective_infinite_reverse_lh(
            fov_y_radians: $t,
            aspect_ratio: $t,
            z_near: $t,
        ) -> Self {
            Self($inner::perspective_infinite_reverse_lh(
                fov_y_radians,
                aspect_ratio,
                z_near,
            ))
        }

        /// Creates an infinite right-handed perspective projection matrix with
        /// `[0,1]` depth range.
        #[inline(always)]
        pub fn perspective_infinite_rh(fov_y_radians: $t, aspect_ratio: $t, z_near: $t) -> Self {
            Self($inner::perspective_infinite_rh(
                fov_y_radians,
                aspect_ratio,
                z_near,
            ))
        }

        /// Creates an infinite reverse right-handed perspective projection matrix
        /// with `[0,1]` depth range.
        #[inline(always)]
        pub fn perspective_infinite_reverse_rh(
            fov_y_radians: $t,
            aspect_ratio: $t,
            z_near: $t,
        ) -> Self {
            Self($inner::perspective_infinite_reverse_rh(
                fov_y_radians,
                aspect_ratio,
                z_near,
            ))
        }

        /// Creates a right-handed orthographic projection matrix with `[-1,1]` depth
        /// range. This is the same as the OpenGL `glOrtho` function in OpenGL.
        /// See
        /// <https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/glOrtho.xml>
        #[inline(always)]
        pub fn orthographic_rh_gl(
            left: $t,
            right: $t,
            bottom: $t,
            top: $t,
            near: $t,
            far: $t,
        ) -> Self {
            Self($inner::orthographic_rh_gl(
                left, right, bottom, top, near, far,
            ))
        }

        /// Creates a left-handed orthographic projection matrix with `[0,1]` depth range.
        #[inline(always)]
        pub fn orthographic_lh(
            left: $t,
            right: $t,
            bottom: $t,
            top: $t,
            near: $t,
            far: $t,
        ) -> Self {
            Self($inner::orthographic_lh(left, right, bottom, top, near, far))
        }

        /// Creates a right-handed orthographic projection matrix with `[0,1]` depth range.
        #[inline(always)]
        pub fn orthographic_rh(
            left: $t,
            right: $t,
            bottom: $t,
            top: $t,
            near: $t,
            far: $t,
        ) -> Self {
            Self($inner::orthographic_rh(left, right, bottom, top, near, far))
        }

        /// Transforms a 4D vector.
        #[inline(always)]
        pub fn mul_vec4(&self, other: $vec4) -> $vec4 {
            $vec4(self.0.mul_vector(other.0))
        }

        /// Multiplies two 4x4 matrices.
        #[inline(always)]
        pub fn mul_mat4(&self, other: &Self) -> Self {
            Self(self.0.mul_matrix(&other.0))
        }

        /// Adds two 4x4 matrices.
        #[inline(always)]
        pub fn add_mat4(&self, other: &Self) -> Self {
            Self(self.0.add_matrix(&other.0))
        }

        /// Subtracts two 4x4 matrices.
        #[inline(always)]
        pub fn sub_mat4(&self, other: &Self) -> Self {
            Self(self.0.sub_matrix(&other.0))
        }

        /// Multiplies this matrix by a scalar value.
        #[inline(always)]
        pub fn mul_scalar(&self, other: $t) -> Self {
            Self(self.0.mul_scalar(other))
        }

        /// Transforms the given 3D vector as a point, applying perspective correction.
        ///
        /// This is the equivalent of multiplying the 3D vector as a 4D vector where `w` is `1.0`.
        /// The perspective divide is performed meaning the resulting 3D vector is divided by `w`.
        ///
        /// This method assumes that `self` contains a projective transform.
        #[inline]
        pub fn project_point3(&self, other: $vec3) -> $vec3 {
            $vec3(self.0.project_point3(other.0))
        }

        /// Transforms the given 3D vector as a point.
        ///
        /// This is the equivalent of multiplying the 3D vector as a 4D vector where `w` is
        /// `1.0`.
        ///
        /// This method assumes that `self` contains a valid affine transform. It does not perform
        /// a perspective divide, if `self` contains a perspective transform, or if you are unsure,
        /// the [`Self::project_point3()`] method should be used instead.
        ///
        /// # Panics
        ///
        /// Will panic if the fourth row (index 3) of `self` is not `(0, 0, 0, 1)` when
        /// `glam_assert` is enabled.
        #[inline]
        pub fn transform_point3(&self, other: $vec3) -> $vec3 {
            glam_assert!(self.row(3) == $vec4::W);
            $vec3(self.0.transform_point3(other.0))
        }

        /// Transforms the given 3D vector as a direction.
        ///
        /// This is the equivalent of multiplying the 3D vector as a 4D vector where `w` is
        /// `0.0`.
        ///
        /// This method assumes that `self` contains a valid affine transform.
        ///
        /// # Panics
        ///
        /// Will panic if the fourth row (index 3) of `self` is not `(0, 0, 0, 1)` when
        /// `glam_assert` is enabled.
        #[inline]
        pub fn transform_vector3(&self, other: $vec3) -> $vec3 {
            glam_assert!(self.row(3) == $vec4::W);
            $vec3(self.0.transform_vector3(other.0))
        }

        /// Returns true if the absolute difference of all elements between `self` and `other`
        /// is less than or equal to `max_abs_diff`.
        ///
        /// This can be used to compare if two 4x4 matrices contain similar elements. It works
        /// best when comparing with a known value. The `max_abs_diff` that should be used
        /// depends on the values being compared against.
        ///
        /// For more see
        /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
        #[inline(always)]
        pub fn abs_diff_eq(&self, other: Self, max_abs_diff: $t) -> bool {
            self.0.abs_diff_eq(&other.0, max_abs_diff)
        }
    };
}
+
// Generates the free constructor function (`mat4`/`dmat4`) and the shared trait
// implementations (`Deref`, `PartialEq`, `AsRef`/`AsMut`, `Debug`, `Display`)
// for a 4x4 matrix type.
macro_rules! impl_mat4_traits {
    ($t:ty, $new:ident, $mat4:ident, $vec4:ident) => {
        /// Creates a 4x4 matrix from four column vectors.
        #[inline(always)]
        pub fn $new(x_axis: $vec4, y_axis: $vec4, z_axis: $vec4, w_axis: $vec4) -> $mat4 {
            $mat4::from_cols(x_axis, y_axis, z_axis, w_axis)
        }

        impl_matn_common_traits!($t, $mat4, $vec4);

        impl Deref for $mat4 {
            type Target = Columns4<$vec4>;
            #[inline(always)]
            fn deref(&self) -> &Self::Target {
                // SAFETY: assumes $mat4 and Columns4<$vec4> have identical size and
                // alignment (the const_test modules below assert the layout) —
                // NOTE(review): soundness rests on the repr attributes matching; confirm.
                unsafe { &*(self as *const Self as *const Self::Target) }
            }
        }

        impl DerefMut for $mat4 {
            #[inline(always)]
            fn deref_mut(&mut self) -> &mut Self::Target {
                // SAFETY: same layout-compatibility assumption as `Deref` above.
                unsafe { &mut *(self as *mut Self as *mut Self::Target) }
            }
        }

        impl PartialEq for $mat4 {
            // Element-wise equality via the column vectors; inherits float
            // semantics (NaN != NaN), so no `Eq` impl.
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                self.x_axis.eq(&other.x_axis)
                    && self.y_axis.eq(&other.y_axis)
                    && self.z_axis.eq(&other.z_axis)
                    && self.w_axis.eq(&other.w_axis)
            }
        }

        #[cfg(not(target_arch = "spirv"))]
        impl AsRef<[$t; 16]> for $mat4 {
            #[inline]
            fn as_ref(&self) -> &[$t; 16] {
                // SAFETY: assumes the matrix is exactly 16 contiguous `$t` elements
                // (asserted by the const_test size checks) — confirm repr.
                unsafe { &*(self as *const Self as *const [$t; 16]) }
            }
        }

        #[cfg(not(target_arch = "spirv"))]
        impl AsMut<[$t; 16]> for $mat4 {
            #[inline]
            fn as_mut(&mut self) -> &mut [$t; 16] {
                // SAFETY: same 16-element contiguous layout assumption as `AsRef`.
                unsafe { &mut *(self as *mut Self as *mut [$t; 16]) }
            }
        }

        #[cfg(not(target_arch = "spirv"))]
        impl fmt::Debug for $mat4 {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_struct(stringify!($mat4))
                    .field("x_axis", &self.x_axis)
                    .field("y_axis", &self.y_axis)
                    .field("z_axis", &self.z_axis)
                    .field("w_axis", &self.w_axis)
                    .finish()
            }
        }

        #[cfg(not(target_arch = "spirv"))]
        impl fmt::Display for $mat4 {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(
                    f,
                    "[{}, {}, {}, {}]",
                    self.x_axis, self.y_axis, self.z_axis, self.w_axis
                )
            }
        }
    };
}
+
// Backend storage for `Mat4`: SSE2 vectors on x86/x86_64 ...
#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
type InnerF32 = Columns4<__m128>;

// ... wasm SIMD vectors on wasm32 ...
#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
type InnerF32 = Columns4<v128>;

// ... and plain scalar storage otherwise (or when `scalar-math` is forced).
#[cfg(any(
    not(any(target_feature = "sse2", target_feature = "simd128")),
    feature = "scalar-math"
))]
type InnerF32 = Columns4<XYZW<f32>>;
+
/// A 4x4 column major matrix.
///
/// This 4x4 matrix type features convenience methods for creating and using affine transforms and
/// perspective projections. If you are primarily dealing with 3D affine transformations
/// consider using [`Affine3A`](crate::Affine3A) which is faster than a 4x4 matrix for some
/// affine operations.
///
/// Affine transformations including 3D translation, rotation and scale can be created
/// using methods such as [`Self::from_translation()`], [`Self::from_quat()`],
/// [`Self::from_scale()`] and [`Self::from_scale_rotation_translation()`].
///
/// Orthographic projections can be created using the methods [`Self::orthographic_lh()`] for
/// left-handed coordinate systems and [`Self::orthographic_rh()`] for right-handed
/// systems. The resulting matrix is also an affine transformation.
///
/// The [`Self::transform_point3()`] and [`Self::transform_vector3()`] convenience methods
/// are provided for performing affine transformations on 3D vectors and points. These
/// multiply 3D inputs as 4D vectors with an implicit `w` value of `1` for points and `0`
/// for vectors respectively. These methods assume that `Self` contains a valid affine
/// transform.
///
/// Perspective projections can be created using methods such as
/// [`Self::perspective_lh()`], [`Self::perspective_infinite_lh()`] and
/// [`Self::perspective_infinite_reverse_lh()`] for left-handed co-ordinate systems and
/// [`Self::perspective_rh()`], [`Self::perspective_infinite_rh()`] and
/// [`Self::perspective_infinite_reverse_rh()`] for right-handed co-ordinate systems.
///
/// The resulting perspective projection can be used to transform 3D vectors as points with
/// perspective correction using the [`Self::project_point3()`] convenience method.
#[derive(Clone, Copy)]
// With a SIMD backend (or under cuda) force 16-byte alignment via repr(C);
// the scalar/spirv build uses repr(transparent) over the inner storage instead.
#[cfg_attr(
    any(
        not(any(feature = "scalar-math", target_arch = "spirv")),
        feature = "cuda"
    ),
    repr(C, align(16))
)]
#[cfg_attr(
    all(
        any(feature = "scalar-math", target_arch = "spirv"),
        not(feature = "cuda"),
    ),
    repr(transparent)
)]
pub struct Mat4(pub(crate) InnerF32);
+// define_mat4_struct!(Mat4, InnerF32);
+
impl Mat4 {
    impl_mat4_methods!(f32, Vec4, Vec3, Mat3, Quat, InnerF32);

    /// Transforms the given `Vec3A` as 3D point.
    ///
    /// This is the equivalent of multiplying the `Vec3A` as a 4D vector where `w` is `1.0`.
    #[inline(always)]
    pub fn transform_point3a(&self, other: Vec3A) -> Vec3A {
        // The `into()` round-trip is a no-op on SIMD backends, hence the lint allow.
        #[allow(clippy::useless_conversion)]
        Vec3A(self.0.transform_float4_as_point3(other.0.into()).into())
    }

    /// Transforms the given `Vec3A` as 3D vector.
    ///
    /// This is the equivalent of multiplying the `Vec3A` as a 4D vector where `w` is `0.0`.
    #[inline(always)]
    pub fn transform_vector3a(&self, other: Vec3A) -> Vec3A {
        #[allow(clippy::useless_conversion)]
        Vec3A(self.0.transform_float4_as_vector3(other.0.into()).into())
    }

    /// Returns an `f64` copy of `self`, casting each column with `as_dvec4`.
    #[inline(always)]
    pub fn as_dmat4(&self) -> DMat4 {
        DMat4::from_cols(
            self.x_axis.as_dvec4(),
            self.y_axis.as_dvec4(),
            self.z_axis.as_dvec4(),
            self.w_axis.as_dvec4(),
        )
    }
}
impl_mat4_traits!(f32, mat4, Mat4, Vec4);
+
// f64 4x4 storage is always scalar (no SIMD path for doubles here).
type InnerF64 = Columns4<XYZW<f64>>;

/// A 4x4 column major matrix.
///
/// This 4x4 matrix type features convenience methods for creating and using affine transforms and
/// perspective projections. If you are primarily dealing with 3D affine transformations
/// consider using [`DAffine3`](crate::DAffine3) which is faster than a 4x4 matrix for some
/// affine operations.
///
/// Affine transformations including 3D translation, rotation and scale can be created
/// using methods such as [`Self::from_translation()`], [`Self::from_quat()`],
/// [`Self::from_scale()`] and [`Self::from_scale_rotation_translation()`].
///
/// Orthographic projections can be created using the methods [`Self::orthographic_lh()`] for
/// left-handed coordinate systems and [`Self::orthographic_rh()`] for right-handed
/// systems. The resulting matrix is also an affine transformation.
///
/// The [`Self::transform_point3()`] and [`Self::transform_vector3()`] convenience methods
/// are provided for performing affine transformations on 3D vectors and points. These
/// multiply 3D inputs as 4D vectors with an implicit `w` value of `1` for points and `0`
/// for vectors respectively. These methods assume that `Self` contains a valid affine
/// transform.
///
/// Perspective projections can be created using methods such as
/// [`Self::perspective_lh()`], [`Self::perspective_infinite_lh()`] and
/// [`Self::perspective_infinite_reverse_lh()`] for left-handed co-ordinate systems and
/// [`Self::perspective_rh()`], [`Self::perspective_infinite_rh()`] and
/// [`Self::perspective_infinite_reverse_rh()`] for right-handed co-ordinate systems.
///
/// The resulting perspective projection can be used to transform 3D vectors as points with
/// perspective correction using the [`Self::project_point3()`] convenience method.
#[derive(Clone, Copy)]
#[cfg_attr(not(feature = "cuda"), repr(transparent))]
#[cfg_attr(feature = "cuda", repr(C, align(16)))]
pub struct DMat4(pub(crate) InnerF64);
+// define_mat4_struct!(DMat4, InnerF64);
+
impl DMat4 {
    impl_mat4_methods!(f64, DVec4, DVec3, DMat3, DQuat, InnerF64);

    /// Returns an `f32` copy of `self`, casting each column with `as_vec4`.
    #[inline(always)]
    pub fn as_mat4(&self) -> Mat4 {
        Mat4::from_cols(
            self.x_axis.as_vec4(),
            self.y_axis.as_vec4(),
            self.z_axis.as_vec4(),
            self.w_axis.as_vec4(),
        )
    }
}
impl_mat4_traits!(f64, dmat4, DMat4, DVec4);
+
// Compile-time layout checks: `Mat4` matches `Vec4` alignment (SIMD or scalar)
// and is exactly 16 floats (64 bytes) — required for the `AsRef<[f32; 16]>`
// and `Deref` pointer casts generated by `impl_mat4_traits!`.
mod const_test_mat4 {
    const_assert_eq!(
        core::mem::align_of::<super::Vec4>(),
        core::mem::align_of::<super::Mat4>()
    );
    const_assert_eq!(64, core::mem::size_of::<super::Mat4>());
}
+
// Compile-time layout checks: `DMat4` matches `DVec4` alignment and is exactly
// 16 doubles (128 bytes) — required for the pointer casts generated by
// `impl_mat4_traits!`.
mod const_test_dmat4 {
    const_assert_eq!(
        core::mem::align_of::<super::DVec4>(),
        core::mem::align_of::<super::DMat4>()
    );
    const_assert_eq!(128, core::mem::size_of::<super::DMat4>());
}
diff --git a/src/quat.rs b/src/quat.rs
new file mode 100644
index 0000000..135c942
--- /dev/null
+++ b/src/quat.rs
@@ -0,0 +1,825 @@
+use crate::core::traits::{
+ quaternion::Quaternion,
+ vector::{FloatVector4, MaskVector4, Vector, Vector4, Vector4Const},
+};
+use crate::euler::{EulerFromQuaternion, EulerRot, EulerToQuaternion};
+use crate::{DMat3, DMat4, DVec2, DVec3, DVec4};
+use crate::{Mat3, Mat4, Vec2, Vec3, Vec3A, Vec4};
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+use core::arch::wasm32::v128;
+
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::ops::{Add, Deref, Div, Mul, MulAssign, Neg, Sub};
+
+// Implements the inherent methods shared by `Quat` and `DQuat`. `$t` is the
+// scalar type, `$inner` the storage type, and the remaining parameters are the
+// matching vector/matrix/quaternion types for that scalar width.
+macro_rules! impl_quat_methods {
+    ($t:ident, $quat:ident, $vec2:ident, $vec3:ident, $vec4:ident, $mat3:ident, $mat4:ident, $inner:ident) => {
+        /// The identity quaternion. Corresponds to no rotation.
+        pub const IDENTITY: Self = Self($inner::W);
+
+        /// All NANs.
+        pub const NAN: Self = Self(<$inner as crate::core::traits::scalar::NanConstEx>::NAN);
+
+        /// Creates a new rotation quaternion.
+        ///
+        /// This should generally not be called manually unless you know what you are doing.
+        /// Use one of the other constructors instead such as `identity` or `from_axis_angle`.
+        ///
+        /// `from_xyzw` is mostly used by unit tests and `serde` deserialization.
+        ///
+        /// # Preconditions
+        ///
+        /// This function does not check if the input is normalized, it is up to the user to
+        /// provide normalized input or to normalize the resulting quaternion.
+        #[inline(always)]
+        pub fn from_xyzw(x: $t, y: $t, z: $t, w: $t) -> Self {
+            Self(Vector4::new(x, y, z, w))
+        }
+
+        /// Creates a rotation quaternion from an array.
+        ///
+        /// # Preconditions
+        ///
+        /// This function does not check if the input is normalized, it is up to the user to
+        /// provide normalized input or to normalize the resulting quaternion.
+        #[inline(always)]
+        pub fn from_array(a: [$t; 4]) -> Self {
+            let q = Vector4::from_array(a);
+            Self(q)
+        }
+
+        /// Creates a new rotation quaternion from a 4D vector.
+        ///
+        /// # Preconditions
+        ///
+        /// This function does not check if the input is normalized, it is up to the user to
+        /// provide normalized input or to normalize the resulting quaternion.
+        #[inline(always)]
+        pub fn from_vec4(v: $vec4) -> Self {
+            Self(v.0)
+        }
+
+        /// Creates a rotation quaternion from a slice.
+        ///
+        /// # Preconditions
+        ///
+        /// This function does not check if the input is normalized, it is up to the user to
+        /// provide normalized input or to normalize the resulting quaternion.
+        ///
+        /// # Panics
+        ///
+        /// Panics if `slice` length is less than 4.
+        #[inline(always)]
+        pub fn from_slice(slice: &[$t]) -> Self {
+            Self(Vector4::from_slice_unaligned(slice))
+        }
+
+        /// Writes the quaternion to an unaligned slice.
+        ///
+        /// # Panics
+        ///
+        /// Panics if `slice` length is less than 4.
+        #[inline(always)]
+        pub fn write_to_slice(self, slice: &mut [$t]) {
+            Vector4::write_to_slice_unaligned(self.0, slice)
+        }
+
+        /// Create a quaternion for a normalized rotation `axis` and `angle` (in radians).
+        /// The axis must be normalized (unit-length).
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `axis` is not normalized when `glam_assert` is enabled.
+        #[inline(always)]
+        pub fn from_axis_angle(axis: $vec3, angle: $t) -> Self {
+            Self($inner::from_axis_angle(axis.0, angle))
+        }
+
+        /// Create a quaternion that rotates `v.length()` radians around `v.normalize()`.
+        ///
+        /// `from_scaled_axis(Vec3::ZERO)` results in the identity quaternion.
+        #[inline(always)]
+        pub fn from_scaled_axis(v: $vec3) -> Self {
+            // A zero-length axis has no defined direction, so map it to the
+            // identity instead of dividing by zero.
+            let length = v.length();
+            if length == 0.0 {
+                Self::IDENTITY
+            } else {
+                Self::from_axis_angle(v / length, length)
+            }
+        }
+
+        /// Creates a quaternion from the `angle` (in radians) around the x axis.
+        #[inline(always)]
+        pub fn from_rotation_x(angle: $t) -> Self {
+            Self($inner::from_rotation_x(angle))
+        }
+
+        /// Creates a quaternion from the `angle` (in radians) around the y axis.
+        #[inline(always)]
+        pub fn from_rotation_y(angle: $t) -> Self {
+            Self($inner::from_rotation_y(angle))
+        }
+
+        /// Creates a quaternion from the `angle` (in radians) around the z axis.
+        #[inline(always)]
+        pub fn from_rotation_z(angle: $t) -> Self {
+            Self($inner::from_rotation_z(angle))
+        }
+
+        /// Creates a quaternion from the given euler rotation sequence and the angles (in radians).
+        #[inline(always)]
+        pub fn from_euler(euler: EulerRot, a: $t, b: $t, c: $t) -> Self {
+            euler.new_quat(a, b, c)
+        }
+
+        /// Creates a quaternion from a 3x3 rotation matrix.
+        #[inline]
+        pub fn from_mat3(mat: &$mat3) -> Self {
+            Self(Quaternion::from_rotation_axes(
+                mat.x_axis.0,
+                mat.y_axis.0,
+                mat.z_axis.0,
+            ))
+        }
+
+        /// Creates a quaternion from a 3x3 rotation matrix inside a homogeneous 4x4 matrix.
+        #[inline]
+        pub fn from_mat4(mat: &$mat4) -> Self {
+            Self(Quaternion::from_rotation_axes(
+                mat.x_axis.0.into(),
+                mat.y_axis.0.into(),
+                mat.z_axis.0.into(),
+            ))
+        }
+
+        /// Gets the minimal rotation for transforming `from` to `to`. The rotation is in the
+        /// plane spanned by the two vectors. Will rotate at most 180 degrees.
+        ///
+        /// The input vectors must be normalized (unit-length).
+        ///
+        /// `from_rotation_arc(from, to) * from ≈ to`.
+        ///
+        /// For near-singular cases (from≈to and from≈-to) the current implementation
+        /// is only accurate to about 0.001 (for `f32`).
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `from` or `to` are not normalized when `glam_assert` is enabled.
+        pub fn from_rotation_arc(from: $vec3, to: $vec3) -> Self {
+            glam_assert!(from.is_normalized());
+            glam_assert!(to.is_normalized());
+
+            const ONE_MINUS_EPS: $t = 1.0 - 2.0 * core::$t::EPSILON;
+            let dot = from.dot(to);
+            if dot > ONE_MINUS_EPS {
+                // 0° singularity: from ≈ to
+                Self::IDENTITY
+            } else if dot < -ONE_MINUS_EPS {
+                // 180° singularity: from ≈ -to
+                use core::$t::consts::PI; // half a turn = 𝛕/2 = 180°
+                Self::from_axis_angle(from.any_orthonormal_vector(), PI)
+            } else {
+                let c = from.cross(to);
+                Self::from_xyzw(c.x, c.y, c.z, 1.0 + dot).normalize()
+            }
+        }
+
+        /// Gets the minimal rotation for transforming `from` to either `to` or `-to`. This means
+        /// that the resulting quaternion will rotate `from` so that it is colinear with `to`.
+        ///
+        /// The rotation is in the plane spanned by the two vectors. Will rotate at most 90
+        /// degrees.
+        ///
+        /// The input vectors must be normalized (unit-length).
+        ///
+        /// `to.dot(from_rotation_arc_colinear(from, to) * from).abs() ≈ 1`.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `from` or `to` are not normalized when `glam_assert` is enabled.
+        pub fn from_rotation_arc_colinear(from: $vec3, to: $vec3) -> Self {
+            if from.dot(to) < 0.0 {
+                Self::from_rotation_arc(from, -to)
+            } else {
+                Self::from_rotation_arc(from, to)
+            }
+        }
+
+        /// Gets the minimal rotation for transforming `from` to `to`. The resulting rotation is
+        /// around the z axis. Will rotate at most 180 degrees.
+        ///
+        /// The input vectors must be normalized (unit-length).
+        ///
+        /// `from_rotation_arc_2d(from, to) * from ≈ to`.
+        ///
+        /// For near-singular cases (from≈to and from≈-to) the current implementation
+        /// is only accurate to about 0.001 (for `f32`).
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `from` or `to` are not normalized when `glam_assert` is enabled.
+        pub fn from_rotation_arc_2d(from: $vec2, to: $vec2) -> Self {
+            glam_assert!(from.is_normalized());
+            glam_assert!(to.is_normalized());
+
+            const ONE_MINUS_EPSILON: $t = 1.0 - 2.0 * core::$t::EPSILON;
+            let dot = from.dot(to);
+            if dot > ONE_MINUS_EPSILON {
+                // 0° singularity: from ≈ to
+                Self::IDENTITY
+            } else if dot < -ONE_MINUS_EPSILON {
+                // 180° singularity: from ≈ -to
+                const COS_FRAC_PI_2: $t = 0.0;
+                const SIN_FRAC_PI_2: $t = 1.0;
+                // rotation around z by PI radians
+                Self::from_xyzw(0.0, 0.0, SIN_FRAC_PI_2, COS_FRAC_PI_2)
+            } else {
+                // vector3 cross where z=0
+                let z = from.x * to.y - to.x * from.y;
+                let w = 1.0 + dot;
+                // calculate length with x=0 and y=0 to normalize
+                let len_rcp = 1.0 / (z * z + w * w).sqrt();
+                Self::from_xyzw(0.0, 0.0, z * len_rcp, w * len_rcp)
+            }
+        }
+
+        /// Returns the rotation axis and angle (in radians) of `self`.
+        #[inline(always)]
+        pub fn to_axis_angle(self) -> ($vec3, $t) {
+            let (axis, angle) = self.0.to_axis_angle();
+            ($vec3(axis), angle)
+        }
+
+        /// Returns the rotation axis scaled by the rotation in radians.
+        #[inline(always)]
+        pub fn to_scaled_axis(self) -> $vec3 {
+            let (axis, angle) = self.0.to_axis_angle();
+            $vec3(axis) * angle
+        }
+
+        /// Returns the rotation angles for the given euler rotation sequence.
+        #[inline(always)]
+        pub fn to_euler(self, euler: EulerRot) -> ($t, $t, $t) {
+            euler.convert_quat(self)
+        }
+
+        /// `[x, y, z, w]`
+        #[inline(always)]
+        pub fn to_array(&self) -> [$t; 4] {
+            [self.x, self.y, self.z, self.w]
+        }
+
+        /// Returns the vector part of the quaternion.
+        #[inline(always)]
+        pub fn xyz(self) -> $vec3 {
+            $vec3::new(self.x, self.y, self.z)
+        }
+
+        /// Returns the quaternion conjugate of `self`. For a unit quaternion the
+        /// conjugate is also the inverse.
+        #[must_use]
+        #[inline(always)]
+        pub fn conjugate(self) -> Self {
+            Self(self.0.conjugate())
+        }
+
+        /// Returns the inverse of a normalized quaternion.
+        ///
+        /// Typically quaternion inverse returns the conjugate of a normalized quaternion.
+        /// Because `self` is assumed to already be unit length this method *does not* normalize
+        /// before returning the conjugate.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` is not normalized when `glam_assert` is enabled.
+        #[must_use]
+        #[inline(always)]
+        pub fn inverse(self) -> Self {
+            glam_assert!(self.is_normalized());
+            self.conjugate()
+        }
+
+        /// Computes the dot product of `self` and `other`. The dot product is
+        /// equal to the cosine of the angle between two quaternion rotations.
+        #[inline(always)]
+        pub fn dot(self, other: Self) -> $t {
+            Vector4::dot(self.0, other.0)
+        }
+
+        /// Computes the length of `self`.
+        #[doc(alias = "magnitude")]
+        #[inline(always)]
+        pub fn length(self) -> $t {
+            FloatVector4::length(self.0)
+        }
+
+        /// Computes the squared length of `self`.
+        ///
+        /// This is generally faster than `length()` as it avoids a square
+        /// root operation.
+        #[doc(alias = "magnitude2")]
+        #[inline(always)]
+        pub fn length_squared(self) -> $t {
+            FloatVector4::length_squared(self.0)
+        }
+
+        /// Computes `1.0 / length()`.
+        ///
+        /// For valid results, `self` must _not_ be of length zero.
+        #[inline(always)]
+        pub fn length_recip(self) -> $t {
+            FloatVector4::length_recip(self.0)
+        }
+
+        /// Returns `self` normalized to length 1.0.
+        ///
+        /// For valid results, `self` must _not_ be of length zero.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` is zero length when `glam_assert` is enabled.
+        #[must_use]
+        #[inline(always)]
+        pub fn normalize(self) -> Self {
+            Self(FloatVector4::normalize(self.0))
+        }
+
+        /// Returns `true` if, and only if, all elements are finite.
+        /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
+        #[inline(always)]
+        pub fn is_finite(self) -> bool {
+            FloatVector4::is_finite(self.0)
+        }
+
+        /// Returns `true` if any element of `self` is `NaN`.
+        #[inline(always)]
+        pub fn is_nan(self) -> bool {
+            FloatVector4::is_nan(self.0)
+        }
+
+        /// Returns whether `self` is of length `1.0` or not.
+        ///
+        /// Uses a precision threshold of `1e-6`.
+        #[inline(always)]
+        pub fn is_normalized(self) -> bool {
+            FloatVector4::is_normalized(self.0)
+        }
+
+        /// Returns whether `self` is approximately the identity rotation
+        /// (delegates to the inner storage's tolerance).
+        #[inline(always)]
+        pub fn is_near_identity(self) -> bool {
+            self.0.is_near_identity()
+        }
+
+        /// Returns the angle (in radians) for the minimal rotation
+        /// for transforming this quaternion into another.
+        ///
+        /// Both quaternions must be normalized.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` or `other` are not normalized when `glam_assert` is enabled.
+        pub fn angle_between(self, other: Self) -> $t {
+            glam_assert!(self.is_normalized() && other.is_normalized());
+            use crate::core::traits::scalar::FloatEx;
+            // `abs()` picks the shorter arc between q and -q, which represent
+            // the same rotation.
+            self.dot(other).abs().acos_approx() * 2.0
+        }
+
+        /// Returns true if the absolute difference of all elements between `self` and `other`
+        /// is less than or equal to `max_abs_diff`.
+        ///
+        /// This can be used to compare if two quaternions contain similar elements. It works
+        /// best when comparing with a known value. The `max_abs_diff` that should be used
+        /// depends on the values being compared against.
+        ///
+        /// For more see
+        /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+        #[inline(always)]
+        pub fn abs_diff_eq(self, other: Self, max_abs_diff: $t) -> bool {
+            FloatVector4::abs_diff_eq(self.0, other.0, max_abs_diff)
+        }
+
+        /// Performs a linear interpolation between `self` and `other` based on
+        /// the value `s`.
+        ///
+        /// When `s` is `0.0`, the result will be equal to `self`. When `s`
+        /// is `1.0`, the result will be equal to `other`.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` or `end` are not normalized when `glam_assert` is enabled.
+        #[inline(always)]
+        #[doc(alias = "mix")]
+        pub fn lerp(self, end: Self, s: $t) -> Self {
+            Self(self.0.lerp(end.0, s))
+        }
+
+        /// Performs a spherical linear interpolation between `self` and `end`
+        /// based on the value `s`.
+        ///
+        /// When `s` is `0.0`, the result will be equal to `self`. When `s`
+        /// is `1.0`, the result will be equal to `end`.
+        ///
+        /// Note that a rotation can be represented by two quaternions: `q` and
+        /// `-q`. The slerp path between `q` and `end` will be different from the
+        /// path between `-q` and `end`. One path will take the long way around and
+        /// one will take the short way. In order to correct for this, the `dot`
+        /// product between `self` and `end` should be positive. If the `dot`
+        /// product is negative, slerp between `-self` and `end`.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` or `end` are not normalized when `glam_assert` is enabled.
+        #[inline(always)]
+        pub fn slerp(self, end: Self, s: $t) -> Self {
+            Self(self.0.slerp(end.0, s))
+        }
+
+        /// Multiplies a quaternion and a 3D vector, returning the rotated vector.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` is not normalized when `glam_assert` is enabled.
+        #[inline(always)]
+        pub fn mul_vec3(self, other: $vec3) -> $vec3 {
+            $vec3(self.0.mul_vector3(other.0))
+        }
+
+        /// Multiplies two quaternions. If they each represent a rotation, the result will
+        /// represent the combined rotation.
+        ///
+        /// Note that due to floating point rounding the result may not be perfectly normalized.
+        ///
+        /// # Panics
+        ///
+        /// Will panic if `self` or `other` are not normalized when `glam_assert` is enabled.
+        #[inline(always)]
+        pub fn mul_quat(self, other: Self) -> Self {
+            Self(self.0.mul_quaternion(other.0))
+        }
+    };
+}
+
+// Implements the free-function constructor and standard trait impls shared by
+// `Quat` and `DQuat`. `$new` is the name of the free constructor (`quat`/`dquat`).
+macro_rules! impl_quat_traits {
+    ($t:ty, $new:ident, $quat:ident, $vec3:ident, $vec4:ident, $inner:ident) => {
+        /// Creates a quaternion from `x`, `y`, `z` and `w` values.
+        ///
+        /// This should generally not be called manually unless you know what you are doing. Use
+        /// one of the other constructors instead such as `identity` or `from_axis_angle`.
+        #[inline]
+        pub fn $new(x: $t, y: $t, z: $t, w: $t) -> $quat {
+            $quat::from_xyzw(x, y, z, w)
+        }
+
+        #[cfg(not(target_arch = "spirv"))]
+        impl fmt::Debug for $quat {
+            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                fmt.debug_tuple(stringify!($quat))
+                    .field(&self.x)
+                    .field(&self.y)
+                    .field(&self.z)
+                    .field(&self.w)
+                    .finish()
+            }
+        }
+
+        #[cfg(not(target_arch = "spirv"))]
+        impl fmt::Display for $quat {
+            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                write!(fmt, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
+            }
+        }
+
+        impl Add<$quat> for $quat {
+            type Output = Self;
+            /// Adds two quaternions.
+            ///
+            /// The sum is not guaranteed to be normalized.
+            ///
+            /// Note that addition is not the same as combining the rotations represented by the
+            /// two quaternions! That corresponds to multiplication.
+            #[inline]
+            fn add(self, other: Self) -> Self {
+                Self(self.0.add(other.0))
+            }
+        }
+
+        impl Sub<$quat> for $quat {
+            type Output = Self;
+            /// Subtracts the other quaternion from self.
+            ///
+            /// The difference is not guaranteed to be normalized.
+            #[inline]
+            fn sub(self, other: Self) -> Self {
+                Self(self.0.sub(other.0))
+            }
+        }
+
+        impl Mul<$t> for $quat {
+            type Output = Self;
+            /// Multiplies a quaternion by a scalar value.
+            ///
+            /// The product is not guaranteed to be normalized.
+            #[inline]
+            fn mul(self, other: $t) -> Self {
+                Self(self.0.scale(other))
+            }
+        }
+
+        impl Div<$t> for $quat {
+            type Output = Self;
+            /// Divides a quaternion by a scalar value.
+            ///
+            /// The quotient is not guaranteed to be normalized.
+            #[inline]
+            fn div(self, other: $t) -> Self {
+                // Implemented as a scale by the reciprocal.
+                Self(self.0.scale(other.recip()))
+            }
+        }
+
+        impl Mul<$quat> for $quat {
+            type Output = Self;
+            /// Multiplies two quaternions. If they each represent a rotation, the result will
+            /// represent the combined rotation.
+            ///
+            /// Note that due to floating point rounding the result may not be perfectly
+            /// normalized.
+            ///
+            /// # Panics
+            ///
+            /// Will panic if `self` or `other` are not normalized when `glam_assert` is enabled.
+            #[inline]
+            fn mul(self, other: Self) -> Self {
+                Self(self.0.mul_quaternion(other.0))
+            }
+        }
+
+        impl MulAssign<$quat> for $quat {
+            /// Multiplies two quaternions. If they each represent a rotation, the result will
+            /// represent the combined rotation.
+            ///
+            /// Note that due to floating point rounding the result may not be perfectly
+            /// normalized.
+            ///
+            /// # Panics
+            ///
+            /// Will panic if `self` or `other` are not normalized when `glam_assert` is enabled.
+            #[inline]
+            fn mul_assign(&mut self, other: Self) {
+                self.0 = self.0.mul_quaternion(other.0);
+            }
+        }
+
+        impl Mul<$vec3> for $quat {
+            type Output = $vec3;
+            /// Multiplies a quaternion and a 3D vector, returning the rotated vector.
+            ///
+            /// # Panics
+            ///
+            /// Will panic if `self` is not normalized when `glam_assert` is enabled.
+            #[inline]
+            fn mul(self, other: $vec3) -> Self::Output {
+                $vec3(self.0.mul_vector3(other.0))
+            }
+        }
+
+        impl Neg for $quat {
+            type Output = Self;
+            /// Negates all components. `-q` represents the same rotation as `q`.
+            #[inline]
+            fn neg(self) -> Self {
+                Self(self.0.scale(-1.0))
+            }
+        }
+
+        impl Default for $quat {
+            /// The default is the identity (no rotation).
+            #[inline]
+            fn default() -> Self {
+                Self::IDENTITY
+            }
+        }
+
+        impl PartialEq for $quat {
+            /// Exact element-wise comparison; `q` and `-q` compare unequal even
+            /// though they represent the same rotation.
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                MaskVector4::all(self.0.cmpeq(other.0))
+            }
+        }
+
+        #[cfg(not(target_arch = "spirv"))]
+        impl AsRef<[$t; 4]> for $quat {
+            #[inline(always)]
+            fn as_ref(&self) -> &[$t; 4] {
+                // SAFETY: $quat is a repr(transparent)/repr(C) wrapper over four
+                // contiguous $t values — size and alignment are checked by the
+                // `const_test_*` modules at the bottom of this file.
+                unsafe { &*(self as *const Self as *const [$t; 4]) }
+            }
+        }
+
+        #[cfg(not(target_arch = "spirv"))]
+        impl AsMut<[$t; 4]> for $quat {
+            #[inline(always)]
+            fn as_mut(&mut self) -> &mut [$t; 4] {
+                // SAFETY: same layout argument as `AsRef` above; the mutable
+                // borrow of `self` guarantees exclusive access.
+                unsafe { &mut *(self as *mut Self as *mut [$t; 4]) }
+            }
+        }
+
+        impl From<$quat> for $vec4 {
+            #[inline(always)]
+            fn from(q: $quat) -> Self {
+                $vec4(q.0)
+            }
+        }
+
+        impl From<$quat> for ($t, $t, $t, $t) {
+            #[inline(always)]
+            fn from(q: $quat) -> Self {
+                Vector4::into_tuple(q.0)
+            }
+        }
+
+        impl From<$quat> for [$t; 4] {
+            #[inline(always)]
+            fn from(q: $quat) -> Self {
+                Vector4::into_array(q.0)
+            }
+        }
+
+        impl From<$quat> for $inner {
+            // TODO: write test
+            #[inline(always)]
+            fn from(q: $quat) -> Self {
+                q.0
+            }
+        }
+
+        impl Deref for $quat {
+            type Target = crate::XYZW<$t>;
+            /// Provides read access to the `x`/`y`/`z`/`w` fields regardless of
+            /// the underlying storage type.
+            #[inline(always)]
+            fn deref(&self) -> &Self::Target {
+                self.0.as_ref_xyzw()
+            }
+        }
+
+        impl<'a> Sum<&'a Self> for $quat {
+            /// Element-wise sum; the result is not guaranteed to be normalized.
+            fn sum<I>(iter: I) -> Self
+            where
+                I: Iterator<Item = &'a Self>,
+            {
+                use crate::core::traits::vector::VectorConst;
+                iter.fold(Self($inner::ZERO), |a, &b| Self::add(a, b))
+            }
+        }
+
+        impl<'a> Product<&'a Self> for $quat {
+            /// Combines rotations via quaternion multiplication, starting from
+            /// the identity.
+            fn product<I>(iter: I) -> Self
+            where
+                I: Iterator<Item = &'a Self>,
+            {
+                iter.fold(Self::IDENTITY, |a, &b| Self::mul(a, b))
+            }
+        }
+    };
+}
+
+// Storage type for the single precision quaternion: an SSE2 or wasm SIMD
+// register when available, otherwise a plain scalar `XYZW<f32>` struct.
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type InnerF32 = __m128;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type InnerF32 = v128;
+
+#[cfg(any(
+    not(any(target_feature = "sse2", target_feature = "simd128")),
+    feature = "scalar-math"
+))]
+type InnerF32 = crate::XYZW<f32>;
+
+/// A quaternion representing an orientation.
+///
+/// This quaternion is intended to be of unit length but may denormalize due to
+/// floating point "error creep" which can occur when successive quaternion
+/// operations are applied.
+///
+/// This type is 16 byte aligned.
+#[derive(Clone, Copy)]
+// With the scalar `XYZW<f32>` storage (no SIMD, not SPIR-V, not scalar-math)
+// the 16-byte alignment promised above must be requested explicitly.
+#[cfg_attr(
+    not(any(
+        feature = "scalar-math",
+        target_arch = "spirv",
+        target_feature = "sse2",
+        target_feature = "simd128"
+    )),
+    repr(C, align(16))
+)]
+// Otherwise the wrapper is transparent: alignment comes from the inner SIMD
+// type, or is deliberately relaxed for scalar-math/SPIR-V builds (see the
+// `const_test_quat` modules below).
+#[cfg_attr(
+    any(
+        feature = "scalar-math",
+        target_arch = "spirv",
+        target_feature = "sse2",
+        target_feature = "simd128"
+    ),
+    repr(transparent)
+)]
+pub struct Quat(pub(crate) InnerF32);
+
+impl Quat {
+    impl_quat_methods!(f32, Quat, Vec2, Vec3, Vec4, Mat3, Mat4, InnerF32);
+
+    /// Multiplies a quaternion and a 3D vector, returning the rotated vector.
+    #[inline(always)]
+    pub fn mul_vec3a(self, other: Vec3A) -> Vec3A {
+        // `into()` bridges the inner storage types of `Quat` and `Vec3A`; it is
+        // an identity conversion when both use the same SIMD type, hence the allow.
+        #[allow(clippy::useless_conversion)]
+        Vec3A(self.0.mul_float4_as_vector3(other.0.into()).into())
+    }
+
+    /// Returns an `f64` quaternion with the same values as `self`.
+    #[inline(always)]
+    pub fn as_f64(self) -> DQuat {
+        DQuat::from_xyzw(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
+    }
+
+    /// Creates a quaternion from a 3x3 rotation matrix inside a 3D affine transform.
+    #[inline]
+    pub fn from_affine3(mat: &crate::Affine3A) -> Self {
+        Self(Quaternion::from_rotation_axes(
+            mat.x_axis.0.into(),
+            mat.y_axis.0.into(),
+            mat.z_axis.0.into(),
+        ))
+    }
+}
+impl_quat_traits!(f32, quat, Quat, Vec3, Vec4, InnerF32);
+
+impl Mul<Vec3A> for Quat {
+    type Output = Vec3A;
+    /// Multiplies a quaternion and a 3D vector, returning the rotated vector.
+    #[inline(always)]
+    fn mul(self, other: Vec3A) -> Self::Output {
+        self.mul_vec3a(other)
+    }
+}
+
+// Storage for the double precision quaternion; there is no SIMD path for
+// `f64`, so `DQuat` always wraps a plain scalar `XYZW<f64>`.
+type InnerF64 = crate::XYZW<f64>;
+
+/// A quaternion representing an orientation.
+///
+/// This quaternion is intended to be of unit length but may denormalize due to
+/// floating point "error creep" which can occur when successive quaternion
+/// operations are applied.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct DQuat(pub(crate) InnerF64);
+
+impl DQuat {
+    impl_quat_methods!(f64, DQuat, DVec2, DVec3, DVec4, DMat3, DMat4, InnerF64);
+
+    /// Returns an `f32` quaternion with the same values as `self`, casting each
+    /// element from `f64` to `f32` (may lose precision).
+    #[inline(always)]
+    pub fn as_f32(self) -> Quat {
+        Quat::from_xyzw(self.x as f32, self.y as f32, self.z as f32, self.w as f32)
+    }
+
+    /// Creates a quaternion from a 3x3 rotation matrix inside a 3D affine transform.
+    #[inline]
+    pub fn from_affine3(mat: &crate::DAffine3) -> Self {
+        Self(Quaternion::from_rotation_axes(
+            mat.x_axis.0,
+            mat.y_axis.0,
+            mat.z_axis.0,
+        ))
+    }
+}
+impl_quat_traits!(f64, dquat, DQuat, DVec3, DVec4, InnerF64);
+
+// With scalar storage (or on SPIR-V) `Quat` inherits `f32` alignment but must
+// still be exactly 16 bytes.
+#[cfg(any(feature = "scalar-math", target_arch = "spirv"))]
+mod const_test_quat {
+    const_assert_eq!(
+        core::mem::align_of::<f32>(),
+        core::mem::align_of::<super::Quat>()
+    );
+    const_assert_eq!(16, core::mem::size_of::<super::Quat>());
+}
+
+// With SIMD storage (or the explicit repr) `Quat` is 16-byte aligned.
+#[cfg(not(any(feature = "scalar-math", target_arch = "spirv")))]
+mod const_test_quat {
+    const_assert_eq!(16, core::mem::align_of::<super::Quat>());
+    const_assert_eq!(16, core::mem::size_of::<super::Quat>());
+}
+
+// `DQuat` always uses scalar storage: `f64` alignment, 32 bytes.
+mod const_test_dquat {
+    const_assert_eq!(
+        core::mem::align_of::<f64>(),
+        core::mem::align_of::<super::DQuat>()
+    );
+    const_assert_eq!(32, core::mem::size_of::<super::DQuat>());
+}
diff --git a/src/spirv.rs b/src/spirv.rs
new file mode 100644
index 0000000..7b618cd
--- /dev/null
+++ b/src/spirv.rs
@@ -0,0 +1,26 @@
+// Emits a `compile_error!` for each listed Cargo feature that is enabled while
+// targeting SPIR-V, turning an unsupported configuration into a build failure
+// with a clear message instead of obscure downstream errors.
+#[cfg(target_arch = "spirv")]
+macro_rules! unsupported_features {
+    ($($feature:literal),+ $(,)?) => {
+        $(
+            #[cfg(feature = $feature)]
+            compile_error!(
+                concat!(
+                    "`",
+                    $feature,
+                    "`",
+                    " feature is not supported when building for SPIR-V.",
+                )
+            );
+        )+
+    }
+}
+
+// Features known not to work when building for SPIR-V.
+#[cfg(target_arch = "spirv")]
+unsupported_features! {
+    "approx",
+    "debug-glam-assert",
+    "glam-assert",
+    "rand",
+    "serde",
+    "std",
+}
diff --git a/src/swizzles/dvec2_impl_scalar.rs b/src/swizzles/dvec2_impl_scalar.rs
new file mode 100644
index 0000000..1cb65e9
--- /dev/null
+++ b/src/swizzles/dvec2_impl_scalar.rs
@@ -0,0 +1,118 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec2Swizzles;
+use crate::{DVec2, DVec3, DVec4};
+
+// Machine-generated swizzle accessors; each method rearranges/duplicates the
+// `x`/`y` components into a new vector, mirroring GLSL-style swizzling.
+impl Vec2Swizzles for DVec2 {
+    type Vec3 = DVec3;
+    type Vec4 = DVec4;
+
+    // Four-component swizzles.
+    #[inline]
+    fn xxxx(self) -> DVec4 {
+        DVec4::new(self.x, self.x, self.x, self.x)
+    }
+    #[inline]
+    fn xxxy(self) -> DVec4 {
+        DVec4::new(self.x, self.x, self.x, self.y)
+    }
+    #[inline]
+    fn xxyx(self) -> DVec4 {
+        DVec4::new(self.x, self.x, self.y, self.x)
+    }
+    #[inline]
+    fn xxyy(self) -> DVec4 {
+        DVec4::new(self.x, self.x, self.y, self.y)
+    }
+    #[inline]
+    fn xyxx(self) -> DVec4 {
+        DVec4::new(self.x, self.y, self.x, self.x)
+    }
+    #[inline]
+    fn xyxy(self) -> DVec4 {
+        DVec4::new(self.x, self.y, self.x, self.y)
+    }
+    #[inline]
+    fn xyyx(self) -> DVec4 {
+        DVec4::new(self.x, self.y, self.y, self.x)
+    }
+    #[inline]
+    fn xyyy(self) -> DVec4 {
+        DVec4::new(self.x, self.y, self.y, self.y)
+    }
+    #[inline]
+    fn yxxx(self) -> DVec4 {
+        DVec4::new(self.y, self.x, self.x, self.x)
+    }
+    #[inline]
+    fn yxxy(self) -> DVec4 {
+        DVec4::new(self.y, self.x, self.x, self.y)
+    }
+    #[inline]
+    fn yxyx(self) -> DVec4 {
+        DVec4::new(self.y, self.x, self.y, self.x)
+    }
+    #[inline]
+    fn yxyy(self) -> DVec4 {
+        DVec4::new(self.y, self.x, self.y, self.y)
+    }
+    #[inline]
+    fn yyxx(self) -> DVec4 {
+        DVec4::new(self.y, self.y, self.x, self.x)
+    }
+    #[inline]
+    fn yyxy(self) -> DVec4 {
+        DVec4::new(self.y, self.y, self.x, self.y)
+    }
+    #[inline]
+    fn yyyx(self) -> DVec4 {
+        DVec4::new(self.y, self.y, self.y, self.x)
+    }
+    #[inline]
+    fn yyyy(self) -> DVec4 {
+        DVec4::new(self.y, self.y, self.y, self.y)
+    }
+    // Three-component swizzles.
+    #[inline]
+    fn xxx(self) -> DVec3 {
+        DVec3::new(self.x, self.x, self.x)
+    }
+    #[inline]
+    fn xxy(self) -> DVec3 {
+        DVec3::new(self.x, self.x, self.y)
+    }
+    #[inline]
+    fn xyx(self) -> DVec3 {
+        DVec3::new(self.x, self.y, self.x)
+    }
+    #[inline]
+    fn xyy(self) -> DVec3 {
+        DVec3::new(self.x, self.y, self.y)
+    }
+    #[inline]
+    fn yxx(self) -> DVec3 {
+        DVec3::new(self.y, self.x, self.x)
+    }
+    #[inline]
+    fn yxy(self) -> DVec3 {
+        DVec3::new(self.y, self.x, self.y)
+    }
+    #[inline]
+    fn yyx(self) -> DVec3 {
+        DVec3::new(self.y, self.y, self.x)
+    }
+    #[inline]
+    fn yyy(self) -> DVec3 {
+        DVec3::new(self.y, self.y, self.y)
+    }
+    // Two-component swizzles (`xy` is the identity and is omitted).
+    #[inline]
+    fn xx(self) -> Self {
+        Self::new(self.x, self.x)
+    }
+    #[inline]
+    fn yx(self) -> Self {
+        Self::new(self.y, self.x)
+    }
+    #[inline]
+    fn yy(self) -> Self {
+        Self::new(self.y, self.y)
+    }
+}
diff --git a/src/swizzles/dvec3_impl_scalar.rs b/src/swizzles/dvec3_impl_scalar.rs
new file mode 100644
index 0000000..53fab09
--- /dev/null
+++ b/src/swizzles/dvec3_impl_scalar.rs
@@ -0,0 +1,474 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{DVec2, DVec3, DVec4};
+
+impl Vec3Swizzles for DVec3 {
+ type Vec2 = DVec2;
+ type Vec4 = DVec4;
+
+ #[inline]
+ fn xxxx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxyx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxzx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xyxx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyyx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyzx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xzxx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzyx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzzx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yxxx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxyx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxzx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yyxx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyyx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyzx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzxx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzyx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzzx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zxxx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxyx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxzx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zyxx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyyx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyzx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zzxx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzyx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzzx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ Self::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ Self::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ Self::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ Self::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ Self::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ Self::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ Self::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ Self::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ Self::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ Self::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ Self::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ Self::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ Self::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ Self::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ Self::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ Self::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ Self::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ Self::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ Self::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ Self::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ Self::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ Self::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ Self::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ Self::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ Self::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ Self::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xx(self) -> DVec2 {
+ DVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> DVec2 {
+ DVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> DVec2 {
+ DVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn yx(self) -> DVec2 {
+ DVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> DVec2 {
+ DVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> DVec2 {
+ DVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn zx(self) -> DVec2 {
+ DVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> DVec2 {
+ DVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> DVec2 {
+ DVec2::new(self.z, self.z)
+ }
+}
diff --git a/src/swizzles/dvec4_impl_scalar.rs b/src/swizzles/dvec4_impl_scalar.rs
new file mode 100644
index 0000000..4b6740b
--- /dev/null
+++ b/src/swizzles/dvec4_impl_scalar.rs
@@ -0,0 +1,1350 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{DVec2, DVec3, DVec4};
+
+impl Vec4Swizzles for DVec4 {
+ type Vec2 = DVec2;
+ type Vec3 = DVec3;
+
+ #[inline]
+ fn xxxx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxxw(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xxyx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxyw(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xxzx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xxzw(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xxwx(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xxwy(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xxwz(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xxww(self) -> DVec4 {
+ DVec4::new(self.x, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn xyxx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyxw(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn xyyx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyyw(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn xyzx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xywx(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn xywy(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn xywz(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn xyww(self) -> DVec4 {
+ DVec4::new(self.x, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn xzxx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzxw(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn xzyx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzyw(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn xzzx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xzzw(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn xzwx(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn xzwy(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn xzwz(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn xzww(self) -> DVec4 {
+ DVec4::new(self.x, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn xwxx(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn xwxy(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn xwxz(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn xwxw(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn xwyx(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn xwyy(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn xwyz(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn xwyw(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn xwzx(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn xwzy(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn xwzz(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn xwzw(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn xwwx(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn xwwy(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn xwwz(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn xwww(self) -> DVec4 {
+ DVec4::new(self.x, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn yxxx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxxw(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn yxyx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxyw(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn yxzx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxzw(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn yxwx(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn yxwy(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn yxwz(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn yxww(self) -> DVec4 {
+ DVec4::new(self.y, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yyxx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyxw(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyyx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyyw(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yyzx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yyzw(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn yywx(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn yywy(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn yywz(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yyww(self) -> DVec4 {
+ DVec4::new(self.y, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn yzxx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzxw(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn yzyx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzyw(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn yzzx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yzzw(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn yzwx(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn yzwy(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn yzwz(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn yzww(self) -> DVec4 {
+ DVec4::new(self.y, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn ywxx(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn ywxy(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn ywxz(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn ywxw(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn ywyx(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn ywyy(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn ywyz(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn ywyw(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn ywzx(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn ywzy(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn ywzz(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn ywzw(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn ywwx(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn ywwy(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn ywwz(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn ywww(self) -> DVec4 {
+ DVec4::new(self.y, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn zxxx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxxw(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn zxyx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxyw(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn zxzx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zxzw(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn zxwx(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn zxwy(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn zxwz(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn zxww(self) -> DVec4 {
+ DVec4::new(self.z, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn zyxx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyxw(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn zyyx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyyw(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn zyzx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zyzw(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn zywx(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn zywy(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn zywz(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn zyww(self) -> DVec4 {
+ DVec4::new(self.z, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zzxx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzxw(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zzyx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzyw(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzzx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzzw(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zzwx(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zzwy(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zzwz(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zzww(self) -> DVec4 {
+ DVec4::new(self.z, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn zwxx(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn zwxy(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn zwxz(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn zwxw(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn zwyx(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn zwyy(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn zwyz(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn zwyw(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn zwzx(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn zwzy(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn zwzz(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn zwzw(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn zwwx(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn zwwy(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn zwwz(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn zwww(self) -> DVec4 {
+ DVec4::new(self.z, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn wxxx(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn wxxy(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn wxxz(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn wxxw(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn wxyx(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn wxyy(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn wxyz(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn wxyw(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn wxzx(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn wxzy(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn wxzz(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn wxzw(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn wxwx(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn wxwy(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn wxwz(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn wxww(self) -> DVec4 {
+ DVec4::new(self.w, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn wyxx(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn wyxy(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn wyxz(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn wyxw(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn wyyx(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn wyyy(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn wyyz(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn wyyw(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn wyzx(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn wyzy(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn wyzz(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn wyzw(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn wywx(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn wywy(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn wywz(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn wyww(self) -> DVec4 {
+ DVec4::new(self.w, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn wzxx(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn wzxy(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn wzxz(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn wzxw(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn wzyx(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn wzyy(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn wzyz(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn wzyw(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn wzzx(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn wzzy(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn wzzz(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn wzzw(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn wzwx(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn wzwy(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn wzwz(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn wzww(self) -> DVec4 {
+ DVec4::new(self.w, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wwxx(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wwxy(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wwxz(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wwxw(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wwyx(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wwyy(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wwyz(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wwyw(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wwzx(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wwzy(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wwzz(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wwzw(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwwx(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwwy(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwwz(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn wwww(self) -> DVec4 {
+ DVec4::new(self.w, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xxx(self) -> DVec3 {
+ DVec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> DVec3 {
+ DVec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> DVec3 {
+ DVec3::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxw(self) -> DVec3 {
+ DVec3::new(self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xyx(self) -> DVec3 {
+ DVec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> DVec3 {
+ DVec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyz(self) -> DVec3 {
+ DVec3::new(self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xyw(self) -> DVec3 {
+ DVec3::new(self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xzx(self) -> DVec3 {
+ DVec3::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> DVec3 {
+ DVec3::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> DVec3 {
+ DVec3::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xzw(self) -> DVec3 {
+ DVec3::new(self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xwx(self) -> DVec3 {
+ DVec3::new(self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xwy(self) -> DVec3 {
+ DVec3::new(self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xwz(self) -> DVec3 {
+ DVec3::new(self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xww(self) -> DVec3 {
+ DVec3::new(self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yxx(self) -> DVec3 {
+ DVec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> DVec3 {
+ DVec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> DVec3 {
+ DVec3::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yxw(self) -> DVec3 {
+ DVec3::new(self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyx(self) -> DVec3 {
+ DVec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> DVec3 {
+ DVec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> DVec3 {
+ DVec3::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyw(self) -> DVec3 {
+ DVec3::new(self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yzx(self) -> DVec3 {
+ DVec3::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> DVec3 {
+ DVec3::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> DVec3 {
+ DVec3::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzw(self) -> DVec3 {
+ DVec3::new(self.y, self.z, self.w)
+ }
+ #[inline]
+ fn ywx(self) -> DVec3 {
+ DVec3::new(self.y, self.w, self.x)
+ }
+ #[inline]
+ fn ywy(self) -> DVec3 {
+ DVec3::new(self.y, self.w, self.y)
+ }
+ #[inline]
+ fn ywz(self) -> DVec3 {
+ DVec3::new(self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yww(self) -> DVec3 {
+ DVec3::new(self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zxx(self) -> DVec3 {
+ DVec3::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> DVec3 {
+ DVec3::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> DVec3 {
+ DVec3::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zxw(self) -> DVec3 {
+ DVec3::new(self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zyx(self) -> DVec3 {
+ DVec3::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> DVec3 {
+ DVec3::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> DVec3 {
+ DVec3::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zyw(self) -> DVec3 {
+ DVec3::new(self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzx(self) -> DVec3 {
+ DVec3::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> DVec3 {
+ DVec3::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> DVec3 {
+ DVec3::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzw(self) -> DVec3 {
+ DVec3::new(self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zwx(self) -> DVec3 {
+ DVec3::new(self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zwy(self) -> DVec3 {
+ DVec3::new(self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zwz(self) -> DVec3 {
+ DVec3::new(self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zww(self) -> DVec3 {
+ DVec3::new(self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wxx(self) -> DVec3 {
+ DVec3::new(self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wxy(self) -> DVec3 {
+ DVec3::new(self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wxz(self) -> DVec3 {
+ DVec3::new(self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wxw(self) -> DVec3 {
+ DVec3::new(self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wyx(self) -> DVec3 {
+ DVec3::new(self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wyy(self) -> DVec3 {
+ DVec3::new(self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wyz(self) -> DVec3 {
+ DVec3::new(self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wyw(self) -> DVec3 {
+ DVec3::new(self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wzx(self) -> DVec3 {
+ DVec3::new(self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wzy(self) -> DVec3 {
+ DVec3::new(self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wzz(self) -> DVec3 {
+ DVec3::new(self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wzw(self) -> DVec3 {
+ DVec3::new(self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwx(self) -> DVec3 {
+ DVec3::new(self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwy(self) -> DVec3 {
+ DVec3::new(self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwz(self) -> DVec3 {
+ DVec3::new(self.w, self.w, self.z)
+ }
+ #[inline]
+ fn www(self) -> DVec3 {
+ DVec3::new(self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xx(self) -> DVec2 {
+ DVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> DVec2 {
+ DVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> DVec2 {
+ DVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn xw(self) -> DVec2 {
+ DVec2::new(self.x, self.w)
+ }
+ #[inline]
+ fn yx(self) -> DVec2 {
+ DVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> DVec2 {
+ DVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> DVec2 {
+ DVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn yw(self) -> DVec2 {
+ DVec2::new(self.y, self.w)
+ }
+ #[inline]
+ fn zx(self) -> DVec2 {
+ DVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> DVec2 {
+ DVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> DVec2 {
+ DVec2::new(self.z, self.z)
+ }
+ #[inline]
+ fn zw(self) -> DVec2 {
+ DVec2::new(self.z, self.w)
+ }
+ #[inline]
+ fn wx(self) -> DVec2 {
+ DVec2::new(self.w, self.x)
+ }
+ #[inline]
+ fn wy(self) -> DVec2 {
+ DVec2::new(self.w, self.y)
+ }
+ #[inline]
+ fn wz(self) -> DVec2 {
+ DVec2::new(self.w, self.z)
+ }
+ #[inline]
+ fn ww(self) -> DVec2 {
+ DVec2::new(self.w, self.w)
+ }
+}
diff --git a/src/swizzles/ivec2_impl_scalar.rs b/src/swizzles/ivec2_impl_scalar.rs
new file mode 100644
index 0000000..26702c8
--- /dev/null
+++ b/src/swizzles/ivec2_impl_scalar.rs
@@ -0,0 +1,118 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec2Swizzles;
+use crate::{IVec2, IVec3, IVec4};
+
+impl Vec2Swizzles for IVec2 {
+ type Vec3 = IVec3;
+ type Vec4 = IVec4;
+
+ #[inline]
+ fn xxxx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxyx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyxx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyyx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yxxx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxyx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yyxx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyyx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xxx(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xyx(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxx(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyx(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xx(self) -> Self {
+ Self::new(self.x, self.x)
+ }
+ #[inline]
+ fn yx(self) -> Self {
+ Self::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Self {
+ Self::new(self.y, self.y)
+ }
+}
diff --git a/src/swizzles/ivec3_impl_scalar.rs b/src/swizzles/ivec3_impl_scalar.rs
new file mode 100644
index 0000000..799e5d8
--- /dev/null
+++ b/src/swizzles/ivec3_impl_scalar.rs
@@ -0,0 +1,474 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{IVec2, IVec3, IVec4};
+
+impl Vec3Swizzles for IVec3 {
+ type Vec2 = IVec2;
+ type Vec4 = IVec4;
+
+ #[inline]
+ fn xxxx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxyx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxzx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xyxx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyyx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyzx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xzxx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzyx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzzx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yxxx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxyx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxzx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yyxx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyyx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyzx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzxx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzyx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzzx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zxxx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxyx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxzx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zyxx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyyx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyzx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zzxx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzyx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzzx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ Self::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ Self::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ Self::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ Self::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ Self::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ Self::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ Self::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ Self::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ Self::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ Self::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ Self::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ Self::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ Self::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ Self::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ Self::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ Self::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ Self::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ Self::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ Self::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ Self::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ Self::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ Self::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ Self::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ Self::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ Self::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ Self::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xx(self) -> IVec2 {
+ IVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> IVec2 {
+ IVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> IVec2 {
+ IVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn yx(self) -> IVec2 {
+ IVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> IVec2 {
+ IVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> IVec2 {
+ IVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn zx(self) -> IVec2 {
+ IVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> IVec2 {
+ IVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> IVec2 {
+ IVec2::new(self.z, self.z)
+ }
+}
diff --git a/src/swizzles/ivec4_impl_scalar.rs b/src/swizzles/ivec4_impl_scalar.rs
new file mode 100644
index 0000000..a4985af
--- /dev/null
+++ b/src/swizzles/ivec4_impl_scalar.rs
@@ -0,0 +1,1350 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{IVec2, IVec3, IVec4};
+
+impl Vec4Swizzles for IVec4 {
+ type Vec2 = IVec2;
+ type Vec3 = IVec3;
+
+ #[inline]
+ fn xxxx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxxw(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xxyx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxyw(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xxzx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xxzw(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xxwx(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xxwy(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xxwz(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xxww(self) -> IVec4 {
+ IVec4::new(self.x, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn xyxx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyxw(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn xyyx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyyw(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn xyzx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xywx(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn xywy(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn xywz(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn xyww(self) -> IVec4 {
+ IVec4::new(self.x, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn xzxx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzxw(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn xzyx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzyw(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn xzzx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xzzw(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn xzwx(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn xzwy(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn xzwz(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn xzww(self) -> IVec4 {
+ IVec4::new(self.x, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn xwxx(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn xwxy(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn xwxz(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn xwxw(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn xwyx(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn xwyy(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn xwyz(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn xwyw(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn xwzx(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn xwzy(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn xwzz(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn xwzw(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn xwwx(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn xwwy(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn xwwz(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn xwww(self) -> IVec4 {
+ IVec4::new(self.x, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn yxxx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxxw(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn yxyx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxyw(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn yxzx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxzw(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn yxwx(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn yxwy(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn yxwz(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn yxww(self) -> IVec4 {
+ IVec4::new(self.y, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yyxx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyxw(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyyx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyyw(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yyzx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yyzw(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn yywx(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn yywy(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn yywz(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yyww(self) -> IVec4 {
+ IVec4::new(self.y, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn yzxx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzxw(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn yzyx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzyw(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn yzzx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yzzw(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn yzwx(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn yzwy(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn yzwz(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn yzww(self) -> IVec4 {
+ IVec4::new(self.y, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn ywxx(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn ywxy(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn ywxz(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn ywxw(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn ywyx(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn ywyy(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn ywyz(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn ywyw(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn ywzx(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn ywzy(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn ywzz(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn ywzw(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn ywwx(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn ywwy(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn ywwz(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn ywww(self) -> IVec4 {
+ IVec4::new(self.y, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn zxxx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxxw(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn zxyx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxyw(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn zxzx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zxzw(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn zxwx(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn zxwy(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn zxwz(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn zxww(self) -> IVec4 {
+ IVec4::new(self.z, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn zyxx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyxw(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn zyyx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyyw(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn zyzx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zyzw(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn zywx(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn zywy(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn zywz(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn zyww(self) -> IVec4 {
+ IVec4::new(self.z, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zzxx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzxw(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zzyx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzyw(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzzx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzzw(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zzwx(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zzwy(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zzwz(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zzww(self) -> IVec4 {
+ IVec4::new(self.z, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn zwxx(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn zwxy(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn zwxz(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn zwxw(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn zwyx(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn zwyy(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn zwyz(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn zwyw(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn zwzx(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn zwzy(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn zwzz(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn zwzw(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn zwwx(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn zwwy(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn zwwz(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn zwww(self) -> IVec4 {
+ IVec4::new(self.z, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn wxxx(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn wxxy(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn wxxz(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn wxxw(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn wxyx(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn wxyy(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn wxyz(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn wxyw(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn wxzx(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn wxzy(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn wxzz(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn wxzw(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn wxwx(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn wxwy(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn wxwz(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn wxww(self) -> IVec4 {
+ IVec4::new(self.w, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn wyxx(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn wyxy(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn wyxz(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn wyxw(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn wyyx(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn wyyy(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn wyyz(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn wyyw(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn wyzx(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn wyzy(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn wyzz(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn wyzw(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn wywx(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn wywy(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn wywz(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn wyww(self) -> IVec4 {
+ IVec4::new(self.w, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn wzxx(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn wzxy(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn wzxz(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn wzxw(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn wzyx(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn wzyy(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn wzyz(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn wzyw(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn wzzx(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn wzzy(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn wzzz(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn wzzw(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn wzwx(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn wzwy(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn wzwz(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn wzww(self) -> IVec4 {
+ IVec4::new(self.w, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wwxx(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wwxy(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wwxz(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wwxw(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wwyx(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wwyy(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wwyz(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wwyw(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wwzx(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wwzy(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wwzz(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wwzw(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwwx(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwwy(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwwz(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn wwww(self) -> IVec4 {
+ IVec4::new(self.w, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xxx(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxw(self) -> IVec3 {
+ IVec3::new(self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xyx(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyz(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xyw(self) -> IVec3 {
+ IVec3::new(self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xzx(self) -> IVec3 {
+ IVec3::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> IVec3 {
+ IVec3::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> IVec3 {
+ IVec3::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xzw(self) -> IVec3 {
+ IVec3::new(self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xwx(self) -> IVec3 {
+ IVec3::new(self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xwy(self) -> IVec3 {
+ IVec3::new(self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xwz(self) -> IVec3 {
+ IVec3::new(self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xww(self) -> IVec3 {
+ IVec3::new(self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yxx(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yxw(self) -> IVec3 {
+ IVec3::new(self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyx(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyw(self) -> IVec3 {
+ IVec3::new(self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yzx(self) -> IVec3 {
+ IVec3::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> IVec3 {
+ IVec3::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> IVec3 {
+ IVec3::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzw(self) -> IVec3 {
+ IVec3::new(self.y, self.z, self.w)
+ }
+ #[inline]
+ fn ywx(self) -> IVec3 {
+ IVec3::new(self.y, self.w, self.x)
+ }
+ #[inline]
+ fn ywy(self) -> IVec3 {
+ IVec3::new(self.y, self.w, self.y)
+ }
+ #[inline]
+ fn ywz(self) -> IVec3 {
+ IVec3::new(self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yww(self) -> IVec3 {
+ IVec3::new(self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zxx(self) -> IVec3 {
+ IVec3::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> IVec3 {
+ IVec3::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> IVec3 {
+ IVec3::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zxw(self) -> IVec3 {
+ IVec3::new(self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zyx(self) -> IVec3 {
+ IVec3::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> IVec3 {
+ IVec3::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> IVec3 {
+ IVec3::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zyw(self) -> IVec3 {
+ IVec3::new(self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzx(self) -> IVec3 {
+ IVec3::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> IVec3 {
+ IVec3::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> IVec3 {
+ IVec3::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzw(self) -> IVec3 {
+ IVec3::new(self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zwx(self) -> IVec3 {
+ IVec3::new(self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zwy(self) -> IVec3 {
+ IVec3::new(self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zwz(self) -> IVec3 {
+ IVec3::new(self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zww(self) -> IVec3 {
+ IVec3::new(self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wxx(self) -> IVec3 {
+ IVec3::new(self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wxy(self) -> IVec3 {
+ IVec3::new(self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wxz(self) -> IVec3 {
+ IVec3::new(self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wxw(self) -> IVec3 {
+ IVec3::new(self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wyx(self) -> IVec3 {
+ IVec3::new(self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wyy(self) -> IVec3 {
+ IVec3::new(self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wyz(self) -> IVec3 {
+ IVec3::new(self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wyw(self) -> IVec3 {
+ IVec3::new(self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wzx(self) -> IVec3 {
+ IVec3::new(self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wzy(self) -> IVec3 {
+ IVec3::new(self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wzz(self) -> IVec3 {
+ IVec3::new(self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wzw(self) -> IVec3 {
+ IVec3::new(self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwx(self) -> IVec3 {
+ IVec3::new(self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwy(self) -> IVec3 {
+ IVec3::new(self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwz(self) -> IVec3 {
+ IVec3::new(self.w, self.w, self.z)
+ }
+ #[inline]
+ fn www(self) -> IVec3 {
+ IVec3::new(self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xx(self) -> IVec2 {
+ IVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> IVec2 {
+ IVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> IVec2 {
+ IVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn xw(self) -> IVec2 {
+ IVec2::new(self.x, self.w)
+ }
+ #[inline]
+ fn yx(self) -> IVec2 {
+ IVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> IVec2 {
+ IVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> IVec2 {
+ IVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn yw(self) -> IVec2 {
+ IVec2::new(self.y, self.w)
+ }
+ #[inline]
+ fn zx(self) -> IVec2 {
+ IVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> IVec2 {
+ IVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> IVec2 {
+ IVec2::new(self.z, self.z)
+ }
+ #[inline]
+ fn zw(self) -> IVec2 {
+ IVec2::new(self.z, self.w)
+ }
+ #[inline]
+ fn wx(self) -> IVec2 {
+ IVec2::new(self.w, self.x)
+ }
+ #[inline]
+ fn wy(self) -> IVec2 {
+ IVec2::new(self.w, self.y)
+ }
+ #[inline]
+ fn wz(self) -> IVec2 {
+ IVec2::new(self.w, self.z)
+ }
+ #[inline]
+ fn ww(self) -> IVec2 {
+ IVec2::new(self.w, self.w)
+ }
+}
diff --git a/src/swizzles/mod.rs b/src/swizzles/mod.rs
new file mode 100644
index 0000000..d1dff7a
--- /dev/null
+++ b/src/swizzles/mod.rs
@@ -0,0 +1,35 @@
+mod dvec2_impl_scalar;
+mod dvec3_impl_scalar;
+mod dvec4_impl_scalar;
+
+mod ivec2_impl_scalar;
+mod ivec3_impl_scalar;
+mod ivec4_impl_scalar;
+
+mod uvec2_impl_scalar;
+mod uvec3_impl_scalar;
+mod uvec4_impl_scalar;
+
+mod vec2_impl_scalar;
+mod vec3_impl_scalar;
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+mod vec3a_impl_scalar;
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+mod vec3a_impl_sse2;
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+mod vec3a_impl_wasm32;
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+mod vec4_impl_scalar;
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+mod vec4_impl_sse2;
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+mod vec4_impl_wasm32;
+mod vec_traits;
+
+pub use vec_traits::*;
diff --git a/src/swizzles/uvec2_impl_scalar.rs b/src/swizzles/uvec2_impl_scalar.rs
new file mode 100644
index 0000000..60ba857
--- /dev/null
+++ b/src/swizzles/uvec2_impl_scalar.rs
@@ -0,0 +1,118 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec2Swizzles;
+use crate::{UVec2, UVec3, UVec4};
+
+impl Vec2Swizzles for UVec2 {
+ type Vec3 = UVec3;
+ type Vec4 = UVec4;
+
+ #[inline]
+ fn xxxx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxyx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyxx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyyx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yxxx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxyx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yyxx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyyx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xxx(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xyx(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxx(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyx(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xx(self) -> Self {
+ Self::new(self.x, self.x)
+ }
+ #[inline]
+ fn yx(self) -> Self {
+ Self::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Self {
+ Self::new(self.y, self.y)
+ }
+}
diff --git a/src/swizzles/uvec3_impl_scalar.rs b/src/swizzles/uvec3_impl_scalar.rs
new file mode 100644
index 0000000..5158e07
--- /dev/null
+++ b/src/swizzles/uvec3_impl_scalar.rs
@@ -0,0 +1,474 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{UVec2, UVec3, UVec4};
+
+impl Vec3Swizzles for UVec3 {
+ type Vec2 = UVec2;
+ type Vec4 = UVec4;
+
+ #[inline]
+ fn xxxx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxyx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxzx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xyxx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyyx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyzx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xzxx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzyx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzzx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yxxx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxyx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxzx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yyxx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyyx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyzx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzxx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzyx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzzx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zxxx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxyx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxzx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zyxx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyyx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyzx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zzxx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzyx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzzx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ Self::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ Self::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ Self::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ Self::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ Self::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ Self::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ Self::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ Self::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ Self::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ Self::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ Self::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ Self::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ Self::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ Self::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ Self::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ Self::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ Self::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ Self::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ Self::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ Self::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ Self::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ Self::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ Self::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ Self::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ Self::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ Self::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xx(self) -> UVec2 {
+ UVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> UVec2 {
+ UVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> UVec2 {
+ UVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn yx(self) -> UVec2 {
+ UVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> UVec2 {
+ UVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> UVec2 {
+ UVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn zx(self) -> UVec2 {
+ UVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> UVec2 {
+ UVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> UVec2 {
+ UVec2::new(self.z, self.z)
+ }
+}
diff --git a/src/swizzles/uvec4_impl_scalar.rs b/src/swizzles/uvec4_impl_scalar.rs
new file mode 100644
index 0000000..be4824c
--- /dev/null
+++ b/src/swizzles/uvec4_impl_scalar.rs
@@ -0,0 +1,1350 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{UVec2, UVec3, UVec4};
+
+impl Vec4Swizzles for UVec4 {
+ type Vec2 = UVec2;
+ type Vec3 = UVec3;
+
+ #[inline]
+ fn xxxx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxxw(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xxyx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxyw(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xxzx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xxzw(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xxwx(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xxwy(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xxwz(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xxww(self) -> UVec4 {
+ UVec4::new(self.x, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn xyxx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyxw(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn xyyx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyyw(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn xyzx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xywx(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn xywy(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn xywz(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn xyww(self) -> UVec4 {
+ UVec4::new(self.x, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn xzxx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzxw(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn xzyx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzyw(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn xzzx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xzzw(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn xzwx(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn xzwy(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn xzwz(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn xzww(self) -> UVec4 {
+ UVec4::new(self.x, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn xwxx(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn xwxy(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn xwxz(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn xwxw(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn xwyx(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn xwyy(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn xwyz(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn xwyw(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn xwzx(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn xwzy(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn xwzz(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn xwzw(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn xwwx(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn xwwy(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn xwwz(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn xwww(self) -> UVec4 {
+ UVec4::new(self.x, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn yxxx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxxw(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn yxyx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxyw(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn yxzx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxzw(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn yxwx(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn yxwy(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn yxwz(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn yxww(self) -> UVec4 {
+ UVec4::new(self.y, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yyxx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyxw(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyyx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyyw(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yyzx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yyzw(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn yywx(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn yywy(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn yywz(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yyww(self) -> UVec4 {
+ UVec4::new(self.y, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn yzxx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzxw(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn yzyx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzyw(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn yzzx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yzzw(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn yzwx(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn yzwy(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn yzwz(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn yzww(self) -> UVec4 {
+ UVec4::new(self.y, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn ywxx(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn ywxy(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn ywxz(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn ywxw(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn ywyx(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn ywyy(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn ywyz(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn ywyw(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn ywzx(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn ywzy(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn ywzz(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn ywzw(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn ywwx(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn ywwy(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn ywwz(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn ywww(self) -> UVec4 {
+ UVec4::new(self.y, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn zxxx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxxw(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn zxyx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxyw(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn zxzx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zxzw(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn zxwx(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn zxwy(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn zxwz(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn zxww(self) -> UVec4 {
+ UVec4::new(self.z, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn zyxx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyxw(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn zyyx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyyw(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn zyzx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zyzw(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn zywx(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn zywy(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn zywz(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn zyww(self) -> UVec4 {
+ UVec4::new(self.z, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zzxx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzxw(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zzyx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzyw(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzzx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzzw(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zzwx(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zzwy(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zzwz(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zzww(self) -> UVec4 {
+ UVec4::new(self.z, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn zwxx(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn zwxy(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn zwxz(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn zwxw(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn zwyx(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn zwyy(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn zwyz(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn zwyw(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn zwzx(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn zwzy(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn zwzz(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn zwzw(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn zwwx(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn zwwy(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn zwwz(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn zwww(self) -> UVec4 {
+ UVec4::new(self.z, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn wxxx(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn wxxy(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn wxxz(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn wxxw(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn wxyx(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn wxyy(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn wxyz(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn wxyw(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn wxzx(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn wxzy(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn wxzz(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn wxzw(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn wxwx(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn wxwy(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn wxwz(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn wxww(self) -> UVec4 {
+ UVec4::new(self.w, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn wyxx(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn wyxy(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn wyxz(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn wyxw(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn wyyx(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn wyyy(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn wyyz(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn wyyw(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn wyzx(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn wyzy(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn wyzz(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn wyzw(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn wywx(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn wywy(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn wywz(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn wyww(self) -> UVec4 {
+ UVec4::new(self.w, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn wzxx(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn wzxy(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn wzxz(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn wzxw(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn wzyx(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn wzyy(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn wzyz(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn wzyw(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn wzzx(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn wzzy(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn wzzz(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn wzzw(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn wzwx(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn wzwy(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn wzwz(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn wzww(self) -> UVec4 {
+ UVec4::new(self.w, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wwxx(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wwxy(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wwxz(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wwxw(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wwyx(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wwyy(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wwyz(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wwyw(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wwzx(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wwzy(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wwzz(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wwzw(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwwx(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwwy(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwwz(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn wwww(self) -> UVec4 {
+ UVec4::new(self.w, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xxx(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxw(self) -> UVec3 {
+ UVec3::new(self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xyx(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyz(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xyw(self) -> UVec3 {
+ UVec3::new(self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xzx(self) -> UVec3 {
+ UVec3::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> UVec3 {
+ UVec3::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> UVec3 {
+ UVec3::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xzw(self) -> UVec3 {
+ UVec3::new(self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xwx(self) -> UVec3 {
+ UVec3::new(self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xwy(self) -> UVec3 {
+ UVec3::new(self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xwz(self) -> UVec3 {
+ UVec3::new(self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xww(self) -> UVec3 {
+ UVec3::new(self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yxx(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yxw(self) -> UVec3 {
+ UVec3::new(self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyx(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyw(self) -> UVec3 {
+ UVec3::new(self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yzx(self) -> UVec3 {
+ UVec3::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> UVec3 {
+ UVec3::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> UVec3 {
+ UVec3::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzw(self) -> UVec3 {
+ UVec3::new(self.y, self.z, self.w)
+ }
+ #[inline]
+ fn ywx(self) -> UVec3 {
+ UVec3::new(self.y, self.w, self.x)
+ }
+ #[inline]
+ fn ywy(self) -> UVec3 {
+ UVec3::new(self.y, self.w, self.y)
+ }
+ #[inline]
+ fn ywz(self) -> UVec3 {
+ UVec3::new(self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yww(self) -> UVec3 {
+ UVec3::new(self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zxx(self) -> UVec3 {
+ UVec3::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> UVec3 {
+ UVec3::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> UVec3 {
+ UVec3::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zxw(self) -> UVec3 {
+ UVec3::new(self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zyx(self) -> UVec3 {
+ UVec3::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> UVec3 {
+ UVec3::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> UVec3 {
+ UVec3::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zyw(self) -> UVec3 {
+ UVec3::new(self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzx(self) -> UVec3 {
+ UVec3::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> UVec3 {
+ UVec3::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> UVec3 {
+ UVec3::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzw(self) -> UVec3 {
+ UVec3::new(self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zwx(self) -> UVec3 {
+ UVec3::new(self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zwy(self) -> UVec3 {
+ UVec3::new(self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zwz(self) -> UVec3 {
+ UVec3::new(self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zww(self) -> UVec3 {
+ UVec3::new(self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wxx(self) -> UVec3 {
+ UVec3::new(self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wxy(self) -> UVec3 {
+ UVec3::new(self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wxz(self) -> UVec3 {
+ UVec3::new(self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wxw(self) -> UVec3 {
+ UVec3::new(self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wyx(self) -> UVec3 {
+ UVec3::new(self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wyy(self) -> UVec3 {
+ UVec3::new(self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wyz(self) -> UVec3 {
+ UVec3::new(self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wyw(self) -> UVec3 {
+ UVec3::new(self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wzx(self) -> UVec3 {
+ UVec3::new(self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wzy(self) -> UVec3 {
+ UVec3::new(self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wzz(self) -> UVec3 {
+ UVec3::new(self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wzw(self) -> UVec3 {
+ UVec3::new(self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwx(self) -> UVec3 {
+ UVec3::new(self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwy(self) -> UVec3 {
+ UVec3::new(self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwz(self) -> UVec3 {
+ UVec3::new(self.w, self.w, self.z)
+ }
+ #[inline]
+ fn www(self) -> UVec3 {
+ UVec3::new(self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xx(self) -> UVec2 {
+ UVec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> UVec2 {
+ UVec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> UVec2 {
+ UVec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn xw(self) -> UVec2 {
+ UVec2::new(self.x, self.w)
+ }
+ #[inline]
+ fn yx(self) -> UVec2 {
+ UVec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> UVec2 {
+ UVec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> UVec2 {
+ UVec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn yw(self) -> UVec2 {
+ UVec2::new(self.y, self.w)
+ }
+ #[inline]
+ fn zx(self) -> UVec2 {
+ UVec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> UVec2 {
+ UVec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> UVec2 {
+ UVec2::new(self.z, self.z)
+ }
+ #[inline]
+ fn zw(self) -> UVec2 {
+ UVec2::new(self.z, self.w)
+ }
+ #[inline]
+ fn wx(self) -> UVec2 {
+ UVec2::new(self.w, self.x)
+ }
+ #[inline]
+ fn wy(self) -> UVec2 {
+ UVec2::new(self.w, self.y)
+ }
+ #[inline]
+ fn wz(self) -> UVec2 {
+ UVec2::new(self.w, self.z)
+ }
+ #[inline]
+ fn ww(self) -> UVec2 {
+ UVec2::new(self.w, self.w)
+ }
+}
diff --git a/src/swizzles/vec2_impl_scalar.rs b/src/swizzles/vec2_impl_scalar.rs
new file mode 100644
index 0000000..9968553
--- /dev/null
+++ b/src/swizzles/vec2_impl_scalar.rs
@@ -0,0 +1,118 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec2Swizzles;
+use crate::{Vec2, Vec3, Vec4};
+
+impl Vec2Swizzles for Vec2 {
+ type Vec3 = Vec3;
+ type Vec4 = Vec4;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xxx(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xyx(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxx(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyx(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xx(self) -> Self {
+ Self::new(self.x, self.x)
+ }
+ #[inline]
+ fn yx(self) -> Self {
+ Self::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Self {
+ Self::new(self.y, self.y)
+ }
+}
diff --git a/src/swizzles/vec3_impl_scalar.rs b/src/swizzles/vec3_impl_scalar.rs
new file mode 100644
index 0000000..83d7418
--- /dev/null
+++ b/src/swizzles/vec3_impl_scalar.rs
@@ -0,0 +1,474 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{Vec2, Vec3, Vec4};
+
+impl Vec3Swizzles for Vec3 {
+ type Vec2 = Vec2;
+ type Vec4 = Vec4;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ Self::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ Self::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ Self::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ Self::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ Self::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ Self::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ Self::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ Self::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ Self::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ Self::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ Self::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ Self::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ Self::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ Self::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ Self::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ Self::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ Self::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ Self::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ Self::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ Self::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ Self::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ Self::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ Self::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ Self::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ Self::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ Self::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ Vec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ Vec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ Vec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ Vec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ Vec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ Vec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ Vec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ Vec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ Vec2::new(self.z, self.z)
+ }
+}
diff --git a/src/swizzles/vec3a_impl_scalar.rs b/src/swizzles/vec3a_impl_scalar.rs
new file mode 100644
index 0000000..a1aaac3
--- /dev/null
+++ b/src/swizzles/vec3a_impl_scalar.rs
@@ -0,0 +1,474 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{Vec2, Vec3A, Vec4};
+
+impl Vec3Swizzles for Vec3A {
+ type Vec2 = Vec2;
+ type Vec4 = Vec4;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ Self::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ Self::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ Self::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ Self::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ Self::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ Self::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ Self::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ Self::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ Self::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ Self::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ Self::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ Self::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ Self::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ Self::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ Self::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ Self::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ Self::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ Self::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ Self::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ Self::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ Self::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ Self::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ Self::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ Self::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ Self::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ Self::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ Vec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ Vec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ Vec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ Vec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ Vec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ Vec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ Vec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ Vec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ Vec2::new(self.z, self.z)
+ }
+}
diff --git a/src/swizzles/vec3a_impl_sse2.rs b/src/swizzles/vec3a_impl_sse2.rs
new file mode 100644
index 0000000..275ad70
--- /dev/null
+++ b/src/swizzles/vec3a_impl_sse2.rs
@@ -0,0 +1,479 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{Vec2, Vec3A, Vec4, XY};
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+impl Vec3Swizzles for Vec3A {
+ type Vec2 = Vec2;
+ type Vec4 = Vec4;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00)) }
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_00)) }
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_00)) }
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_00)) }
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_00)) }
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_00)) }
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_00)) }
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_00)) }
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_00)) }
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00)) }
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_00)) }
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_00)) }
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_00)) }
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_00)) }
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_00)) }
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_00)) }
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_00)) }
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_00)) }
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00)) }
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_00)) }
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_00)) }
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_00)) }
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_00)) }
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_00)) }
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_00)) }
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_00)) }
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_00)) }
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01)) }
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_01)) }
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_01)) }
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_01)) }
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_01)) }
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_01)) }
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_01)) }
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_01)) }
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_01)) }
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01)) }
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_01)) }
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_01)) }
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_01)) }
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_01)) }
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_01)) }
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_01)) }
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_01)) }
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_01)) }
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01)) }
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_01)) }
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_01)) }
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_01)) }
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_01)) }
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_01)) }
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_01)) }
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_01)) }
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_01)) }
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10)) }
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_10)) }
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_10)) }
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_10)) }
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_10)) }
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_10)) }
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_10)) }
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_10)) }
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_10)) }
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10)) }
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_10)) }
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_10)) }
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_10)) }
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_10)) }
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_10)) }
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_10)) }
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_10)) }
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_10)) }
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10)) }
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_10)) }
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_10)) }
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_10)) }
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_10)) }
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_10)) }
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_10)) }
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_10)) }
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_10)) }
+ }
+ #[inline]
+ fn xxx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00)) }
+ }
+ #[inline]
+ fn xxy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_00)) }
+ }
+ #[inline]
+ fn xxz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_00)) }
+ }
+ #[inline]
+ fn xyx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00)) }
+ }
+ #[inline]
+ fn xyy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_00)) }
+ }
+ #[inline]
+ fn xzx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00)) }
+ }
+ #[inline]
+ fn xzy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_00)) }
+ }
+ #[inline]
+ fn xzz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_00)) }
+ }
+ #[inline]
+ fn yxx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01)) }
+ }
+ #[inline]
+ fn yxy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_01)) }
+ }
+ #[inline]
+ fn yxz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_01)) }
+ }
+ #[inline]
+ fn yyx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01)) }
+ }
+ #[inline]
+ fn yyy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_01)) }
+ }
+ #[inline]
+ fn yyz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_01)) }
+ }
+ #[inline]
+ fn yzx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01)) }
+ }
+ #[inline]
+ fn yzy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_01)) }
+ }
+ #[inline]
+ fn yzz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_01)) }
+ }
+ #[inline]
+ fn zxx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10)) }
+ }
+ #[inline]
+ fn zxy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_10)) }
+ }
+ #[inline]
+ fn zxz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_10)) }
+ }
+ #[inline]
+ fn zyx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10)) }
+ }
+ #[inline]
+ fn zyy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_10)) }
+ }
+ #[inline]
+ fn zyz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_10)) }
+ }
+ #[inline]
+ fn zzx(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10)) }
+ }
+ #[inline]
+ fn zzy(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_10)) }
+ }
+ #[inline]
+ fn zzz(self) -> Self {
+ unsafe { Self(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_10)) }
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00))) }
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00))) }
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00))) }
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01))) }
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01))) }
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01))) }
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10))) }
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10))) }
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10))) }
+ }
+}
diff --git a/src/swizzles/vec3a_impl_wasm32.rs b/src/swizzles/vec3a_impl_wasm32.rs
new file mode 100644
index 0000000..fa5e01a
--- /dev/null
+++ b/src/swizzles/vec3a_impl_wasm32.rs
@@ -0,0 +1,476 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec3Swizzles;
+use crate::{Vec2, Vec3A, Vec4, XY};
+
+use core::arch::wasm32::*;
+
+impl Vec3Swizzles for Vec3A { // Generated swizzles. i32x4_shuffle indices 0-3 select lanes from the 1st operand, 4-7 from the 2nd; both operands are `self.0`, so 4..=7 alias x,y,z,w.
+    type Vec2 = Vec2;
+    type Vec4 = Vec4;
+
+    #[inline]
+    fn xxxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0)) // e.g. <0, 0, 4, 4> picks lanes (x, x, x, x)
+    }
+    #[inline]
+    fn xxxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xxxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xxyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xxyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xxyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xxzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xxzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xxzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 0, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xyxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xyxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xyxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xyyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xyyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xyyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xyzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xyzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xyzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 1, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xzxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xzxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xzyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xzyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xzzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn xzzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<0, 2, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yxxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yxxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yxyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yxyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yxzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yxzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 0, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yyxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yyxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yyyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yyyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yyzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yyzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 1, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yzxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yzxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yzyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yzyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn yzzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn yzzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<1, 2, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zxxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zxxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zxyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zxyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zxzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zxzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 0, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zyxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zyxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zyyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zyyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zyzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zyzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 1, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zzxx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzxy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 4, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zzxz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 4, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zzyx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzyy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 5, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zzyz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 5, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn zzzx(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzzy(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 6, 5>(self.0, self.0))
+    }
+    #[inline]
+    fn zzzz(self) -> Vec4 {
+        Vec4(i32x4_shuffle::<2, 2, 6, 6>(self.0, self.0))
+    }
+    #[inline]
+    fn xxx(self) -> Self { // 3-element swizzles still emit 4 lanes; the 4th index (4 == x) is a don't-care filler, as Vec3A ignores its top lane. `xyz` is absent (identity).
+        Self(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xxy(self) -> Self {
+        Self(i32x4_shuffle::<0, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xxz(self) -> Self {
+        Self(i32x4_shuffle::<0, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xyx(self) -> Self {
+        Self(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xyy(self) -> Self {
+        Self(i32x4_shuffle::<0, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzx(self) -> Self {
+        Self(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzy(self) -> Self {
+        Self(i32x4_shuffle::<0, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xzz(self) -> Self {
+        Self(i32x4_shuffle::<0, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxx(self) -> Self {
+        Self(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxy(self) -> Self {
+        Self(i32x4_shuffle::<1, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yxz(self) -> Self {
+        Self(i32x4_shuffle::<1, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyx(self) -> Self {
+        Self(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyy(self) -> Self {
+        Self(i32x4_shuffle::<1, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yyz(self) -> Self {
+        Self(i32x4_shuffle::<1, 1, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzx(self) -> Self {
+        Self(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzy(self) -> Self {
+        Self(i32x4_shuffle::<1, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn yzz(self) -> Self {
+        Self(i32x4_shuffle::<1, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxx(self) -> Self {
+        Self(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxy(self) -> Self {
+        Self(i32x4_shuffle::<2, 0, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zxz(self) -> Self {
+        Self(i32x4_shuffle::<2, 0, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyx(self) -> Self {
+        Self(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyy(self) -> Self {
+        Self(i32x4_shuffle::<2, 1, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zyz(self) -> Self {
+        Self(i32x4_shuffle::<2, 1, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzx(self) -> Self {
+        Self(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzy(self) -> Self {
+        Self(i32x4_shuffle::<2, 2, 5, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn zzz(self) -> Self {
+        Self(i32x4_shuffle::<2, 2, 6, 4>(self.0, self.0))
+    }
+    #[inline]
+    fn xx(self) -> Vec2 { // 2-element swizzles shuffle the wanted lanes into positions 0-1, then XY::from presumably keeps the low two lanes (note `xy` below uses the identity <0, 1, _, _>)
+        Vec2(XY::from(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn xy(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn xz(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn yx(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn yy(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn yz(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn zx(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn zy(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0)))
+    }
+    #[inline]
+    fn zz(self) -> Vec2 {
+        Vec2(XY::from(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0)))
+    }
+}
diff --git a/src/swizzles/vec4_impl_scalar.rs b/src/swizzles/vec4_impl_scalar.rs
new file mode 100644
index 0000000..19f97a7
--- /dev/null
+++ b/src/swizzles/vec4_impl_scalar.rs
@@ -0,0 +1,1350 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{Vec2, Vec3, Vec4};
+
+impl Vec4Swizzles for Vec4 {
+ type Vec2 = Vec2;
+ type Vec3 = Vec3;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxxw(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xxyw(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xxzw(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xxwx(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xxwy(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xxwz(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xxww(self) -> Vec4 {
+ Vec4::new(self.x, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn xyxw(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn xyyw(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn xywx(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn xywy(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn xywz(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn xyww(self) -> Vec4 {
+ Vec4::new(self.x, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn xzxw(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn xzyw(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn xzzw(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn xzwx(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn xzwy(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn xzwz(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn xzww(self) -> Vec4 {
+ Vec4::new(self.x, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn xwxx(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn xwxy(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn xwxz(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn xwxw(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn xwyx(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn xwyy(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn xwyz(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn xwyw(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn xwzx(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn xwzy(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn xwzz(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn xwzw(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn xwwx(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn xwwy(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn xwwz(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn xwww(self) -> Vec4 {
+ Vec4::new(self.x, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn yxxw(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn yxyw(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn yxzw(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn yxwx(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn yxwy(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn yxwz(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn yxww(self) -> Vec4 {
+ Vec4::new(self.y, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yyxw(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyyw(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yyzw(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn yywx(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn yywy(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn yywz(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yyww(self) -> Vec4 {
+ Vec4::new(self.y, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn yzxw(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn yzyw(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn yzzw(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn yzwx(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn yzwy(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn yzwz(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn yzww(self) -> Vec4 {
+ Vec4::new(self.y, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn ywxx(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn ywxy(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn ywxz(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn ywxw(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn ywyx(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn ywyy(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn ywyz(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn ywyw(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn ywzx(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn ywzy(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn ywzz(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn ywzw(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn ywwx(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn ywwy(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn ywwz(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn ywww(self) -> Vec4 {
+ Vec4::new(self.y, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn zxxw(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn zxyw(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn zxzw(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn zxwx(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn zxwy(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn zxwz(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn zxww(self) -> Vec4 {
+ Vec4::new(self.z, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn zyxw(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn zyyw(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn zyzw(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn zywx(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn zywy(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn zywz(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn zyww(self) -> Vec4 {
+ Vec4::new(self.z, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zzxw(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zzyw(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzzw(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zzwx(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zzwy(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zzwz(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zzww(self) -> Vec4 {
+ Vec4::new(self.z, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn zwxx(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn zwxy(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn zwxz(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn zwxw(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn zwyx(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn zwyy(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn zwyz(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn zwyw(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn zwzx(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn zwzy(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn zwzz(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn zwzw(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn zwwx(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn zwwy(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn zwwz(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn zwww(self) -> Vec4 {
+ Vec4::new(self.z, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn wxxx(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.x, self.x)
+ }
+ #[inline]
+ fn wxxy(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.x, self.y)
+ }
+ #[inline]
+ fn wxxz(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.x, self.z)
+ }
+ #[inline]
+ fn wxxw(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.x, self.w)
+ }
+ #[inline]
+ fn wxyx(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.y, self.x)
+ }
+ #[inline]
+ fn wxyy(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.y, self.y)
+ }
+ #[inline]
+ fn wxyz(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.y, self.z)
+ }
+ #[inline]
+ fn wxyw(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.y, self.w)
+ }
+ #[inline]
+ fn wxzx(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.z, self.x)
+ }
+ #[inline]
+ fn wxzy(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.z, self.y)
+ }
+ #[inline]
+ fn wxzz(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.z, self.z)
+ }
+ #[inline]
+ fn wxzw(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.z, self.w)
+ }
+ #[inline]
+ fn wxwx(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.w, self.x)
+ }
+ #[inline]
+ fn wxwy(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.w, self.y)
+ }
+ #[inline]
+ fn wxwz(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.w, self.z)
+ }
+ #[inline]
+ fn wxww(self) -> Vec4 {
+ Vec4::new(self.w, self.x, self.w, self.w)
+ }
+ #[inline]
+ fn wyxx(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.x, self.x)
+ }
+ #[inline]
+ fn wyxy(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.x, self.y)
+ }
+ #[inline]
+ fn wyxz(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.x, self.z)
+ }
+ #[inline]
+ fn wyxw(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.x, self.w)
+ }
+ #[inline]
+ fn wyyx(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.y, self.x)
+ }
+ #[inline]
+ fn wyyy(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.y, self.y)
+ }
+ #[inline]
+ fn wyyz(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.y, self.z)
+ }
+ #[inline]
+ fn wyyw(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.y, self.w)
+ }
+ #[inline]
+ fn wyzx(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.z, self.x)
+ }
+ #[inline]
+ fn wyzy(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.z, self.y)
+ }
+ #[inline]
+ fn wyzz(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.z, self.z)
+ }
+ #[inline]
+ fn wyzw(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.z, self.w)
+ }
+ #[inline]
+ fn wywx(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.w, self.x)
+ }
+ #[inline]
+ fn wywy(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.w, self.y)
+ }
+ #[inline]
+ fn wywz(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.w, self.z)
+ }
+ #[inline]
+ fn wyww(self) -> Vec4 {
+ Vec4::new(self.w, self.y, self.w, self.w)
+ }
+ #[inline]
+ fn wzxx(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.x, self.x)
+ }
+ #[inline]
+ fn wzxy(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.x, self.y)
+ }
+ #[inline]
+ fn wzxz(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.x, self.z)
+ }
+ #[inline]
+ fn wzxw(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.x, self.w)
+ }
+ #[inline]
+ fn wzyx(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.y, self.x)
+ }
+ #[inline]
+ fn wzyy(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.y, self.y)
+ }
+ #[inline]
+ fn wzyz(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.y, self.z)
+ }
+ #[inline]
+ fn wzyw(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.y, self.w)
+ }
+ #[inline]
+ fn wzzx(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.z, self.x)
+ }
+ #[inline]
+ fn wzzy(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.z, self.y)
+ }
+ #[inline]
+ fn wzzz(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.z, self.z)
+ }
+ #[inline]
+ fn wzzw(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.z, self.w)
+ }
+ #[inline]
+ fn wzwx(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.w, self.x)
+ }
+ #[inline]
+ fn wzwy(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.w, self.y)
+ }
+ #[inline]
+ fn wzwz(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.w, self.z)
+ }
+ #[inline]
+ fn wzww(self) -> Vec4 {
+ Vec4::new(self.w, self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wwxx(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wwxy(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wwxz(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wwxw(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wwyx(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wwyy(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wwyz(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wwyw(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wwzx(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wwzy(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wwzz(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wwzw(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwwx(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwwy(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwwz(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.w, self.z)
+ }
+ #[inline]
+ fn wwww(self) -> Vec4 {
+ Vec4::new(self.w, self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xxx(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.x)
+ }
+ #[inline]
+ fn xxy(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.y)
+ }
+ #[inline]
+ fn xxz(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.z)
+ }
+ #[inline]
+ fn xxw(self) -> Vec3 {
+ Vec3::new(self.x, self.x, self.w)
+ }
+ #[inline]
+ fn xyx(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.x)
+ }
+ #[inline]
+ fn xyy(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.y)
+ }
+ #[inline]
+ fn xyz(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.z)
+ }
+ #[inline]
+ fn xyw(self) -> Vec3 {
+ Vec3::new(self.x, self.y, self.w)
+ }
+ #[inline]
+ fn xzx(self) -> Vec3 {
+ Vec3::new(self.x, self.z, self.x)
+ }
+ #[inline]
+ fn xzy(self) -> Vec3 {
+ Vec3::new(self.x, self.z, self.y)
+ }
+ #[inline]
+ fn xzz(self) -> Vec3 {
+ Vec3::new(self.x, self.z, self.z)
+ }
+ #[inline]
+ fn xzw(self) -> Vec3 {
+ Vec3::new(self.x, self.z, self.w)
+ }
+ #[inline]
+ fn xwx(self) -> Vec3 {
+ Vec3::new(self.x, self.w, self.x)
+ }
+ #[inline]
+ fn xwy(self) -> Vec3 {
+ Vec3::new(self.x, self.w, self.y)
+ }
+ #[inline]
+ fn xwz(self) -> Vec3 {
+ Vec3::new(self.x, self.w, self.z)
+ }
+ #[inline]
+ fn xww(self) -> Vec3 {
+ Vec3::new(self.x, self.w, self.w)
+ }
+ #[inline]
+ fn yxx(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.x)
+ }
+ #[inline]
+ fn yxy(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.y)
+ }
+ #[inline]
+ fn yxz(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.z)
+ }
+ #[inline]
+ fn yxw(self) -> Vec3 {
+ Vec3::new(self.y, self.x, self.w)
+ }
+ #[inline]
+ fn yyx(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.x)
+ }
+ #[inline]
+ fn yyy(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.y)
+ }
+ #[inline]
+ fn yyz(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.z)
+ }
+ #[inline]
+ fn yyw(self) -> Vec3 {
+ Vec3::new(self.y, self.y, self.w)
+ }
+ #[inline]
+ fn yzx(self) -> Vec3 {
+ Vec3::new(self.y, self.z, self.x)
+ }
+ #[inline]
+ fn yzy(self) -> Vec3 {
+ Vec3::new(self.y, self.z, self.y)
+ }
+ #[inline]
+ fn yzz(self) -> Vec3 {
+ Vec3::new(self.y, self.z, self.z)
+ }
+ #[inline]
+ fn yzw(self) -> Vec3 {
+ Vec3::new(self.y, self.z, self.w)
+ }
+ #[inline]
+ fn ywx(self) -> Vec3 {
+ Vec3::new(self.y, self.w, self.x)
+ }
+ #[inline]
+ fn ywy(self) -> Vec3 {
+ Vec3::new(self.y, self.w, self.y)
+ }
+ #[inline]
+ fn ywz(self) -> Vec3 {
+ Vec3::new(self.y, self.w, self.z)
+ }
+ #[inline]
+ fn yww(self) -> Vec3 {
+ Vec3::new(self.y, self.w, self.w)
+ }
+ #[inline]
+ fn zxx(self) -> Vec3 {
+ Vec3::new(self.z, self.x, self.x)
+ }
+ #[inline]
+ fn zxy(self) -> Vec3 {
+ Vec3::new(self.z, self.x, self.y)
+ }
+ #[inline]
+ fn zxz(self) -> Vec3 {
+ Vec3::new(self.z, self.x, self.z)
+ }
+ #[inline]
+ fn zxw(self) -> Vec3 {
+ Vec3::new(self.z, self.x, self.w)
+ }
+ #[inline]
+ fn zyx(self) -> Vec3 {
+ Vec3::new(self.z, self.y, self.x)
+ }
+ #[inline]
+ fn zyy(self) -> Vec3 {
+ Vec3::new(self.z, self.y, self.y)
+ }
+ #[inline]
+ fn zyz(self) -> Vec3 {
+ Vec3::new(self.z, self.y, self.z)
+ }
+ #[inline]
+ fn zyw(self) -> Vec3 {
+ Vec3::new(self.z, self.y, self.w)
+ }
+ #[inline]
+ fn zzx(self) -> Vec3 {
+ Vec3::new(self.z, self.z, self.x)
+ }
+ #[inline]
+ fn zzy(self) -> Vec3 {
+ Vec3::new(self.z, self.z, self.y)
+ }
+ #[inline]
+ fn zzz(self) -> Vec3 {
+ Vec3::new(self.z, self.z, self.z)
+ }
+ #[inline]
+ fn zzw(self) -> Vec3 {
+ Vec3::new(self.z, self.z, self.w)
+ }
+ #[inline]
+ fn zwx(self) -> Vec3 {
+ Vec3::new(self.z, self.w, self.x)
+ }
+ #[inline]
+ fn zwy(self) -> Vec3 {
+ Vec3::new(self.z, self.w, self.y)
+ }
+ #[inline]
+ fn zwz(self) -> Vec3 {
+ Vec3::new(self.z, self.w, self.z)
+ }
+ #[inline]
+ fn zww(self) -> Vec3 {
+ Vec3::new(self.z, self.w, self.w)
+ }
+ #[inline]
+ fn wxx(self) -> Vec3 {
+ Vec3::new(self.w, self.x, self.x)
+ }
+ #[inline]
+ fn wxy(self) -> Vec3 {
+ Vec3::new(self.w, self.x, self.y)
+ }
+ #[inline]
+ fn wxz(self) -> Vec3 {
+ Vec3::new(self.w, self.x, self.z)
+ }
+ #[inline]
+ fn wxw(self) -> Vec3 {
+ Vec3::new(self.w, self.x, self.w)
+ }
+ #[inline]
+ fn wyx(self) -> Vec3 {
+ Vec3::new(self.w, self.y, self.x)
+ }
+ #[inline]
+ fn wyy(self) -> Vec3 {
+ Vec3::new(self.w, self.y, self.y)
+ }
+ #[inline]
+ fn wyz(self) -> Vec3 {
+ Vec3::new(self.w, self.y, self.z)
+ }
+ #[inline]
+ fn wyw(self) -> Vec3 {
+ Vec3::new(self.w, self.y, self.w)
+ }
+ #[inline]
+ fn wzx(self) -> Vec3 {
+ Vec3::new(self.w, self.z, self.x)
+ }
+ #[inline]
+ fn wzy(self) -> Vec3 {
+ Vec3::new(self.w, self.z, self.y)
+ }
+ #[inline]
+ fn wzz(self) -> Vec3 {
+ Vec3::new(self.w, self.z, self.z)
+ }
+ #[inline]
+ fn wzw(self) -> Vec3 {
+ Vec3::new(self.w, self.z, self.w)
+ }
+ #[inline]
+ fn wwx(self) -> Vec3 {
+ Vec3::new(self.w, self.w, self.x)
+ }
+ #[inline]
+ fn wwy(self) -> Vec3 {
+ Vec3::new(self.w, self.w, self.y)
+ }
+ #[inline]
+ fn wwz(self) -> Vec3 {
+ Vec3::new(self.w, self.w, self.z)
+ }
+ #[inline]
+ fn www(self) -> Vec3 {
+ Vec3::new(self.w, self.w, self.w)
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ Vec2::new(self.x, self.x)
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ Vec2::new(self.x, self.y)
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ Vec2::new(self.x, self.z)
+ }
+ #[inline]
+ fn xw(self) -> Vec2 {
+ Vec2::new(self.x, self.w)
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ Vec2::new(self.y, self.x)
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ Vec2::new(self.y, self.y)
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ Vec2::new(self.y, self.z)
+ }
+ #[inline]
+ fn yw(self) -> Vec2 {
+ Vec2::new(self.y, self.w)
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ Vec2::new(self.z, self.x)
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ Vec2::new(self.z, self.y)
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ Vec2::new(self.z, self.z)
+ }
+ #[inline]
+ fn zw(self) -> Vec2 {
+ Vec2::new(self.z, self.w)
+ }
+ #[inline]
+ fn wx(self) -> Vec2 {
+ Vec2::new(self.w, self.x)
+ }
+ #[inline]
+ fn wy(self) -> Vec2 {
+ Vec2::new(self.w, self.y)
+ }
+ #[inline]
+ fn wz(self) -> Vec2 {
+ Vec2::new(self.w, self.z)
+ }
+ #[inline]
+ fn ww(self) -> Vec2 {
+ Vec2::new(self.w, self.w)
+ }
+}
diff --git a/src/swizzles/vec4_impl_sse2.rs b/src/swizzles/vec4_impl_sse2.rs
new file mode 100644
index 0000000..ac8d942
--- /dev/null
+++ b/src/swizzles/vec4_impl_sse2.rs
@@ -0,0 +1,1355 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{Vec2, Vec3, Vec4, XY, XYZ};
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+impl Vec4Swizzles for Vec4 {
+ type Vec2 = Vec2;
+ type Vec3 = Vec3;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00)) }
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_00)) }
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_00)) }
+ }
+ #[inline]
+ fn xxxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_00_00)) }
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_00)) }
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_00)) }
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_00)) }
+ }
+ #[inline]
+ fn xxyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_00_00)) }
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_00)) }
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_00)) }
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_00)) }
+ }
+ #[inline]
+ fn xxzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_00_00)) }
+ }
+ #[inline]
+ fn xxwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_00)) }
+ }
+ #[inline]
+ fn xxwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_00_00)) }
+ }
+ #[inline]
+ fn xxwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_00_00)) }
+ }
+ #[inline]
+ fn xxww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_00_00)) }
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00)) }
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_00)) }
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_00)) }
+ }
+ #[inline]
+ fn xyxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_01_00)) }
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_00)) }
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_00)) }
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_00)) }
+ }
+ #[inline]
+ fn xyyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_01_00)) }
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_00)) }
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_00)) }
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_00)) }
+ }
+ #[inline]
+ fn xywx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_00)) }
+ }
+ #[inline]
+ fn xywy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_01_00)) }
+ }
+ #[inline]
+ fn xywz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_01_00)) }
+ }
+ #[inline]
+ fn xyww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_01_00)) }
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00)) }
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_00)) }
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_00)) }
+ }
+ #[inline]
+ fn xzxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_10_00)) }
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_00)) }
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_00)) }
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_00)) }
+ }
+ #[inline]
+ fn xzyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_10_00)) }
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_00)) }
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_00)) }
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_00)) }
+ }
+ #[inline]
+ fn xzzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_10_00)) }
+ }
+ #[inline]
+ fn xzwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_00)) }
+ }
+ #[inline]
+ fn xzwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_10_00)) }
+ }
+ #[inline]
+ fn xzwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_10_00)) }
+ }
+ #[inline]
+ fn xzww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_10_00)) }
+ }
+ #[inline]
+ fn xwxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_00)) }
+ }
+ #[inline]
+ fn xwxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_11_00)) }
+ }
+ #[inline]
+ fn xwxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_11_00)) }
+ }
+ #[inline]
+ fn xwxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_11_00)) }
+ }
+ #[inline]
+ fn xwyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_00)) }
+ }
+ #[inline]
+ fn xwyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_11_00)) }
+ }
+ #[inline]
+ fn xwyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_11_00)) }
+ }
+ #[inline]
+ fn xwyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_11_00)) }
+ }
+ #[inline]
+ fn xwzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_00)) }
+ }
+ #[inline]
+ fn xwzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_11_00)) }
+ }
+ #[inline]
+ fn xwzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_11_00)) }
+ }
+ #[inline]
+ fn xwzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_11_00)) }
+ }
+ #[inline]
+ fn xwwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_00)) }
+ }
+ #[inline]
+ fn xwwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_11_00)) }
+ }
+ #[inline]
+ fn xwwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_11_00)) }
+ }
+ #[inline]
+ fn xwww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_11_00)) }
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01)) }
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_01)) }
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_01)) }
+ }
+ #[inline]
+ fn yxxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_00_01)) }
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_01)) }
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_01)) }
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_01)) }
+ }
+ #[inline]
+ fn yxyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_00_01)) }
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_01)) }
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_01)) }
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_01)) }
+ }
+ #[inline]
+ fn yxzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_00_01)) }
+ }
+ #[inline]
+ fn yxwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_01)) }
+ }
+ #[inline]
+ fn yxwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_00_01)) }
+ }
+ #[inline]
+ fn yxwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_00_01)) }
+ }
+ #[inline]
+ fn yxww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_00_01)) }
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01)) }
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_01)) }
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_01)) }
+ }
+ #[inline]
+ fn yyxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_01_01)) }
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_01)) }
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_01)) }
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_01)) }
+ }
+ #[inline]
+ fn yyyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_01_01)) }
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_01)) }
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_01)) }
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_01)) }
+ }
+ #[inline]
+ fn yyzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_01_01)) }
+ }
+ #[inline]
+ fn yywx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_01)) }
+ }
+ #[inline]
+ fn yywy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_01_01)) }
+ }
+ #[inline]
+ fn yywz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_01_01)) }
+ }
+ #[inline]
+ fn yyww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_01_01)) }
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01)) }
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_01)) }
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_01)) }
+ }
+ #[inline]
+ fn yzxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_10_01)) }
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_01)) }
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_01)) }
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_01)) }
+ }
+ #[inline]
+ fn yzyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_10_01)) }
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_01)) }
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_01)) }
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_01)) }
+ }
+ #[inline]
+ fn yzzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_10_01)) }
+ }
+ #[inline]
+ fn yzwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_01)) }
+ }
+ #[inline]
+ fn yzwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_10_01)) }
+ }
+ #[inline]
+ fn yzwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_10_01)) }
+ }
+ #[inline]
+ fn yzww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_10_01)) }
+ }
+ #[inline]
+ fn ywxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_01)) }
+ }
+ #[inline]
+ fn ywxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_11_01)) }
+ }
+ #[inline]
+ fn ywxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_11_01)) }
+ }
+ #[inline]
+ fn ywxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_11_01)) }
+ }
+ #[inline]
+ fn ywyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_01)) }
+ }
+ #[inline]
+ fn ywyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_11_01)) }
+ }
+ #[inline]
+ fn ywyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_11_01)) }
+ }
+ #[inline]
+ fn ywyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_11_01)) }
+ }
+ #[inline]
+ fn ywzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_01)) }
+ }
+ #[inline]
+ fn ywzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_11_01)) }
+ }
+ #[inline]
+ fn ywzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_11_01)) }
+ }
+ #[inline]
+ fn ywzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_11_01)) }
+ }
+ #[inline]
+ fn ywwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_01)) }
+ }
+ #[inline]
+ fn ywwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_11_01)) }
+ }
+ #[inline]
+ fn ywwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_11_01)) }
+ }
+ #[inline]
+ fn ywww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_11_01)) }
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10)) }
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_10)) }
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_10)) }
+ }
+ #[inline]
+ fn zxxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_00_10)) }
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_10)) }
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_10)) }
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_10)) }
+ }
+ #[inline]
+ fn zxyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_00_10)) }
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_10)) }
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_10)) }
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_10)) }
+ }
+ #[inline]
+ fn zxzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_00_10)) }
+ }
+ #[inline]
+ fn zxwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_10)) }
+ }
+ #[inline]
+ fn zxwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_00_10)) }
+ }
+ #[inline]
+ fn zxwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_00_10)) }
+ }
+ #[inline]
+ fn zxww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_00_10)) }
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10)) }
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_10)) }
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_10)) }
+ }
+ #[inline]
+ fn zyxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_01_10)) }
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_10)) }
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_10)) }
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_10)) }
+ }
+ #[inline]
+ fn zyyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_01_10)) }
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_10)) }
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_10)) }
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_10)) }
+ }
+ #[inline]
+ fn zyzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_01_10)) }
+ }
+ #[inline]
+ fn zywx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_10)) }
+ }
+ #[inline]
+ fn zywy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_01_10)) }
+ }
+ #[inline]
+ fn zywz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_01_10)) }
+ }
+ #[inline]
+ fn zyww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_01_10)) }
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10)) }
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_10)) }
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_10)) }
+ }
+ #[inline]
+ fn zzxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_10_10)) }
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_10)) }
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_10)) }
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_10)) }
+ }
+ #[inline]
+ fn zzyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_10_10)) }
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_10)) }
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_10)) }
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_10)) }
+ }
+ #[inline]
+ fn zzzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_10_10)) }
+ }
+ #[inline]
+ fn zzwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_10)) }
+ }
+ #[inline]
+ fn zzwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_10_10)) }
+ }
+ #[inline]
+ fn zzwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_10_10)) }
+ }
+ #[inline]
+ fn zzww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_10_10)) }
+ }
+ #[inline]
+ fn zwxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_10)) }
+ }
+ #[inline]
+ fn zwxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_11_10)) }
+ }
+ #[inline]
+ fn zwxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_11_10)) }
+ }
+ #[inline]
+ fn zwxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_11_10)) }
+ }
+ #[inline]
+ fn zwyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_10)) }
+ }
+ #[inline]
+ fn zwyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_11_10)) }
+ }
+ #[inline]
+ fn zwyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_11_10)) }
+ }
+ #[inline]
+ fn zwyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_11_10)) }
+ }
+ #[inline]
+ fn zwzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_10)) }
+ }
+ #[inline]
+ fn zwzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_11_10)) }
+ }
+ #[inline]
+ fn zwzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_11_10)) }
+ }
+ #[inline]
+ fn zwzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_11_10)) }
+ }
+ #[inline]
+ fn zwwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_10)) }
+ }
+ #[inline]
+ fn zwwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_11_10)) }
+ }
+ #[inline]
+ fn zwwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_11_10)) }
+ }
+ #[inline]
+ fn zwww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_11_10)) }
+ }
+ #[inline]
+ fn wxxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_11)) }
+ }
+ #[inline]
+ fn wxxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_00_11)) }
+ }
+ #[inline]
+ fn wxxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_00_11)) }
+ }
+ #[inline]
+ fn wxxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_00_11)) }
+ }
+ #[inline]
+ fn wxyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_11)) }
+ }
+ #[inline]
+ fn wxyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_00_11)) }
+ }
+ #[inline]
+ fn wxyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_00_11)) }
+ }
+ #[inline]
+ fn wxyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_00_11)) }
+ }
+ #[inline]
+ fn wxzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_11)) }
+ }
+ #[inline]
+ fn wxzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_00_11)) }
+ }
+ #[inline]
+ fn wxzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_00_11)) }
+ }
+ #[inline]
+ fn wxzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_00_11)) }
+ }
+ #[inline]
+ fn wxwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_11)) }
+ }
+ #[inline]
+ fn wxwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_00_11)) }
+ }
+ #[inline]
+ fn wxwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_00_11)) }
+ }
+ #[inline]
+ fn wxww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_00_11)) }
+ }
+ #[inline]
+ fn wyxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_11)) }
+ }
+ #[inline]
+ fn wyxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_01_11)) }
+ }
+ #[inline]
+ fn wyxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_01_11)) }
+ }
+ #[inline]
+ fn wyxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_01_11)) }
+ }
+ #[inline]
+ fn wyyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_11)) }
+ }
+ #[inline]
+ fn wyyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_01_11)) }
+ }
+ #[inline]
+ fn wyyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_01_11)) }
+ }
+ #[inline]
+ fn wyyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_01_11)) }
+ }
+ #[inline]
+ fn wyzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_11)) }
+ }
+ #[inline]
+ fn wyzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_01_11)) }
+ }
+ #[inline]
+ fn wyzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_01_11)) }
+ }
+ #[inline]
+ fn wyzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_01_11)) }
+ }
+ #[inline]
+ fn wywx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_11)) }
+ }
+ #[inline]
+ fn wywy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_01_11)) }
+ }
+ #[inline]
+ fn wywz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_01_11)) }
+ }
+ #[inline]
+ fn wyww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_01_11)) }
+ }
+ #[inline]
+ fn wzxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_11)) }
+ }
+ #[inline]
+ fn wzxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_10_11)) }
+ }
+ #[inline]
+ fn wzxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_10_11)) }
+ }
+ #[inline]
+ fn wzxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_10_11)) }
+ }
+ #[inline]
+ fn wzyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_11)) }
+ }
+ #[inline]
+ fn wzyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_10_11)) }
+ }
+ #[inline]
+ fn wzyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_10_11)) }
+ }
+ #[inline]
+ fn wzyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_10_11)) }
+ }
+ #[inline]
+ fn wzzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_11)) }
+ }
+ #[inline]
+ fn wzzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_10_11)) }
+ }
+ #[inline]
+ fn wzzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_10_11)) }
+ }
+ #[inline]
+ fn wzzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_10_11)) }
+ }
+ #[inline]
+ fn wzwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_11)) }
+ }
+ #[inline]
+ fn wzwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_10_11)) }
+ }
+ #[inline]
+ fn wzwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_10_11)) }
+ }
+ #[inline]
+ fn wzww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_10_11)) }
+ }
+ #[inline]
+ fn wwxx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_11)) }
+ }
+ #[inline]
+ fn wwxy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_00_11_11)) }
+ }
+ #[inline]
+ fn wwxz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_00_11_11)) }
+ }
+ #[inline]
+ fn wwxw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_00_11_11)) }
+ }
+ #[inline]
+ fn wwyx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_11)) }
+ }
+ #[inline]
+ fn wwyy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_01_11_11)) }
+ }
+ #[inline]
+ fn wwyz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_01_11_11)) }
+ }
+ #[inline]
+ fn wwyw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_01_11_11)) }
+ }
+ #[inline]
+ fn wwzx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_11)) }
+ }
+ #[inline]
+ fn wwzy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_10_11_11)) }
+ }
+ #[inline]
+ fn wwzz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_10_11_11)) }
+ }
+ #[inline]
+ fn wwzw(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_10_11_11)) }
+ }
+ #[inline]
+ fn wwwx(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_11)) }
+ }
+ #[inline]
+ fn wwwy(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b01_11_11_11)) }
+ }
+ #[inline]
+ fn wwwz(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b10_11_11_11)) }
+ }
+ #[inline]
+ fn wwww(self) -> Vec4 {
+ unsafe { Vec4(_mm_shuffle_ps(self.0, self.0, 0b11_11_11_11)) }
+ }
+ #[inline]
+ fn xxx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00))) }
+ }
+ #[inline]
+ fn xxy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_00))) }
+ }
+ #[inline]
+ fn xxz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_00))) }
+ }
+ #[inline]
+ fn xxw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_00))) }
+ }
+ #[inline]
+ fn xyx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00))) }
+ }
+ #[inline]
+ fn xyy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_00))) }
+ }
+ #[inline]
+ fn xyz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_00))) }
+ }
+ #[inline]
+ fn xyw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_00))) }
+ }
+ #[inline]
+ fn xzx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00))) }
+ }
+ #[inline]
+ fn xzy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_00))) }
+ }
+ #[inline]
+ fn xzz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_00))) }
+ }
+ #[inline]
+ fn xzw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_00))) }
+ }
+ #[inline]
+ fn xwx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_00))) }
+ }
+ #[inline]
+ fn xwy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_00))) }
+ }
+ #[inline]
+ fn xwz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_00))) }
+ }
+ #[inline]
+ fn xww(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_00))) }
+ }
+ #[inline]
+ fn yxx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01))) }
+ }
+ #[inline]
+ fn yxy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_01))) }
+ }
+ #[inline]
+ fn yxz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_01))) }
+ }
+ #[inline]
+ fn yxw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_01))) }
+ }
+ #[inline]
+ fn yyx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01))) }
+ }
+ #[inline]
+ fn yyy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_01))) }
+ }
+ #[inline]
+ fn yyz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_01))) }
+ }
+ #[inline]
+ fn yyw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_01))) }
+ }
+ #[inline]
+ fn yzx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01))) }
+ }
+ #[inline]
+ fn yzy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_01))) }
+ }
+ #[inline]
+ fn yzz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_01))) }
+ }
+ #[inline]
+ fn yzw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_01))) }
+ }
+ #[inline]
+ fn ywx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_01))) }
+ }
+ #[inline]
+ fn ywy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_01))) }
+ }
+ #[inline]
+ fn ywz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_01))) }
+ }
+ #[inline]
+ fn yww(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_01))) }
+ }
+ #[inline]
+ fn zxx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10))) }
+ }
+ #[inline]
+ fn zxy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_10))) }
+ }
+ #[inline]
+ fn zxz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_10))) }
+ }
+ #[inline]
+ fn zxw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_10))) }
+ }
+ #[inline]
+ fn zyx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10))) }
+ }
+ #[inline]
+ fn zyy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_10))) }
+ }
+ #[inline]
+ fn zyz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_10))) }
+ }
+ #[inline]
+ fn zyw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_10))) }
+ }
+ #[inline]
+ fn zzx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10))) }
+ }
+ #[inline]
+ fn zzy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_10))) }
+ }
+ #[inline]
+ fn zzz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_10))) }
+ }
+ #[inline]
+ fn zzw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_10))) }
+ }
+ #[inline]
+ fn zwx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_10))) }
+ }
+ #[inline]
+ fn zwy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_10))) }
+ }
+ #[inline]
+ fn zwz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_10))) }
+ }
+ #[inline]
+ fn zww(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_10))) }
+ }
+ #[inline]
+ fn wxx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_11))) }
+ }
+ #[inline]
+ fn wxy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_00_11))) }
+ }
+ #[inline]
+ fn wxz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_00_11))) }
+ }
+ #[inline]
+ fn wxw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_00_11))) }
+ }
+ #[inline]
+ fn wyx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_11))) }
+ }
+ #[inline]
+ fn wyy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_01_11))) }
+ }
+ #[inline]
+ fn wyz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_01_11))) }
+ }
+ #[inline]
+ fn wyw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_01_11))) }
+ }
+ #[inline]
+ fn wzx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_11))) }
+ }
+ #[inline]
+ fn wzy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_10_11))) }
+ }
+ #[inline]
+ fn wzz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_10_11))) }
+ }
+ #[inline]
+ fn wzw(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_10_11))) }
+ }
+ #[inline]
+ fn wwx(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_11))) }
+ }
+ #[inline]
+ fn wwy(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_01_11_11))) }
+ }
+ #[inline]
+ fn wwz(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_10_11_11))) }
+ }
+ #[inline]
+ fn www(self) -> Vec3 {
+ unsafe { Vec3(XYZ::from(_mm_shuffle_ps(self.0, self.0, 0b00_11_11_11))) }
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_00))) }
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_00))) }
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_00))) }
+ }
+ #[inline]
+ fn xw(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_00))) }
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_01))) }
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_01))) }
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_01))) }
+ }
+ #[inline]
+ fn yw(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_01))) }
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_10))) }
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_10))) }
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_10))) }
+ }
+ #[inline]
+ fn zw(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_10))) }
+ }
+ #[inline]
+ fn wx(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_00_11))) }
+ }
+ #[inline]
+ fn wy(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_01_11))) }
+ }
+ #[inline]
+ fn wz(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_10_11))) }
+ }
+ #[inline]
+ fn ww(self) -> Vec2 {
+ unsafe { Vec2(XY::from(_mm_shuffle_ps(self.0, self.0, 0b00_00_11_11))) }
+ }
+}
diff --git a/src/swizzles/vec4_impl_wasm32.rs b/src/swizzles/vec4_impl_wasm32.rs
new file mode 100644
index 0000000..46c7a22
--- /dev/null
+++ b/src/swizzles/vec4_impl_wasm32.rs
@@ -0,0 +1,1352 @@
+// Generated by swizzlegen. Do not edit.
+
+use super::Vec4Swizzles;
+use crate::{Vec2, Vec3, Vec4, XY, XYZ};
+
+use core::arch::wasm32::*;
+
+impl Vec4Swizzles for Vec4 {
+ type Vec2 = Vec2;
+ type Vec3 = Vec3;
+
+ #[inline]
+ fn xxxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xxxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xxxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xxxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xxyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xxyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xxyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xxyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xxzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xxzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xxzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xxzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xxwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xxwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xxwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xxww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 0, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xyxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xyxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xyxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xyxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xyyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xyyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xyyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xyyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xyzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xyzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xyzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xywx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xywy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xywz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xyww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 1, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xzxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xzxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xzxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xzxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xzyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xzyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xzyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xzyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xzzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xzzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xzzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xzzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xzwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xzwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xzwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xzww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 2, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xwxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xwxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xwxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xwxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xwyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xwyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xwyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xwyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xwzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xwzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xwzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xwzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xwwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn xwwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn xwwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn xwww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<0, 3, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yxxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yxxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yxxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yxxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yxyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yxyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yxyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yxyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yxzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yxzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yxzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yxzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yxwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yxwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yxwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yxww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 0, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yyxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yyxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yyxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yyxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yyyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yyyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yyyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yyyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yyzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yyzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yyzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yyzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yywx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yywy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yywz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yyww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 1, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yzxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yzxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yzxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yzxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yzyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yzyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yzyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yzyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yzzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yzzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yzzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yzzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn yzwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn yzwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn yzwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn yzww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 2, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn ywxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn ywxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn ywxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn ywxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn ywyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn ywyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn ywyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn ywyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn ywzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn ywzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn ywzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn ywzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn ywwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn ywwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn ywwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn ywww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<1, 3, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zxxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zxxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zxxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zxxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zxyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zxyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zxyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zxyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zxzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zxzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zxzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zxzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zxwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zxwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zxwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zxww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 0, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zyxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zyxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zyxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zyxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zyyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zyyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zyyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zyyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zyzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zyzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zyzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zyzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zywx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zywy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zywz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zyww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 1, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zzxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zzxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zzxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zzxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zzyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zzyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zzyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zzyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zzzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zzzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zzzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zzzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zzwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zzwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zzwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zzww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 2, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zwxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zwxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zwxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zwxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zwyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zwyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zwyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zwyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zwzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zwzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zwzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zwzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn zwwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn zwwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn zwwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn zwww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<2, 3, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wxxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wxxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wxxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wxxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wxyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wxyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wxyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wxyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wxzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wxzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wxzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wxzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wxwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wxwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wxwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wxww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 0, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wyxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wyxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wyxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wyxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wyyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wyyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wyyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wyyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wyzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wyzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wyzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wyzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wywx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wywy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wywz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wyww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 1, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wzxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wzxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wzxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wzxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wzyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wzyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wzyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wzyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wzzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wzzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wzzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wzzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wzwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wzwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wzwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wzww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 2, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wwxx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 4, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wwxy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 4, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wwxz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 4, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wwxw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 4, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wwyx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 5, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wwyy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 5, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wwyz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 5, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wwyw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 5, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wwzx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 6, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wwzy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 6, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wwzz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 6, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wwzw(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 6, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn wwwx(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 7, 4>(self.0, self.0))
+ }
+ #[inline]
+ fn wwwy(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 7, 5>(self.0, self.0))
+ }
+ #[inline]
+ fn wwwz(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 7, 6>(self.0, self.0))
+ }
+ #[inline]
+ fn wwww(self) -> Vec4 {
+ Vec4(i32x4_shuffle::<3, 3, 7, 7>(self.0, self.0))
+ }
+ #[inline]
+ fn xxx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xxy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 0, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xxz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 0, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xxw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 0, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xyx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xyy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 1, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xyz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 1, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xyw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 1, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xzx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xzy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 2, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xzz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 2, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xzw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 2, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xwx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xwy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 3, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xwz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 3, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xww(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<0, 3, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yxx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yxy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 0, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yxz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 0, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yxw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 0, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yyx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yyy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 1, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yyz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 1, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yyw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 1, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yzx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yzy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 2, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yzz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 2, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yzw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 2, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn ywx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn ywy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 3, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn ywz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 3, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yww(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<1, 3, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zxx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zxy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 0, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zxz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 0, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zxw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 0, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zyx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zyy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 1, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zyz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 1, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zyw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 1, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zzx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zzy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 2, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zzz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 2, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zzw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 2, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zwx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zwy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 3, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zwz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 3, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zww(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<2, 3, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wxx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wxy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 0, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wxz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 0, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wxw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 0, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wyx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wyy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 1, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wyz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 1, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wyw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 1, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wzx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wzy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 2, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wzz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 2, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wzw(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 2, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wwx(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wwy(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 3, 5, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wwz(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 3, 6, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn www(self) -> Vec3 {
+ Vec3(XYZ::from(i32x4_shuffle::<3, 3, 7, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xx(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<0, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xy(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<0, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xz(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<0, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn xw(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<0, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yx(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<1, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yy(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<1, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yz(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<1, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn yw(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<1, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zx(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<2, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zy(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<2, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zz(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<2, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn zw(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<2, 3, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wx(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<3, 0, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wy(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<3, 1, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn wz(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<3, 2, 4, 4>(self.0, self.0)))
+ }
+ #[inline]
+ fn ww(self) -> Vec2 {
+ Vec2(XY::from(i32x4_shuffle::<3, 3, 4, 4>(self.0, self.0)))
+ }
+}
diff --git a/src/swizzles/vec_traits.rs b/src/swizzles/vec_traits.rs
new file mode 100644
index 0000000..fc9bb4a
--- /dev/null
+++ b/src/swizzles/vec_traits.rs
@@ -0,0 +1,512 @@
+// Generated by swizzlegen. Do not edit.
+/** Swizzle methods for 2-dimensional vector types. */
+pub trait Vec2Swizzles: Sized + Copy + Clone {
+ type Vec3;
+ type Vec4;
+
+ #[inline]
+ fn xy(self) -> Self {
+ self
+ }
+
+ fn xxxx(self) -> Self::Vec4;
+ fn xxxy(self) -> Self::Vec4;
+ fn xxyx(self) -> Self::Vec4;
+ fn xxyy(self) -> Self::Vec4;
+ fn xyxx(self) -> Self::Vec4;
+ fn xyxy(self) -> Self::Vec4;
+ fn xyyx(self) -> Self::Vec4;
+ fn xyyy(self) -> Self::Vec4;
+ fn yxxx(self) -> Self::Vec4;
+ fn yxxy(self) -> Self::Vec4;
+ fn yxyx(self) -> Self::Vec4;
+ fn yxyy(self) -> Self::Vec4;
+ fn yyxx(self) -> Self::Vec4;
+ fn yyxy(self) -> Self::Vec4;
+ fn yyyx(self) -> Self::Vec4;
+ fn yyyy(self) -> Self::Vec4;
+ fn xxx(self) -> Self::Vec3;
+ fn xxy(self) -> Self::Vec3;
+ fn xyx(self) -> Self::Vec3;
+ fn xyy(self) -> Self::Vec3;
+ fn yxx(self) -> Self::Vec3;
+ fn yxy(self) -> Self::Vec3;
+ fn yyx(self) -> Self::Vec3;
+ fn yyy(self) -> Self::Vec3;
+ fn xx(self) -> Self;
+ fn yx(self) -> Self;
+ fn yy(self) -> Self;
+}
+/** Swizzle methods for 3-dimensional vector types. */
+pub trait Vec3Swizzles: Sized + Copy + Clone {
+ type Vec2;
+ type Vec4;
+
+ #[inline]
+ fn xyz(self) -> Self {
+ self
+ }
+
+ fn xxxx(self) -> Self::Vec4;
+ fn xxxy(self) -> Self::Vec4;
+ fn xxxz(self) -> Self::Vec4;
+ fn xxyx(self) -> Self::Vec4;
+ fn xxyy(self) -> Self::Vec4;
+ fn xxyz(self) -> Self::Vec4;
+ fn xxzx(self) -> Self::Vec4;
+ fn xxzy(self) -> Self::Vec4;
+ fn xxzz(self) -> Self::Vec4;
+ fn xyxx(self) -> Self::Vec4;
+ fn xyxy(self) -> Self::Vec4;
+ fn xyxz(self) -> Self::Vec4;
+ fn xyyx(self) -> Self::Vec4;
+ fn xyyy(self) -> Self::Vec4;
+ fn xyyz(self) -> Self::Vec4;
+ fn xyzx(self) -> Self::Vec4;
+ fn xyzy(self) -> Self::Vec4;
+ fn xyzz(self) -> Self::Vec4;
+ fn xzxx(self) -> Self::Vec4;
+ fn xzxy(self) -> Self::Vec4;
+ fn xzxz(self) -> Self::Vec4;
+ fn xzyx(self) -> Self::Vec4;
+ fn xzyy(self) -> Self::Vec4;
+ fn xzyz(self) -> Self::Vec4;
+ fn xzzx(self) -> Self::Vec4;
+ fn xzzy(self) -> Self::Vec4;
+ fn xzzz(self) -> Self::Vec4;
+ fn yxxx(self) -> Self::Vec4;
+ fn yxxy(self) -> Self::Vec4;
+ fn yxxz(self) -> Self::Vec4;
+ fn yxyx(self) -> Self::Vec4;
+ fn yxyy(self) -> Self::Vec4;
+ fn yxyz(self) -> Self::Vec4;
+ fn yxzx(self) -> Self::Vec4;
+ fn yxzy(self) -> Self::Vec4;
+ fn yxzz(self) -> Self::Vec4;
+ fn yyxx(self) -> Self::Vec4;
+ fn yyxy(self) -> Self::Vec4;
+ fn yyxz(self) -> Self::Vec4;
+ fn yyyx(self) -> Self::Vec4;
+ fn yyyy(self) -> Self::Vec4;
+ fn yyyz(self) -> Self::Vec4;
+ fn yyzx(self) -> Self::Vec4;
+ fn yyzy(self) -> Self::Vec4;
+ fn yyzz(self) -> Self::Vec4;
+ fn yzxx(self) -> Self::Vec4;
+ fn yzxy(self) -> Self::Vec4;
+ fn yzxz(self) -> Self::Vec4;
+ fn yzyx(self) -> Self::Vec4;
+ fn yzyy(self) -> Self::Vec4;
+ fn yzyz(self) -> Self::Vec4;
+ fn yzzx(self) -> Self::Vec4;
+ fn yzzy(self) -> Self::Vec4;
+ fn yzzz(self) -> Self::Vec4;
+ fn zxxx(self) -> Self::Vec4;
+ fn zxxy(self) -> Self::Vec4;
+ fn zxxz(self) -> Self::Vec4;
+ fn zxyx(self) -> Self::Vec4;
+ fn zxyy(self) -> Self::Vec4;
+ fn zxyz(self) -> Self::Vec4;
+ fn zxzx(self) -> Self::Vec4;
+ fn zxzy(self) -> Self::Vec4;
+ fn zxzz(self) -> Self::Vec4;
+ fn zyxx(self) -> Self::Vec4;
+ fn zyxy(self) -> Self::Vec4;
+ fn zyxz(self) -> Self::Vec4;
+ fn zyyx(self) -> Self::Vec4;
+ fn zyyy(self) -> Self::Vec4;
+ fn zyyz(self) -> Self::Vec4;
+ fn zyzx(self) -> Self::Vec4;
+ fn zyzy(self) -> Self::Vec4;
+ fn zyzz(self) -> Self::Vec4;
+ fn zzxx(self) -> Self::Vec4;
+ fn zzxy(self) -> Self::Vec4;
+ fn zzxz(self) -> Self::Vec4;
+ fn zzyx(self) -> Self::Vec4;
+ fn zzyy(self) -> Self::Vec4;
+ fn zzyz(self) -> Self::Vec4;
+ fn zzzx(self) -> Self::Vec4;
+ fn zzzy(self) -> Self::Vec4;
+ fn zzzz(self) -> Self::Vec4;
+ fn xxx(self) -> Self;
+ fn xxy(self) -> Self;
+ fn xxz(self) -> Self;
+ fn xyx(self) -> Self;
+ fn xyy(self) -> Self;
+ fn xzx(self) -> Self;
+ fn xzy(self) -> Self;
+ fn xzz(self) -> Self;
+ fn yxx(self) -> Self;
+ fn yxy(self) -> Self;
+ fn yxz(self) -> Self;
+ fn yyx(self) -> Self;
+ fn yyy(self) -> Self;
+ fn yyz(self) -> Self;
+ fn yzx(self) -> Self;
+ fn yzy(self) -> Self;
+ fn yzz(self) -> Self;
+ fn zxx(self) -> Self;
+ fn zxy(self) -> Self;
+ fn zxz(self) -> Self;
+ fn zyx(self) -> Self;
+ fn zyy(self) -> Self;
+ fn zyz(self) -> Self;
+ fn zzx(self) -> Self;
+ fn zzy(self) -> Self;
+ fn zzz(self) -> Self;
+ fn xx(self) -> Self::Vec2;
+ fn xy(self) -> Self::Vec2;
+ fn xz(self) -> Self::Vec2;
+ fn yx(self) -> Self::Vec2;
+ fn yy(self) -> Self::Vec2;
+ fn yz(self) -> Self::Vec2;
+ fn zx(self) -> Self::Vec2;
+ fn zy(self) -> Self::Vec2;
+ fn zz(self) -> Self::Vec2;
+}
+/** Swizzle methods for 4-dimensional vector types. */
+pub trait Vec4Swizzles: Sized + Copy + Clone {
+ type Vec2;
+ type Vec3;
+
+ #[inline]
+ fn xyzw(self) -> Self {
+ self
+ }
+
+ fn xxxx(self) -> Self;
+ fn xxxy(self) -> Self;
+ fn xxxz(self) -> Self;
+ fn xxxw(self) -> Self;
+ fn xxyx(self) -> Self;
+ fn xxyy(self) -> Self;
+ fn xxyz(self) -> Self;
+ fn xxyw(self) -> Self;
+ fn xxzx(self) -> Self;
+ fn xxzy(self) -> Self;
+ fn xxzz(self) -> Self;
+ fn xxzw(self) -> Self;
+ fn xxwx(self) -> Self;
+ fn xxwy(self) -> Self;
+ fn xxwz(self) -> Self;
+ fn xxww(self) -> Self;
+ fn xyxx(self) -> Self;
+ fn xyxy(self) -> Self;
+ fn xyxz(self) -> Self;
+ fn xyxw(self) -> Self;
+ fn xyyx(self) -> Self;
+ fn xyyy(self) -> Self;
+ fn xyyz(self) -> Self;
+ fn xyyw(self) -> Self;
+ fn xyzx(self) -> Self;
+ fn xyzy(self) -> Self;
+ fn xyzz(self) -> Self;
+ fn xywx(self) -> Self;
+ fn xywy(self) -> Self;
+ fn xywz(self) -> Self;
+ fn xyww(self) -> Self;
+ fn xzxx(self) -> Self;
+ fn xzxy(self) -> Self;
+ fn xzxz(self) -> Self;
+ fn xzxw(self) -> Self;
+ fn xzyx(self) -> Self;
+ fn xzyy(self) -> Self;
+ fn xzyz(self) -> Self;
+ fn xzyw(self) -> Self;
+ fn xzzx(self) -> Self;
+ fn xzzy(self) -> Self;
+ fn xzzz(self) -> Self;
+ fn xzzw(self) -> Self;
+ fn xzwx(self) -> Self;
+ fn xzwy(self) -> Self;
+ fn xzwz(self) -> Self;
+ fn xzww(self) -> Self;
+ fn xwxx(self) -> Self;
+ fn xwxy(self) -> Self;
+ fn xwxz(self) -> Self;
+ fn xwxw(self) -> Self;
+ fn xwyx(self) -> Self;
+ fn xwyy(self) -> Self;
+ fn xwyz(self) -> Self;
+ fn xwyw(self) -> Self;
+ fn xwzx(self) -> Self;
+ fn xwzy(self) -> Self;
+ fn xwzz(self) -> Self;
+ fn xwzw(self) -> Self;
+ fn xwwx(self) -> Self;
+ fn xwwy(self) -> Self;
+ fn xwwz(self) -> Self;
+ fn xwww(self) -> Self;
+ fn yxxx(self) -> Self;
+ fn yxxy(self) -> Self;
+ fn yxxz(self) -> Self;
+ fn yxxw(self) -> Self;
+ fn yxyx(self) -> Self;
+ fn yxyy(self) -> Self;
+ fn yxyz(self) -> Self;
+ fn yxyw(self) -> Self;
+ fn yxzx(self) -> Self;
+ fn yxzy(self) -> Self;
+ fn yxzz(self) -> Self;
+ fn yxzw(self) -> Self;
+ fn yxwx(self) -> Self;
+ fn yxwy(self) -> Self;
+ fn yxwz(self) -> Self;
+ fn yxww(self) -> Self;
+ fn yyxx(self) -> Self;
+ fn yyxy(self) -> Self;
+ fn yyxz(self) -> Self;
+ fn yyxw(self) -> Self;
+ fn yyyx(self) -> Self;
+ fn yyyy(self) -> Self;
+ fn yyyz(self) -> Self;
+ fn yyyw(self) -> Self;
+ fn yyzx(self) -> Self;
+ fn yyzy(self) -> Self;
+ fn yyzz(self) -> Self;
+ fn yyzw(self) -> Self;
+ fn yywx(self) -> Self;
+ fn yywy(self) -> Self;
+ fn yywz(self) -> Self;
+ fn yyww(self) -> Self;
+ fn yzxx(self) -> Self;
+ fn yzxy(self) -> Self;
+ fn yzxz(self) -> Self;
+ fn yzxw(self) -> Self;
+ fn yzyx(self) -> Self;
+ fn yzyy(self) -> Self;
+ fn yzyz(self) -> Self;
+ fn yzyw(self) -> Self;
+ fn yzzx(self) -> Self;
+ fn yzzy(self) -> Self;
+ fn yzzz(self) -> Self;
+ fn yzzw(self) -> Self;
+ fn yzwx(self) -> Self;
+ fn yzwy(self) -> Self;
+ fn yzwz(self) -> Self;
+ fn yzww(self) -> Self;
+ fn ywxx(self) -> Self;
+ fn ywxy(self) -> Self;
+ fn ywxz(self) -> Self;
+ fn ywxw(self) -> Self;
+ fn ywyx(self) -> Self;
+ fn ywyy(self) -> Self;
+ fn ywyz(self) -> Self;
+ fn ywyw(self) -> Self;
+ fn ywzx(self) -> Self;
+ fn ywzy(self) -> Self;
+ fn ywzz(self) -> Self;
+ fn ywzw(self) -> Self;
+ fn ywwx(self) -> Self;
+ fn ywwy(self) -> Self;
+ fn ywwz(self) -> Self;
+ fn ywww(self) -> Self;
+ fn zxxx(self) -> Self;
+ fn zxxy(self) -> Self;
+ fn zxxz(self) -> Self;
+ fn zxxw(self) -> Self;
+ fn zxyx(self) -> Self;
+ fn zxyy(self) -> Self;
+ fn zxyz(self) -> Self;
+ fn zxyw(self) -> Self;
+ fn zxzx(self) -> Self;
+ fn zxzy(self) -> Self;
+ fn zxzz(self) -> Self;
+ fn zxzw(self) -> Self;
+ fn zxwx(self) -> Self;
+ fn zxwy(self) -> Self;
+ fn zxwz(self) -> Self;
+ fn zxww(self) -> Self;
+ fn zyxx(self) -> Self;
+ fn zyxy(self) -> Self;
+ fn zyxz(self) -> Self;
+ fn zyxw(self) -> Self;
+ fn zyyx(self) -> Self;
+ fn zyyy(self) -> Self;
+ fn zyyz(self) -> Self;
+ fn zyyw(self) -> Self;
+ fn zyzx(self) -> Self;
+ fn zyzy(self) -> Self;
+ fn zyzz(self) -> Self;
+ fn zyzw(self) -> Self;
+ fn zywx(self) -> Self;
+ fn zywy(self) -> Self;
+ fn zywz(self) -> Self;
+ fn zyww(self) -> Self;
+ fn zzxx(self) -> Self;
+ fn zzxy(self) -> Self;
+ fn zzxz(self) -> Self;
+ fn zzxw(self) -> Self;
+ fn zzyx(self) -> Self;
+ fn zzyy(self) -> Self;
+ fn zzyz(self) -> Self;
+ fn zzyw(self) -> Self;
+ fn zzzx(self) -> Self;
+ fn zzzy(self) -> Self;
+ fn zzzz(self) -> Self;
+ fn zzzw(self) -> Self;
+ fn zzwx(self) -> Self;
+ fn zzwy(self) -> Self;
+ fn zzwz(self) -> Self;
+ fn zzww(self) -> Self;
+ fn zwxx(self) -> Self;
+ fn zwxy(self) -> Self;
+ fn zwxz(self) -> Self;
+ fn zwxw(self) -> Self;
+ fn zwyx(self) -> Self;
+ fn zwyy(self) -> Self;
+ fn zwyz(self) -> Self;
+ fn zwyw(self) -> Self;
+ fn zwzx(self) -> Self;
+ fn zwzy(self) -> Self;
+ fn zwzz(self) -> Self;
+ fn zwzw(self) -> Self;
+ fn zwwx(self) -> Self;
+ fn zwwy(self) -> Self;
+ fn zwwz(self) -> Self;
+ fn zwww(self) -> Self;
+ fn wxxx(self) -> Self;
+ fn wxxy(self) -> Self;
+ fn wxxz(self) -> Self;
+ fn wxxw(self) -> Self;
+ fn wxyx(self) -> Self;
+ fn wxyy(self) -> Self;
+ fn wxyz(self) -> Self;
+ fn wxyw(self) -> Self;
+ fn wxzx(self) -> Self;
+ fn wxzy(self) -> Self;
+ fn wxzz(self) -> Self;
+ fn wxzw(self) -> Self;
+ fn wxwx(self) -> Self;
+ fn wxwy(self) -> Self;
+ fn wxwz(self) -> Self;
+ fn wxww(self) -> Self;
+ fn wyxx(self) -> Self;
+ fn wyxy(self) -> Self;
+ fn wyxz(self) -> Self;
+ fn wyxw(self) -> Self;
+ fn wyyx(self) -> Self;
+ fn wyyy(self) -> Self;
+ fn wyyz(self) -> Self;
+ fn wyyw(self) -> Self;
+ fn wyzx(self) -> Self;
+ fn wyzy(self) -> Self;
+ fn wyzz(self) -> Self;
+ fn wyzw(self) -> Self;
+ fn wywx(self) -> Self;
+ fn wywy(self) -> Self;
+ fn wywz(self) -> Self;
+ fn wyww(self) -> Self;
+ fn wzxx(self) -> Self;
+ fn wzxy(self) -> Self;
+ fn wzxz(self) -> Self;
+ fn wzxw(self) -> Self;
+ fn wzyx(self) -> Self;
+ fn wzyy(self) -> Self;
+ fn wzyz(self) -> Self;
+ fn wzyw(self) -> Self;
+ fn wzzx(self) -> Self;
+ fn wzzy(self) -> Self;
+ fn wzzz(self) -> Self;
+ fn wzzw(self) -> Self;
+ fn wzwx(self) -> Self;
+ fn wzwy(self) -> Self;
+ fn wzwz(self) -> Self;
+ fn wzww(self) -> Self;
+ fn wwxx(self) -> Self;
+ fn wwxy(self) -> Self;
+ fn wwxz(self) -> Self;
+ fn wwxw(self) -> Self;
+ fn wwyx(self) -> Self;
+ fn wwyy(self) -> Self;
+ fn wwyz(self) -> Self;
+ fn wwyw(self) -> Self;
+ fn wwzx(self) -> Self;
+ fn wwzy(self) -> Self;
+ fn wwzz(self) -> Self;
+ fn wwzw(self) -> Self;
+ fn wwwx(self) -> Self;
+ fn wwwy(self) -> Self;
+ fn wwwz(self) -> Self;
+ fn wwww(self) -> Self;
+ fn xxx(self) -> Self::Vec3;
+ fn xxy(self) -> Self::Vec3;
+ fn xxz(self) -> Self::Vec3;
+ fn xxw(self) -> Self::Vec3;
+ fn xyx(self) -> Self::Vec3;
+ fn xyy(self) -> Self::Vec3;
+ fn xyz(self) -> Self::Vec3;
+ fn xyw(self) -> Self::Vec3;
+ fn xzx(self) -> Self::Vec3;
+ fn xzy(self) -> Self::Vec3;
+ fn xzz(self) -> Self::Vec3;
+ fn xzw(self) -> Self::Vec3;
+ fn xwx(self) -> Self::Vec3;
+ fn xwy(self) -> Self::Vec3;
+ fn xwz(self) -> Self::Vec3;
+ fn xww(self) -> Self::Vec3;
+ fn yxx(self) -> Self::Vec3;
+ fn yxy(self) -> Self::Vec3;
+ fn yxz(self) -> Self::Vec3;
+ fn yxw(self) -> Self::Vec3;
+ fn yyx(self) -> Self::Vec3;
+ fn yyy(self) -> Self::Vec3;
+ fn yyz(self) -> Self::Vec3;
+ fn yyw(self) -> Self::Vec3;
+ fn yzx(self) -> Self::Vec3;
+ fn yzy(self) -> Self::Vec3;
+ fn yzz(self) -> Self::Vec3;
+ fn yzw(self) -> Self::Vec3;
+ fn ywx(self) -> Self::Vec3;
+ fn ywy(self) -> Self::Vec3;
+ fn ywz(self) -> Self::Vec3;
+ fn yww(self) -> Self::Vec3;
+ fn zxx(self) -> Self::Vec3;
+ fn zxy(self) -> Self::Vec3;
+ fn zxz(self) -> Self::Vec3;
+ fn zxw(self) -> Self::Vec3;
+ fn zyx(self) -> Self::Vec3;
+ fn zyy(self) -> Self::Vec3;
+ fn zyz(self) -> Self::Vec3;
+ fn zyw(self) -> Self::Vec3;
+ fn zzx(self) -> Self::Vec3;
+ fn zzy(self) -> Self::Vec3;
+ fn zzz(self) -> Self::Vec3;
+ fn zzw(self) -> Self::Vec3;
+ fn zwx(self) -> Self::Vec3;
+ fn zwy(self) -> Self::Vec3;
+ fn zwz(self) -> Self::Vec3;
+ fn zww(self) -> Self::Vec3;
+ fn wxx(self) -> Self::Vec3;
+ fn wxy(self) -> Self::Vec3;
+ fn wxz(self) -> Self::Vec3;
+ fn wxw(self) -> Self::Vec3;
+ fn wyx(self) -> Self::Vec3;
+ fn wyy(self) -> Self::Vec3;
+ fn wyz(self) -> Self::Vec3;
+ fn wyw(self) -> Self::Vec3;
+ fn wzx(self) -> Self::Vec3;
+ fn wzy(self) -> Self::Vec3;
+ fn wzz(self) -> Self::Vec3;
+ fn wzw(self) -> Self::Vec3;
+ fn wwx(self) -> Self::Vec3;
+ fn wwy(self) -> Self::Vec3;
+ fn wwz(self) -> Self::Vec3;
+ fn www(self) -> Self::Vec3;
+ fn xx(self) -> Self::Vec2;
+ fn xy(self) -> Self::Vec2;
+ fn xz(self) -> Self::Vec2;
+ fn xw(self) -> Self::Vec2;
+ fn yx(self) -> Self::Vec2;
+ fn yy(self) -> Self::Vec2;
+ fn yz(self) -> Self::Vec2;
+ fn yw(self) -> Self::Vec2;
+ fn zx(self) -> Self::Vec2;
+ fn zy(self) -> Self::Vec2;
+ fn zz(self) -> Self::Vec2;
+ fn zw(self) -> Self::Vec2;
+ fn wx(self) -> Self::Vec2;
+ fn wy(self) -> Self::Vec2;
+ fn wz(self) -> Self::Vec2;
+ fn ww(self) -> Self::Vec2;
+}
diff --git a/src/transform.rs b/src/transform.rs
new file mode 100644
index 0000000..d7b1b66
--- /dev/null
+++ b/src/transform.rs
@@ -0,0 +1,432 @@
+#![allow(deprecated)]
+
+use crate::{Affine3A, Mat4, Quat, Vec3, Vec3A, Vec3Swizzles};
+use core::ops::Mul;
+
+#[cfg(feature = "rand")]
+use rand::{
+ distributions::{Distribution, Standard},
+ Rng,
+};
+
+/**
+ * A transform containing non-uniform scale, rotation and translation.
+ *
+ * Scale and translation are stored as `Vec3` fields; `Vec3A` is only used
+ * internally by the transform methods for better performance.
+ */
+#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(C)]
+#[deprecated(
+    since = "0.15.0",
+    note = "Moving to a separate crate, see https://github.com/bitshifter/glam-rs/issues/175"
+)]
+pub struct TransformSRT {
+    pub rotation: Quat,
+    pub translation: Vec3,
+    pub scale: Vec3,
+}
+
+impl Default for TransformSRT {
+    /// Returns [`TransformSRT::IDENTITY`].
+    #[inline]
+    fn default() -> Self {
+        Self::IDENTITY
+    }
+}
+
+/**
+ * A transform containing rotation and translation.
+ *
+ * Rotation and translation are stored as plain `Quat` and `Vec3` fields;
+ * `Vec3A` is only used internally by the transform methods.
+ */
+#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(C)]
+#[deprecated(
+    since = "0.15.0",
+    note = "Moving to a separate crate, see https://github.com/bitshifter/glam-rs/issues/175"
+)]
+pub struct TransformRT {
+    pub rotation: Quat,
+    pub translation: Vec3,
+}
+
+impl Default for TransformRT {
+    /// Returns [`TransformRT::IDENTITY`].
+    #[inline]
+    fn default() -> Self {
+        Self::IDENTITY
+    }
+}
+
+impl TransformSRT {
+    /// The identity transform that does nothing.
+    pub const IDENTITY: Self = Self {
+        scale: Vec3::ONE,
+        rotation: Quat::IDENTITY,
+        translation: Vec3::ZERO,
+    };
+
+    /// A transform with all elements set to `NAN`.
+    pub const NAN: Self = Self {
+        scale: Vec3::NAN,
+        rotation: Quat::NAN,
+        translation: Vec3::NAN,
+    };
+
+    /// Creates a transform from the given `scale`, `rotation` and `translation`.
+    #[inline]
+    pub fn from_scale_rotation_translation(scale: Vec3, rotation: Quat, translation: Vec3) -> Self {
+        Self {
+            rotation,
+            translation,
+            scale,
+        }
+    }
+
+    /// Returns `true` if, and only if, all elements are finite.
+    /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
+    ///
+    /// NOTE(review): only `rotation` and `translation` are checked; `scale` is
+    /// not — confirm whether that is intentional.
+    #[inline]
+    pub fn is_finite(&self) -> bool {
+        self.rotation.is_finite() && self.translation.is_finite()
+    }
+
+    /// Returns `true` if, and only if, any element is `NaN`.
+    ///
+    /// NOTE(review): only `rotation` and `translation` are checked; `scale` is
+    /// not — confirm whether that is intentional.
+    #[inline]
+    pub fn is_nan(&self) -> bool {
+        self.rotation.is_nan() || self.translation.is_nan()
+    }
+
+    /// Returns the inverse of this transform, built from the reciprocal scale,
+    /// the conjugate rotation and the negated, re-transformed translation.
+    #[inline]
+    pub fn inverse(&self) -> Self {
+        let scale = self.scale.recip();
+        let rotation = self.rotation.conjugate();
+        let translation = -(rotation * (self.translation * scale));
+        Self {
+            rotation,
+            translation,
+            scale,
+        }
+    }
+
+    /// Returns a copy of `self` with its `rotation` normalized; `scale` and
+    /// `translation` are unchanged.
+    #[inline]
+    pub fn normalize(&self) -> Self {
+        let rotation = self.rotation.normalize();
+        Self {
+            scale: self.scale,
+            rotation,
+            translation: self.translation,
+        }
+    }
+
+    /// Multiplies `self` by `other`, delegating to `mul_srt_srt`.
+    #[inline]
+    pub fn mul_transform(&self, other: &Self) -> Self {
+        mul_srt_srt(self, other)
+    }
+
+    #[deprecated(
+        since = "0.15.0",
+        note = "Please use `transform_point3(other)` instead"
+    )]
+    #[inline]
+    pub fn transform_vec3(&self, other: Vec3) -> Vec3 {
+        self.transform_point3(other)
+    }
+
+    /// Transforms `other` as a point: scaled, rotated, then translated.
+    #[inline]
+    pub fn transform_point3(&self, other: Vec3) -> Vec3 {
+        self.transform_point3a(other.into()).into()
+    }
+
+    /// Transforms `other` as a direction vector: scaled and rotated, but not
+    /// translated.
+    #[inline]
+    pub fn transform_vector3(&self, other: Vec3) -> Vec3 {
+        self.transform_vector3a(other.into()).into()
+    }
+
+    /// `Vec3A` variant of [`Self::transform_point3`].
+    #[inline]
+    pub fn transform_point3a(&self, other: Vec3A) -> Vec3A {
+        (self.rotation * (other * Vec3A::from(self.scale))) + Vec3A::from(self.translation)
+    }
+
+    /// `Vec3A` variant of [`Self::transform_vector3`].
+    #[inline]
+    pub fn transform_vector3a(&self, other: Vec3A) -> Vec3A {
+        self.rotation * (other * Vec3A::from(self.scale))
+    }
+
+    /// Returns true if the absolute difference of all elements between `self`
+    /// and `other` is less than or equal to `max_abs_diff`.
+    ///
+    /// This can be used to compare if two transforms contain similar elements.
+    /// It works best when comparing with a known value. The `max_abs_diff` that
+    /// should be used depends on the values being compared against.
+    ///
+    /// For more on floating point comparisons see
+    /// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+    #[inline]
+    pub fn abs_diff_eq(&self, other: Self, max_abs_diff: f32) -> bool {
+        self.scale.abs_diff_eq(other.scale, max_abs_diff)
+            && self.rotation.abs_diff_eq(other.rotation, max_abs_diff)
+            && self
+                .translation
+                .abs_diff_eq(other.translation, max_abs_diff)
+    }
+}
+
+/// Composes two `TransformSRT`s, following `rtm::qvv_mul`.
+///
+/// NOTE(review): the negative-scale branch composes through matrices as
+/// `lhs_mtx * rhs_mtx`, while the fast branch transforms `lhs.translation`
+/// by `rhs` — confirm both branches agree on operand order.
+#[inline]
+fn mul_srt_srt(lhs: &TransformSRT, rhs: &TransformSRT) -> TransformSRT {
+    // Based on https://github.com/nfrechette/rtm `rtm::qvv_mul`
+    let lhs_scale = Vec3A::from(lhs.scale);
+    let rhs_scale = Vec3A::from(rhs.scale);
+    let min_scale = lhs_scale.min(rhs_scale);
+    let scale = lhs_scale * rhs_scale;
+
+    if min_scale.cmplt(Vec3A::ZERO).any() {
+        // If negative scale, we go through a matrix
+        let lhs_mtx =
+            Affine3A::from_scale_rotation_translation(lhs.scale, lhs.rotation, lhs.translation);
+        let rhs_mtx =
+            Affine3A::from_scale_rotation_translation(rhs.scale, rhs.rotation, rhs.translation);
+        let mut result_mtx = lhs_mtx * rhs_mtx;
+
+        // Re-normalize the axes and re-apply the sign of the combined scale so
+        // a rotation can be extracted from the matrix.
+        let sign = scale.signum();
+        result_mtx.x_axis = result_mtx.x_axis.normalize() * sign.xxx();
+        result_mtx.y_axis = result_mtx.y_axis.normalize() * sign.yyy();
+        result_mtx.z_axis = result_mtx.z_axis.normalize() * sign.zzz();
+
+        let scale = Vec3::from(scale);
+        let rotation = Quat::from_affine3(&result_mtx);
+        let translation = Vec3::from(result_mtx.translation);
+        TransformSRT {
+            rotation,
+            translation,
+            scale,
+        }
+    } else {
+        // Fast path: no negative scale, compose the components directly.
+        let rotation = lhs.rotation * rhs.rotation;
+        let translation = (rhs.rotation * (lhs.translation * rhs.scale)) + rhs.translation;
+        TransformSRT {
+            rotation,
+            translation,
+            scale: scale.into(),
+        }
+    }
+}
+
+/// Composes two rotation-translation transforms; no scale handling required.
+#[inline]
+fn mul_rt_rt(lhs: &TransformRT, rhs: &TransformRT) -> TransformRT {
+    let rotation = lhs.rotation * rhs.rotation;
+    let translation = (rhs.rotation * lhs.translation) + rhs.translation;
+    TransformRT {
+        rotation,
+        translation,
+    }
+}
+
+impl TransformRT {
+    /// The identity transform that does nothing.
+    pub const IDENTITY: Self = Self {
+        rotation: Quat::IDENTITY,
+        translation: Vec3::ZERO,
+    };
+
+    /// A transform with all elements set to `NAN`.
+    pub const NAN: Self = Self {
+        rotation: Quat::NAN,
+        translation: Vec3::NAN,
+    };
+
+    /// Creates a transform from the given `rotation` and `translation`.
+    #[inline]
+    pub fn from_rotation_translation(rotation: Quat, translation: Vec3) -> Self {
+        Self {
+            rotation,
+            translation,
+        }
+    }
+
+    /// Returns `true` if, and only if, all elements are finite.
+    /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
+    #[inline]
+    pub fn is_finite(&self) -> bool {
+        self.rotation.is_finite() && self.translation.is_finite()
+    }
+
+    /// Returns `true` if, and only if, any element is `NaN`.
+    #[inline]
+    pub fn is_nan(&self) -> bool {
+        self.rotation.is_nan() || self.translation.is_nan()
+    }
+
+    /// Returns the inverse of this transform, built from the conjugate
+    /// rotation and the negated, re-rotated translation.
+    #[inline]
+    pub fn inverse(&self) -> Self {
+        let rotation = self.rotation.conjugate();
+        let translation = -(rotation * self.translation);
+        Self {
+            rotation,
+            translation,
+        }
+    }
+
+    /// Returns a copy of `self` with its `rotation` normalized; `translation`
+    /// is unchanged.
+    #[inline]
+    pub fn normalize(&self) -> Self {
+        let rotation = self.rotation.normalize();
+        Self {
+            rotation,
+            translation: self.translation,
+        }
+    }
+
+    /// Multiplies `self` by `other`, delegating to `mul_rt_rt`.
+    #[inline]
+    pub fn mul_transform(&self, other: &Self) -> Self {
+        mul_rt_rt(self, other)
+    }
+
+    #[deprecated(
+        since = "0.15.0",
+        note = "Please use `transform_point3(other)` instead"
+    )]
+    #[inline]
+    pub fn transform_vec3(self, other: Vec3) -> Vec3 {
+        self.transform_point3(other)
+    }
+
+    /// Transforms `other` as a point: rotated, then translated.
+    #[inline]
+    pub fn transform_point3(&self, other: Vec3) -> Vec3 {
+        self.transform_point3a(other.into()).into()
+    }
+
+    /// Transforms `other` as a direction vector: rotated only, not translated.
+    #[inline]
+    pub fn transform_vector3(&self, other: Vec3) -> Vec3 {
+        self.transform_vector3a(other.into()).into()
+    }
+
+    /// `Vec3A` variant of [`Self::transform_point3`].
+    #[inline]
+    pub fn transform_point3a(&self, other: Vec3A) -> Vec3A {
+        (self.rotation * other) + Vec3A::from(self.translation)
+    }
+
+    /// `Vec3A` variant of [`Self::transform_vector3`].
+    #[inline]
+    pub fn transform_vector3a(&self, other: Vec3A) -> Vec3A {
+        self.rotation * other
+    }
+
+    /// Returns true if the absolute difference of all elements between `self`
+    /// and `other` is less than or equal to `max_abs_diff`.
+    ///
+    /// This can be used to compare if two transforms contain similar elements.
+    /// It works best when comparing with a known value. The `max_abs_diff` that
+    /// should be used depends on the values being compared against.
+    ///
+    /// For more on floating point comparisons see
+    /// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+    #[inline]
+    pub fn abs_diff_eq(&self, other: Self, max_abs_diff: f32) -> bool {
+        self.rotation.abs_diff_eq(other.rotation, max_abs_diff)
+            && self
+                .translation
+                .abs_diff_eq(other.translation, max_abs_diff)
+    }
+}
+
+/// Composes two rotation-translation transforms via `mul_rt_rt`.
+impl Mul<TransformRT> for TransformRT {
+    type Output = TransformRT;
+    #[inline]
+    fn mul(self, other: TransformRT) -> TransformRT {
+        mul_rt_rt(&self, &other)
+    }
+}
+
+/// Composes two scale-rotation-translation transforms via `mul_srt_srt`.
+impl Mul<TransformSRT> for TransformSRT {
+    type Output = Self;
+    #[inline]
+    fn mul(self, other: Self) -> Self::Output {
+        mul_srt_srt(&self, &other)
+    }
+}
+
+/// Mixed composition: `other` is promoted to a `TransformSRT` with unit scale.
+impl Mul<TransformRT> for TransformSRT {
+    type Output = TransformSRT;
+    #[inline]
+    fn mul(self, other: TransformRT) -> Self::Output {
+        mul_srt_srt(&self, &other.into())
+    }
+}
+
+/// Mixed composition: `self` is promoted to a `TransformSRT` with unit scale.
+impl Mul<TransformSRT> for TransformRT {
+    type Output = TransformSRT;
+    #[inline]
+    fn mul(self, other: TransformSRT) -> Self::Output {
+        mul_srt_srt(&self.into(), &other)
+    }
+}
+
+/// Promotes a rotation-translation transform to an SRT with unit scale.
+impl From<TransformRT> for TransformSRT {
+    #[inline]
+    fn from(tr: TransformRT) -> Self {
+        Self {
+            translation: tr.translation,
+            rotation: tr.rotation,
+            scale: Vec3::ONE,
+        }
+    }
+}
+
+#[cfg(feature = "rand")]
+impl Distribution<TransformRT> for Standard {
+    /// Samples a random rotation and a translation whose components span the
+    /// full finite `f32` range.
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TransformRT {
+        TransformRT::from_rotation_translation(
+            rng.gen::<Quat>(),
+            Vec3::new(
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+            ),
+        )
+    }
+}
+
+#[cfg(feature = "rand")]
+impl Distribution<TransformSRT> for Standard {
+    /// Samples a random scale, rotation and translation; scale components are
+    /// rejection-sampled to be non-degenerate.
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TransformSRT {
+        // Reject near-zero values (|f| <= f32::MIN_POSITIVE), presumably so
+        // that `scale.recip()` in `inverse()` stays finite — TODO confirm.
+        let mut gen_non_zero = || loop {
+            let f: f32 = rng.gen_range(core::f32::MIN..=core::f32::MAX);
+            if f.abs() > core::f32::MIN_POSITIVE {
+                return f;
+            }
+        };
+        TransformSRT::from_scale_rotation_translation(
+            Vec3::new(gen_non_zero(), gen_non_zero(), gen_non_zero()),
+            rng.gen::<Quat>(),
+            Vec3::new(
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+                rng.gen_range(core::f32::MIN..=core::f32::MAX),
+            ),
+        )
+    }
+}
+
+/// Converts to a homogeneous 4x4 matrix with the same scale, rotation and translation.
+impl From<TransformSRT> for Mat4 {
+    #[inline]
+    fn from(srt: TransformSRT) -> Self {
+        Self::from_scale_rotation_translation(srt.scale, srt.rotation, srt.translation)
+    }
+}
+
+/// Converts to a homogeneous 4x4 matrix with the same rotation and translation.
+impl From<TransformRT> for Mat4 {
+    #[inline]
+    fn from(rt: TransformRT) -> Self {
+        Self::from_rotation_translation(rt.rotation, rt.translation)
+    }
+}
+
+/// Converts to an affine transform with the same scale, rotation and translation.
+impl From<TransformSRT> for Affine3A {
+    #[inline]
+    fn from(srt: TransformSRT) -> Self {
+        Self::from_scale_rotation_translation(srt.scale, srt.rotation, srt.translation)
+    }
+}
+
+/// Converts to an affine transform with the same rotation and translation.
+impl From<TransformRT> for Affine3A {
+    #[inline]
+    fn from(rt: TransformRT) -> Self {
+        Self::from_rotation_translation(rt.rotation, rt.translation)
+    }
+}
diff --git a/src/vec.rs b/src/vec.rs
new file mode 100644
index 0000000..3c5a226
--- /dev/null
+++ b/src/vec.rs
@@ -0,0 +1,1029 @@
+// Adds common vector methods to an impl.
+
+// The methods here should be supported for all types of $t and all sizes of vector.
+macro_rules! impl_vecn_common_methods {
+ ($t:ty, $vecn:ident, $mask:ident, $inner:ident, $vectrait:ident) => {
+ /// Creates a vector with all elements set to `v`.
+ #[inline(always)]
+ pub fn splat(v: $t) -> Self {
+ Self($inner::splat(v))
+ }
+
+ /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
+ /// for each element of `self`.
+ ///
+ /// A true element in the mask uses the corresponding element from `if_true`, and false
+ /// uses the element from `if_false`.
+ #[inline(always)]
+ pub fn select(mask: $mask, if_true: $vecn, if_false: $vecn) -> $vecn {
+ Self($inner::select(mask.0, if_true.0, if_false.0))
+ }
+
+ /// Computes the dot product of `self` and `other`.
+ #[inline(always)]
+ pub fn dot(self, other: Self) -> $t {
+ $vectrait::dot(self.0, other.0)
+ }
+
+ /// Returns a vector containing the minimum values for each element of `self` and `other`.
+ ///
+    /// In other words this computes `[self.x.min(other.x), self.y.min(other.y), ..]`.
+ #[inline(always)]
+ pub fn min(self, other: Self) -> Self {
+ Self(self.0.min(other.0))
+ }
+
+ /// Returns a vector containing the maximum values for each element of `self` and `other`.
+ ///
+ /// In other words this computes `[self.x.max(other.x), self.y.max(other.y), ..]`.
+ #[inline(always)]
+ pub fn max(self, other: Self) -> Self {
+ Self(self.0.max(other.0))
+ }
+
+ /// Component-wise clamping of values, similar to [`f32::clamp`].
+ ///
+ /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
+ #[inline(always)]
+ pub fn clamp(self, min: Self, max: Self) -> Self {
+ Self($vectrait::clamp(self.0, min.0, max.0))
+ }
+
+ /// Returns the horizontal minimum of `self`.
+ ///
+ /// In other words this computes `min(x, y, ..)`.
+ #[inline(always)]
+ pub fn min_element(self) -> $t {
+ $vectrait::min_element(self.0)
+ }
+
+ /// Returns the horizontal maximum of `self`.
+ ///
+ /// In other words this computes `max(x, y, ..)`.
+ #[inline(always)]
+ pub fn max_element(self) -> $t {
+ $vectrait::max_element(self.0)
+ }
+
+ /// Returns a vector mask containing the result of a `==` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words, this computes `[self.x == other.x, self.y == other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmpeq(self, other: Self) -> $mask {
+ $mask(self.0.cmpeq(other.0))
+ }
+
+ /// Returns a vector mask containing the result of a `!=` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words this computes `[self.x != other.x, self.y != other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmpne(self, other: Self) -> $mask {
+ $mask(self.0.cmpne(other.0))
+ }
+
+ /// Returns a vector mask containing the result of a `>=` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words this computes `[self.x >= other.x, self.y >= other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmpge(self, other: Self) -> $mask {
+ $mask(self.0.cmpge(other.0))
+ }
+
+ /// Returns a vector mask containing the result of a `>` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words this computes `[self.x > other.x, self.y > other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmpgt(self, other: Self) -> $mask {
+ $mask(self.0.cmpgt(other.0))
+ }
+
+ /// Returns a vector mask containing the result of a `<=` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words this computes `[self.x <= other.x, self.y <= other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmple(self, other: Self) -> $mask {
+ $mask(self.0.cmple(other.0))
+ }
+
+ /// Returns a vector mask containing the result of a `<` comparison for each element of
+ /// `self` and `other`.
+ ///
+ /// In other words this computes `[self.x < other.x, self.y < other.y, ..]` for all
+ /// elements.
+ #[inline(always)]
+ pub fn cmplt(self, other: Self) -> $mask {
+ $mask(self.0.cmplt(other.0))
+ }
+
+ /// Creates a vector from the first N values in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than N elements long.
+ #[inline(always)]
+ pub fn from_slice(slice: &[$t]) -> Self {
+ Self($vectrait::from_slice_unaligned(slice))
+ }
+
+ /// Writes the elements of `self` to the first N elements in `slice`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `slice` is less than N elements long.
+ #[inline(always)]
+ pub fn write_to_slice(self, slice: &mut [$t]) {
+ $vectrait::write_to_slice_unaligned(self.0, slice)
+ }
+ };
+}
+
+// Adds signed type vector methods to an impl.
+// The methods here should be supported for signed types of $t and all sizes of vector.
+macro_rules! impl_vecn_signed_methods {
+    ($t:ty, $vecn:ident, $mask:ident, $inner:ident, $sgntrait:ident) => {
+        // impl_vecn_common_methods!($t, $vecn, $mask, $inner, $vectrait);
+
+        /// Returns a vector containing the absolute value of each element of `self`.
+        #[inline(always)]
+        pub fn abs(self) -> Self {
+            Self($sgntrait::abs(self.0))
+        }
+
+        /// Returns a vector with elements representing the sign of `self`.
+        ///
+        /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
+        /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+        /// - `NAN` if the number is `NAN`
+        ///
+        /// NOTE(review): the wording above describes floating-point `signum`;
+        /// for integer instantiations the result follows `$sgntrait::signum` —
+        /// confirm the doc matches all types this macro is expanded for.
+        #[inline(always)]
+        pub fn signum(self) -> Self {
+            Self($sgntrait::signum(self.0))
+        }
+    };
+}
+
+// Adds float type vector methods to an impl.
+// The methods here should be supported for float types of $t and all sizes of vector.
+macro_rules! impl_vecn_float_methods {
+ ($t:ty, $vecn:ident, $mask:ident, $inner:ident, $flttrait:ident) => {
+ // impl_vecn_signed_methods!($t, $vecn, $mask, $inner, $sgntrait, $vectrait);
+
+ /// All NAN.
+ pub const NAN: Self = Self(<$inner as crate::core::traits::scalar::NanConstEx>::NAN);
+
+ /// Returns `true` if, and only if, all elements are finite. If any element is either
+ /// `NaN`, positive or negative infinity, this will return `false`.
+ #[inline(always)]
+ pub fn is_finite(self) -> bool {
+ $flttrait::is_finite(self.0)
+ }
+
+ /// Returns `true` if any elements are `NaN`.
+ #[inline(always)]
+ pub fn is_nan(self) -> bool {
+ $flttrait::is_nan(self.0)
+ }
+
+ /// Performs `is_nan` on each element of self, returning a vector mask of the results.
+ ///
+ /// In other words, this computes `[x.is_nan(), y.is_nan(), z.is_nan(), w.is_nan()]`.
+ #[inline(always)]
+ pub fn is_nan_mask(self) -> $mask {
+ $mask($flttrait::is_nan_mask(self.0))
+ }
+
+ /// Computes the length of `self`.
+ #[doc(alias = "magnitude")]
+ #[inline(always)]
+ pub fn length(self) -> $t {
+ $flttrait::length(self.0)
+ }
+
+ /// Computes the squared length of `self`.
+ ///
+ /// This is faster than `length()` as it avoids a square root operation.
+ #[doc(alias = "magnitude2")]
+ #[inline(always)]
+ pub fn length_squared(self) -> $t {
+ $flttrait::length_squared(self.0)
+ }
+
+ /// Computes `1.0 / length()`.
+ ///
+ /// For valid results, `self` must _not_ be of length zero.
+ #[inline(always)]
+ pub fn length_recip(self) -> $t {
+ $flttrait::length_recip(self.0)
+ }
+
+ /// Computes the Euclidean distance between two points in space.
+ #[inline]
+ pub fn distance(self, other: Self) -> $t {
+ (self - other).length()
+ }
+
+ /// Compute the squared euclidean distance between two points in space.
+ #[inline]
+ pub fn distance_squared(self, other: Self) -> $t {
+ (self - other).length_squared()
+ }
+
+ /// Returns `self` normalized to length 1.0.
+ ///
+ /// For valid results, `self` must _not_ be of length zero, nor very close to zero.
+ ///
+ /// See also [`Self::try_normalize`] and [`Self::normalize_or_zero`].
+ ///
+    /// # Panics
+ ///
+ /// Will panic if `self` is zero length when `glam_assert` is enabled.
+ #[must_use]
+ #[inline(always)]
+ pub fn normalize(self) -> Self {
+ Self($flttrait::normalize(self.0))
+ }
+
+ /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
+ ///
+ /// In particular, if the input is zero (or very close to zero), or non-finite,
+ /// the result of this operation will be `None`.
+ ///
+ /// See also [`Self::normalize_or_zero`].
+ #[must_use]
+ #[inline]
+ pub fn try_normalize(self) -> Option<Self> {
+ let rcp = self.length_recip();
+ if rcp.is_finite() && rcp > 0.0 {
+ Some(self * rcp)
+ } else {
+ None
+ }
+ }
+
+ /// Returns `self` normalized to length 1.0 if possible, else returns zero.
+ ///
+ /// In particular, if the input is zero (or very close to zero), or non-finite,
+ /// the result of this operation will be zero.
+ ///
+ /// See also [`Self::try_normalize`].
+ #[must_use]
+ #[inline]
+ pub fn normalize_or_zero(self) -> Self {
+ let rcp = self.length_recip();
+ if rcp.is_finite() && rcp > 0.0 {
+ self * rcp
+ } else {
+ Self::ZERO
+ }
+ }
+
+ /// Returns whether `self` is length `1.0` or not.
+ ///
+ /// Uses a precision threshold of `1e-6`.
+ #[inline(always)]
+ pub fn is_normalized(self) -> bool {
+ $flttrait::is_normalized(self.0)
+ }
+
+ /// Returns the vector projection of `self` onto `other`.
+ ///
+ /// `other` must be of non-zero length.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `other` is zero length when `glam_assert` is enabled.
+ #[must_use]
+ #[inline]
+ pub fn project_onto(self, other: Self) -> Self {
+ let other_len_sq_rcp = other.dot(other).recip();
+ glam_assert!(other_len_sq_rcp.is_finite());
+ other * self.dot(other) * other_len_sq_rcp
+ }
+
+ /// Returns the vector rejection of `self` from `other`.
+ ///
+ /// The vector rejection is the vector perpendicular to the projection of `self` onto
+ /// `other`, in other words the result of `self - self.project_onto(other)`.
+ ///
+ /// `other` must be of non-zero length.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `other` has a length of zero when `glam_assert` is enabled.
+ #[must_use]
+ #[inline]
+ pub fn reject_from(self, other: Self) -> Self {
+ self - self.project_onto(other)
+ }
+
+ /// Returns the vector projection of `self` onto `other`.
+ ///
+ /// `other` must be normalized.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `other` is not normalized when `glam_assert` is enabled.
+ #[must_use]
+ #[inline]
+ pub fn project_onto_normalized(self, other: Self) -> Self {
+ glam_assert!(other.is_normalized());
+ other * self.dot(other)
+ }
+
+ /// Returns the vector rejection of `self` from `other`.
+ ///
+ /// The vector rejection is the vector perpendicular to the projection of `self` onto
+ /// `other`, in other words the result of `self - self.project_onto(other)`.
+ ///
+ /// `other` must be normalized.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `other` is not normalized when `glam_assert` is enabled.
+ #[must_use]
+ #[inline]
+ pub fn reject_from_normalized(self, other: Self) -> Self {
+ self - self.project_onto_normalized(other)
+ }
+
+ /// Returns a vector containing the nearest integer to a number for each element of `self`.
+ /// Round half-way cases away from 0.0.
+ #[inline(always)]
+ pub fn round(self) -> Self {
+ Self($flttrait::round(self.0))
+ }
+
+ /// Returns a vector containing the largest integer less than or equal to a number for each
+ /// element of `self`.
+ #[inline(always)]
+ pub fn floor(self) -> Self {
+ Self($flttrait::floor(self.0))
+ }
+
+ /// Returns a vector containing the smallest integer greater than or equal to a number for
+ /// each element of `self`.
+ #[inline(always)]
+ pub fn ceil(self) -> Self {
+ Self($flttrait::ceil(self.0))
+ }
+
+ /// Returns a vector containing the fractional part of the vector, e.g. `self -
+ /// self.floor()`.
+ ///
+ /// Note that this is fast but not precise for large numbers.
+ #[inline(always)]
+ pub fn fract(self) -> Self {
+ self - self.floor()
+ }
+
+ /// Returns a vector containing `e^self` (the exponential function) for each element of
+ /// `self`.
+ #[inline(always)]
+ pub fn exp(self) -> Self {
+ Self($flttrait::exp(self.0))
+ }
+
+ /// Returns a vector containing each element of `self` raised to the power of `n`.
+ #[inline(always)]
+ pub fn powf(self, n: $t) -> Self {
+ Self($flttrait::powf(self.0, n))
+ }
+
+ /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
+ #[inline(always)]
+ pub fn recip(self) -> Self {
+ Self($flttrait::recip(self.0))
+ }
+
+ /// Performs a linear interpolation between `self` and `other` based on the value `s`.
+ ///
+ /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
+ /// will be equal to `other`. When `s` is outside of range [0,1], the result is linearly
+ /// extrapolated.
+ #[doc(alias = "mix")]
+ #[inline]
+ pub fn lerp(self, other: Self, s: $t) -> Self {
+ self + ((other - self) * s)
+ }
+
+ /// Returns true if the absolute difference of all elements between `self` and `other` is
+ /// less than or equal to `max_abs_diff`.
+ ///
+ /// This can be used to compare if two vectors contain similar elements. It works best when
+    /// comparing with a known value. The `max_abs_diff` that should be used depends on
+ /// the values being compared against.
+ ///
+ /// For more see
+ /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
+ #[inline(always)]
+ pub fn abs_diff_eq(self, other: Self, max_abs_diff: $t) -> bool {
+ $flttrait::abs_diff_eq(self.0, other.0, max_abs_diff)
+ }
+
+ /// Returns a vector with a length no less than `min` and no more than `max`
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
+ #[inline]
+ pub fn clamp_length(self, min: $t, max: $t) -> Self {
+ glam_assert!(min <= max);
+ let length_sq = self.length_squared();
+ if length_sq < min * min {
+ self * (length_sq.sqrt().recip() * min)
+ } else if length_sq > max * max {
+ self * (length_sq.sqrt().recip() * max)
+ } else {
+ self
+ }
+ }
+
+ /// Returns a vector with a length no more than `max`
+ pub fn clamp_length_max(self, max: $t) -> Self {
+ let length_sq = self.length_squared();
+ if length_sq > max * max {
+ self * (length_sq.sqrt().recip() * max)
+ } else {
+ self
+ }
+ }
+
+ /// Returns a vector with a length no less than `min`
+ pub fn clamp_length_min(self, min: $t) -> Self {
+ let length_sq = self.length_squared();
+ if length_sq < min * min {
+ self * (length_sq.sqrt().recip() * min)
+ } else {
+ self
+ }
+ }
+
+ /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
+ /// error, yielding a more accurate result than an unfused multiply-add.
+ ///
+ /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
+ /// architecture has a dedicated fma CPU instruction. However, this is not always true,
+    /// and will be heavily dependent on designing algorithms with specific target hardware in
+ /// mind.
+ #[inline(always)]
+ pub fn mul_add(self, a: Self, b: Self) -> Self {
+ Self($flttrait::mul_add(self.0, a.0, b.0))
+ }
+ };
+}
+
+// Adds common vector trait implementations.
+// The traits here should be supported for all types of $t and all sizes of vector.
+macro_rules! impl_vecn_common_traits {
+ ($t:ty, $size:literal, $vecn:ident, $inner:ident, $trait:ident) => {
+ impl Default for $vecn {
+ #[inline(always)]
+ fn default() -> Self {
+ Self($inner::ZERO)
+ }
+ }
+
+ impl PartialEq for $vecn {
+ #[inline(always)]
+ fn eq(&self, other: &Self) -> bool {
+ self.cmpeq(*other).all()
+ }
+ }
+
+ impl From<$vecn> for $inner {
+ #[inline(always)]
+ fn from(t: $vecn) -> Self {
+ t.0
+ }
+ }
+
+ impl From<$inner> for $vecn {
+ #[inline(always)]
+ fn from(t: $inner) -> Self {
+ Self(t)
+ }
+ }
+
+ impl Div<$vecn> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn div(self, other: $vecn) -> Self {
+ Self(self.0.div(other.0))
+ }
+ }
+
+ impl DivAssign<$vecn> for $vecn {
+ #[inline(always)]
+ fn div_assign(&mut self, other: $vecn) {
+ self.0 = self.0.div(other.0)
+ }
+ }
+
+ impl Div<$t> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn div(self, other: $t) -> Self {
+ Self(self.0.div_scalar(other))
+ }
+ }
+
+ impl DivAssign<$t> for $vecn {
+ #[inline(always)]
+ fn div_assign(&mut self, other: $t) {
+ self.0 = self.0.div_scalar(other)
+ }
+ }
+
+ impl Div<$vecn> for $t {
+ type Output = $vecn;
+ #[inline(always)]
+ fn div(self, other: $vecn) -> $vecn {
+ $vecn($inner::splat(self).div(other.0))
+ }
+ }
+
+ impl Mul<$vecn> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: $vecn) -> Self {
+ Self(self.0.mul(other.0))
+ }
+ }
+
+ impl MulAssign<$vecn> for $vecn {
+ #[inline(always)]
+ fn mul_assign(&mut self, other: $vecn) {
+ self.0 = self.0.mul(other.0)
+ }
+ }
+
+ impl Mul<$t> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn mul(self, other: $t) -> Self {
+ Self(self.0.mul_scalar(other))
+ }
+ }
+
+ impl MulAssign<$t> for $vecn {
+ #[inline(always)]
+ fn mul_assign(&mut self, other: $t) {
+ self.0 = self.0.mul_scalar(other)
+ }
+ }
+
+ impl Mul<$vecn> for $t {
+ type Output = $vecn;
+ #[inline(always)]
+ fn mul(self, other: $vecn) -> $vecn {
+ $vecn($inner::splat(self).mul(other.0))
+ }
+ }
+
+ impl Add<$vecn> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn add(self, other: $vecn) -> Self {
+ Self(self.0.add(other.0))
+ }
+ }
+
+ impl AddAssign<$vecn> for $vecn {
+ #[inline(always)]
+ fn add_assign(&mut self, other: $vecn) {
+ self.0 = self.0.add(other.0)
+ }
+ }
+
+ impl Add<$t> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn add(self, other: $t) -> Self {
+ Self(self.0.add_scalar(other))
+ }
+ }
+
+ impl AddAssign<$t> for $vecn {
+ #[inline(always)]
+ fn add_assign(&mut self, other: $t) {
+ self.0 = self.0.add_scalar(other)
+ }
+ }
+
+ impl Add<$vecn> for $t {
+ type Output = $vecn;
+ #[inline(always)]
+ fn add(self, other: $vecn) -> $vecn {
+ $vecn($inner::splat(self).add(other.0))
+ }
+ }
+
+ impl Sub<$vecn> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn sub(self, other: $vecn) -> Self {
+ Self(self.0.sub(other.0))
+ }
+ }
+
+ impl SubAssign<$vecn> for $vecn {
+ #[inline(always)]
+ fn sub_assign(&mut self, other: $vecn) {
+ self.0 = self.0.sub(other.0)
+ }
+ }
+
+ impl Sub<$t> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn sub(self, other: $t) -> Self {
+ Self(self.0.sub_scalar(other))
+ }
+ }
+
+ impl SubAssign<$t> for $vecn {
+ #[inline(always)]
+ fn sub_assign(&mut self, other: $t) {
+ self.0 = self.0.sub_scalar(other)
+ }
+ }
+
+ impl Sub<$vecn> for $t {
+ type Output = $vecn;
+ #[inline(always)]
+ fn sub(self, other: $vecn) -> $vecn {
+ $vecn($inner::splat(self).sub(other.0))
+ }
+ }
+
+ impl Rem<$vecn> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn rem(self, other: $vecn) -> Self {
+ Self(self.0.rem(other.0))
+ }
+ }
+
+ impl RemAssign<$vecn> for $vecn {
+ #[inline(always)]
+ fn rem_assign(&mut self, other: $vecn) {
+ self.0 = self.0.rem(other.0)
+ }
+ }
+
+ impl Rem<$t> for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn rem(self, other: $t) -> Self {
+ Self(self.0.rem_scalar(other))
+ }
+ }
+
+ impl RemAssign<$t> for $vecn {
+ #[inline(always)]
+ fn rem_assign(&mut self, other: $t) {
+ self.0 = self.0.rem_scalar(other)
+ }
+ }
+
+ impl Rem<$vecn> for $t {
+ type Output = $vecn;
+ #[inline(always)]
+ fn rem(self, other: $vecn) -> $vecn {
+ $vecn($inner::splat(self).rem(other.0))
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; $size]> for $vecn {
+ #[inline(always)]
+ fn as_ref(&self) -> &[$t; $size] {
+ unsafe { &*(self as *const $vecn as *const [$t; $size]) }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsMut<[$t; $size]> for $vecn {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut [$t; $size] {
+ unsafe { &mut *(self as *mut $vecn as *mut [$t; $size]) }
+ }
+ }
+
+ impl From<[$t; $size]> for $vecn {
+ #[inline(always)]
+ fn from(a: [$t; $size]) -> Self {
+ Self($trait::from_array(a))
+ }
+ }
+
+ impl From<$vecn> for [$t; $size] {
+ #[inline(always)]
+ fn from(v: $vecn) -> Self {
+ v.into_array()
+ }
+ }
+
+ impl<'a> Sum<&'a Self> for $vecn {
+ #[inline]
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
+ }
+ }
+
+ impl<'a> Product<&'a Self> for $vecn {
+ #[inline]
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Self>,
+ {
+ iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
+ }
+ }
+ };
+}
+
+// Implements `Eq` and `Hash` for an exact-equality (integer) vector type.
+// `$t` is the element type and `$size` the component count; hashing goes
+// through the raw `[$t; $size]` array view of the vector.
+macro_rules! impl_vecn_eq_hash_traits {
+ ($t:ty, $size:literal, $vecn:ident) => {
+ impl Eq for $vecn {}
+
+ // Gated off SPIR-V because it relies on the `AsRef<[$t; $size]>` impl,
+ // which is itself only provided when not targeting SPIR-V.
+ #[cfg(not(target_arch = "spirv"))]
+ impl core::hash::Hash for $vecn {
+ fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+ let inner: &[$t; $size] = self.as_ref();
+ inner.hash(state);
+ }
+ }
+ };
+}
+
+// Adds signed vector trait implementations.
+// The traits here should be supported for signed types of $t and all sizes of vector.
+// NOTE(review): only `Neg` is emitted at present; `$t`, `$size` and `$sgntrait`
+// are unused but kept so all call sites share a single macro arity.
+macro_rules! impl_vecn_signed_traits {
+ ($t:ty, $size:literal, $vecn:ident, $inner:ident, $sgntrait:ident) => {
+ impl Neg for $vecn {
+ type Output = Self;
+ #[inline(always)]
+ fn neg(self) -> Self {
+ // Delegates component-wise negation to the inner storage type.
+ Self(self.0.neg())
+ }
+ }
+ };
+}
+
+// Implements vector-by-vector `Shl`/`Shr` (`$vecn << $rhs`, `$vecn >> $rhs`),
+// delegating to the inner storage type's `vector_shl`/`vector_shr`.
+// `$rhs` is another vector type, so each lane is shifted by the matching lane.
+macro_rules! impl_vecn_shift_op_traits {
+ ($vecn:ident, $rhs:ty, $inner:ident) => {
+ impl Shl<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shl(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::vector_shl(self.0, rhs.0))
+ }
+ }
+
+ impl Shr<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shr(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::vector_shr(self.0, rhs.0))
+ }
+ }
+ };
+}
+
+// Implements `Shl`/`Shr` by a scalar (`$vecn << int`), shifting every lane by
+// the same amount via the inner type's `scalar_shl`/`scalar_shr`.
+macro_rules! impl_vecn_scalar_shift_op_traits {
+ ($vecn:ident, $rhs:ty, $inner:ident) => {
+ impl Shl<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shl(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::scalar_shl(self.0, rhs))
+ }
+ }
+
+ impl Shr<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shr(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::scalar_shr(self.0, rhs))
+ }
+ }
+ };
+}
+
+// Implements the lane-wise bitwise operators `Not`, `BitAnd`, `BitOr` and
+// `BitXor` between two vectors of the same type, delegating to the inner
+// storage type's `not`/`vector_bitand`/`vector_bitor`/`vector_bitxor`.
+macro_rules! impl_vecn_bit_op_traits {
+ ($vecn:ident, $inner:ident) => {
+ impl Not for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn not(self) -> Self::Output {
+ $vecn($inner::not(self.0))
+ }
+ }
+
+ impl BitAnd for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitand(self, rhs: Self) -> Self::Output {
+ $vecn($inner::vector_bitand(self.0, rhs.0))
+ }
+ }
+
+ impl BitOr for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitor(self, rhs: Self) -> Self::Output {
+ $vecn($inner::vector_bitor(self.0, rhs.0))
+ }
+ }
+
+ impl BitXor for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ $vecn($inner::vector_bitxor(self.0, rhs.0))
+ }
+ }
+ };
+}
+
+// Implements `BitAnd`/`BitOr`/`BitXor` between a vector and a scalar `$rhs`,
+// applying the scalar to every lane via the inner type's `scalar_bit*` ops.
+macro_rules! impl_vecn_scalar_bit_op_traits {
+ ($vecn:ident, $rhs:ty, $inner:ident) => {
+ impl BitAnd<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitand(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::scalar_bitand(self.0, rhs))
+ }
+ }
+
+ impl BitOr<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitor(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::scalar_bitor(self.0, rhs))
+ }
+ }
+
+ impl BitXor<$rhs> for $vecn {
+ type Output = Self;
+
+ #[inline(always)]
+ fn bitxor(self, rhs: $rhs) -> Self::Output {
+ $vecn($inner::scalar_bitxor(self.0, rhs))
+ }
+ }
+ };
+}
+
+// `as_vec*` helpers: inject component-wise `as f32` conversion methods into a
+// vector's inherent impl. Conversion follows Rust `as` cast semantics.
+macro_rules! impl_as_vec2 {
+ () => {
+ /// Casts all elements of `self` to `f32`.
+ #[inline(always)]
+ pub fn as_vec2(&self) -> Vec2 {
+ Vec2::new(self.x as f32, self.y as f32)
+ }
+ };
+}
+
+// Same as above for 3D vectors; also provides the 16-byte-aligned `Vec3A` form.
+macro_rules! impl_as_vec3 {
+ () => {
+ /// Casts all elements of `self` to `f32`.
+ #[inline(always)]
+ pub fn as_vec3(&self) -> Vec3 {
+ Vec3::new(self.x as f32, self.y as f32, self.z as f32)
+ }
+
+ /// Casts all elements of `self` to `f32`.
+ #[inline(always)]
+ pub fn as_vec3a(&self) -> Vec3A {
+ Vec3A::new(self.x as f32, self.y as f32, self.z as f32)
+ }
+ };
+}
+
+macro_rules! impl_as_vec4 {
+ () => {
+ /// Casts all elements of `self` to `f32`.
+ #[inline(always)]
+ pub fn as_vec4(&self) -> Vec4 {
+ Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32)
+ }
+ };
+}
+
+// `as_dvec*` helpers: component-wise `as f64` conversion methods.
+macro_rules! impl_as_dvec2 {
+ () => {
+ /// Casts all elements of `self` to `f64`.
+ #[inline(always)]
+ pub fn as_dvec2(&self) -> DVec2 {
+ DVec2::new(self.x as f64, self.y as f64)
+ }
+ };
+}
+
+macro_rules! impl_as_dvec3 {
+ () => {
+ /// Casts all elements of `self` to `f64`.
+ #[inline(always)]
+ pub fn as_dvec3(&self) -> DVec3 {
+ DVec3::new(self.x as f64, self.y as f64, self.z as f64)
+ }
+ };
+}
+
+macro_rules! impl_as_dvec4 {
+ () => {
+ /// Casts all elements of `self` to `f64`.
+ #[inline(always)]
+ pub fn as_dvec4(&self) -> DVec4 {
+ DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
+ }
+ };
+}
+
+// `as_ivec*` helpers: component-wise `as i32` conversion methods.
+macro_rules! impl_as_ivec2 {
+ () => {
+ /// Casts all elements of `self` to `i32`.
+ #[inline(always)]
+ pub fn as_ivec2(&self) -> IVec2 {
+ IVec2::new(self.x as i32, self.y as i32)
+ }
+ };
+}
+
+macro_rules! impl_as_ivec3 {
+ () => {
+ /// Casts all elements of `self` to `i32`.
+ #[inline(always)]
+ pub fn as_ivec3(&self) -> IVec3 {
+ IVec3::new(self.x as i32, self.y as i32, self.z as i32)
+ }
+ };
+}
+
+macro_rules! impl_as_ivec4 {
+ () => {
+ /// Casts all elements of `self` to `i32`.
+ #[inline(always)]
+ pub fn as_ivec4(&self) -> IVec4 {
+ IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
+ }
+ };
+}
+
+// `as_uvec*` helpers: component-wise `as u32` conversion methods.
+macro_rules! impl_as_uvec2 {
+ () => {
+ /// Casts all elements of `self` to `u32`.
+ #[inline(always)]
+ pub fn as_uvec2(&self) -> UVec2 {
+ UVec2::new(self.x as u32, self.y as u32)
+ }
+ };
+}
+
+macro_rules! impl_as_uvec3 {
+ () => {
+ /// Casts all elements of `self` to `u32`.
+ #[inline(always)]
+ pub fn as_uvec3(&self) -> UVec3 {
+ UVec3::new(self.x as u32, self.y as u32, self.z as u32)
+ }
+ };
+}
+
+macro_rules! impl_as_uvec4 {
+ () => {
+ /// Casts all elements of `self` to `u32`.
+ #[inline(always)]
+ pub fn as_uvec4(&self) -> UVec4 {
+ UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
+ }
+ };
+}
diff --git a/src/vec2.rs b/src/vec2.rs
new file mode 100644
index 0000000..98ffa38
--- /dev/null
+++ b/src/vec2.rs
@@ -0,0 +1,317 @@
+use crate::core::traits::vector::*;
+use crate::{BVec2, DVec3, IVec3, UVec3, Vec3, XY};
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::{f32, ops::*};
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+// Inherent methods shared by every 2D vector type (float, signed and unsigned):
+// the unit/zero constants, `new`, `extend` to 3D, and `to_array`. Size-generic
+// methods come in at the end via `impl_vecn_common_methods!`.
+macro_rules! impl_vec2_common_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ /// All zeroes.
+ pub const ZERO: Self = Self($inner::ZERO);
+
+ /// All ones.
+ pub const ONE: Self = Self($inner::ONE);
+
+ /// `[1, 0]`: a unit-length vector pointing along the positive X axis.
+ pub const X: Self = Self($inner::X);
+
+ /// `[0, 1]`: a unit-length vector pointing along the positive Y axis.
+ pub const Y: Self = Self($inner::Y);
+
+ /// The unit axes.
+ pub const AXES: [Self; 2] = [Self::X, Self::Y];
+
+ /// Creates a new vector.
+ #[inline(always)]
+ pub fn new(x: $t, y: $t) -> $vec2 {
+ Self(Vector2::new(x, y))
+ }
+
+ /// Creates a 3D vector from `self` and the given `z` value.
+ #[inline(always)]
+ pub fn extend(self, z: $t) -> $vec3 {
+ $vec3::new(self.x, self.y, z)
+ }
+
+ /// `[x, y]`
+ #[inline(always)]
+ pub fn to_array(&self) -> [$t; 2] {
+ [self.x, self.y]
+ }
+
+ impl_vecn_common_methods!($t, $vec2, $mask, $inner, Vector2);
+ };
+}
+
+// Methods for signed 2D vectors: everything from the common set plus the
+// 2D-specific `perp` (90-degree rotation) and `perp_dot` operations.
+macro_rules! impl_vec2_signed_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ impl_vec2_common_methods!($t, $vec2, $vec3, $mask, $inner);
+ impl_vecn_signed_methods!($t, $vec2, $mask, $inner, SignedVector2);
+
+ /// Returns a vector that is equal to `self` rotated by 90 degrees.
+ #[inline(always)]
+ pub fn perp(self) -> Self {
+ Self(self.0.perp())
+ }
+
+ /// The perpendicular dot product of `self` and `other`.
+ /// Also known as the wedge product, 2d cross product, and determinant.
+ #[doc(alias = "wedge")]
+ #[doc(alias = "cross")]
+ #[doc(alias = "determinant")]
+ #[inline(always)]
+ pub fn perp_dot(self, other: $vec2) -> $t {
+ self.0.perp_dot(other.0)
+ }
+ };
+}
+
+// Methods for floating-point 2D vectors: the signed set plus the generic
+// float methods and `angle_between`.
+macro_rules! impl_vec2_float_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ impl_vec2_signed_methods!($t, $vec2, $vec3, $mask, $inner);
+ impl_vecn_float_methods!($t, $vec2, $mask, $inner, FloatVector2);
+
+ /// Returns the angle (in radians) between `self` and `other`.
+ ///
+ /// The input vectors do not need to be unit length however they must be non-zero.
+ #[inline(always)]
+ pub fn angle_between(self, other: Self) -> $t {
+ self.0.angle_between(other.0)
+ }
+ };
+}
+
+// Trait impls shared by all 2D vector types, plus a free `$new` constructor
+// function (e.g. `vec2(x, y)`). Indexing, Display/Debug, tuple conversions and
+// Deref to the `XY` storage struct (which exposes the `.x`/`.y` fields).
+macro_rules! impl_vec2_common_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ /// Creates a 2-dimensional vector.
+ #[inline(always)]
+ pub fn $new(x: $t, y: $t) -> $vec2 {
+ $vec2::new(x, y)
+ }
+
+ // Panics on indices >= 2, matching slice-style indexing behaviour.
+ impl Index<usize> for $vec2 {
+ type Output = $t;
+ #[inline(always)]
+ fn index(&self, index: usize) -> &Self::Output {
+ match index {
+ 0 => &self.x,
+ 1 => &self.y,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+
+ impl IndexMut<usize> for $vec2 {
+ #[inline(always)]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ match index {
+ 0 => &mut self.x,
+ 1 => &mut self.y,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec2 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{}, {}]", self.x, self.y)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec2 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple(stringify!($vec2))
+ .field(&self.x)
+ .field(&self.y)
+ .finish()
+ }
+ }
+
+ impl From<($t, $t)> for $vec2 {
+ #[inline(always)]
+ fn from(t: ($t, $t)) -> Self {
+ Self($inner::from_tuple(t))
+ }
+ }
+
+ impl From<$vec2> for ($t, $t) {
+ #[inline(always)]
+ fn from(v: $vec2) -> Self {
+ v.0.into_tuple()
+ }
+ }
+
+ // Deref to the `XY` storage struct is what makes `.x`/`.y` field access
+ // work on the newtype wrapper.
+ impl Deref for $vec2 {
+ type Target = XY<$t>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ self.0.as_ref_xy()
+ }
+ }
+
+ impl DerefMut for $vec2 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.0.as_mut_xy()
+ }
+ }
+
+ impl_vecn_common_traits!($t, 2, $vec2, $inner, Vector2);
+ };
+}
+
+// Unsigned 2D vectors only get the common traits (no `Neg`).
+macro_rules! impl_vec2_unsigned_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ impl_vec2_common_traits!($t, $new, $vec2, $vec3, $mask, $inner);
+ };
+}
+
+// Signed 2D vectors additionally get the signed traits (currently `Neg`).
+macro_rules! impl_vec2_signed_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $mask:ident, $inner:ident) => {
+ impl_vec2_common_traits!($t, $new, $vec2, $vec3, $mask, $inner);
+ impl_vecn_signed_traits!($t, 2, $vec2, $inner, SignedVector2);
+ };
+}
+
+// Scalar storage: Vec2 has no SIMD backing.
+type XYF32 = XY<f32>;
+
+/// A 2-dimensional vector.
+// With the "cuda" feature the struct is given 8-byte alignment via repr(C);
+// otherwise it is a transparent wrapper over `XY<f32>`.
+#[derive(Clone, Copy)]
+#[cfg_attr(feature = "cuda", repr(C, align(8)))]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+pub struct Vec2(pub(crate) XYF32);
+
+impl Vec2 {
+ impl_vec2_float_methods!(f32, Vec2, Vec3, BVec2, XYF32);
+ impl_as_dvec2!();
+ impl_as_ivec2!();
+ impl_as_uvec2!();
+}
+impl_vec2_signed_traits!(f32, vec2, Vec2, Vec3, BVec2, XYF32);
+
+// f64 variant; same structure as Vec2 but 16-byte aligned under "cuda".
+type XYF64 = XY<f64>;
+
+/// A 2-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(feature = "cuda", repr(C, align(16)))]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+pub struct DVec2(pub(crate) XYF64);
+
+impl DVec2 {
+ impl_vec2_float_methods!(f64, DVec2, DVec3, BVec2, XYF64);
+ impl_as_vec2!();
+ impl_as_ivec2!();
+ impl_as_uvec2!();
+}
+impl_vec2_signed_traits!(f64, dvec2, DVec2, DVec3, BVec2, XYF64);
+
+// i32 variant: signed methods/traits plus Eq/Hash and the integer-only
+// shift and bitwise operators (by scalar, by IVec2 and by UVec2).
+type XYI32 = XY<i32>;
+
+/// A 2-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(feature = "cuda", repr(C, align(8)))]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+pub struct IVec2(pub(crate) XYI32);
+
+impl IVec2 {
+ impl_vec2_signed_methods!(i32, IVec2, IVec3, BVec2, XYI32);
+ impl_as_vec2!();
+ impl_as_dvec2!();
+ impl_as_uvec2!();
+}
+impl_vec2_signed_traits!(i32, ivec2, IVec2, IVec3, BVec2, XYI32);
+impl_vecn_eq_hash_traits!(i32, 2, IVec2);
+
+impl_vecn_scalar_shift_op_traits!(IVec2, i8, XYI32);
+impl_vecn_scalar_shift_op_traits!(IVec2, i16, XYI32);
+impl_vecn_scalar_shift_op_traits!(IVec2, i32, XYI32);
+impl_vecn_scalar_shift_op_traits!(IVec2, u8, XYI32);
+impl_vecn_scalar_shift_op_traits!(IVec2, u16, XYI32);
+impl_vecn_scalar_shift_op_traits!(IVec2, u32, XYI32);
+
+impl_vecn_shift_op_traits!(IVec2, IVec2, XYI32);
+impl_vecn_shift_op_traits!(IVec2, UVec2, XYI32);
+
+impl_vecn_scalar_bit_op_traits!(IVec2, i32, XYI32);
+
+impl_vecn_bit_op_traits!(IVec2, XYI32);
+
+// u32 variant: common (unsigned) methods only — no Neg/perp — plus Eq/Hash
+// and the integer shift/bitwise operators.
+type XYU32 = XY<u32>;
+
+/// A 2-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(feature = "cuda", repr(C, align(8)))]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+pub struct UVec2(pub(crate) XYU32);
+
+impl UVec2 {
+ impl_vec2_common_methods!(u32, UVec2, UVec3, BVec2, XYU32);
+ impl_as_vec2!();
+ impl_as_dvec2!();
+ impl_as_ivec2!();
+}
+impl_vec2_unsigned_traits!(u32, uvec2, UVec2, UVec3, BVec2, XYU32);
+impl_vecn_eq_hash_traits!(u32, 2, UVec2);
+
+impl_vecn_scalar_shift_op_traits!(UVec2, i8, XYU32);
+impl_vecn_scalar_shift_op_traits!(UVec2, i16, XYU32);
+impl_vecn_scalar_shift_op_traits!(UVec2, i32, XYU32);
+impl_vecn_scalar_shift_op_traits!(UVec2, u8, XYU32);
+impl_vecn_scalar_shift_op_traits!(UVec2, u16, XYU32);
+impl_vecn_scalar_shift_op_traits!(UVec2, u32, XYU32);
+
+impl_vecn_shift_op_traits!(UVec2, IVec2, XYU32);
+impl_vecn_shift_op_traits!(UVec2, UVec2, XYU32);
+
+impl_vecn_scalar_bit_op_traits!(UVec2, u32, XYU32);
+
+impl_vecn_bit_op_traits!(UVec2, XYU32);
+
+// Compile-time layout checks: each module asserts the expected size of the
+// type and its alignment (element alignment normally, widened under "cuda").
+mod const_test_vec2 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<f32>(),
+ core::mem::align_of::<super::Vec2>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(8, core::mem::align_of::<super::Vec2>());
+ const_assert_eq!(8, core::mem::size_of::<super::Vec2>());
+}
+
+mod const_test_dvec2 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<f64>(),
+ core::mem::align_of::<super::DVec2>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(16, core::mem::align_of::<super::DVec2>());
+ const_assert_eq!(16, core::mem::size_of::<super::DVec2>());
+}
+
+mod const_test_ivec2 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<i32>(),
+ core::mem::align_of::<super::IVec2>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(8, core::mem::align_of::<super::IVec2>());
+ const_assert_eq!(8, core::mem::size_of::<super::IVec2>());
+}
+
+mod const_test_uvec2 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<u32>(),
+ core::mem::align_of::<super::UVec2>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(8, core::mem::align_of::<super::UVec2>());
+ const_assert_eq!(8, core::mem::size_of::<super::UVec2>());
+}
diff --git a/src/vec3.rs b/src/vec3.rs
new file mode 100644
index 0000000..56978d3
--- /dev/null
+++ b/src/vec3.rs
@@ -0,0 +1,454 @@
+use crate::core::traits::vector::*;
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+use crate::BVec3A;
+use crate::{BVec3, DVec2, DVec4, IVec2, IVec4, UVec2, UVec4, Vec2, Vec4, XYZ};
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::{f32, ops::*};
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+use core::arch::wasm32::v128;
+
+// Inherent methods shared by every 3D vector type: unit/zero constants, `new`,
+// `extend` to 4D, `truncate` to 2D, `cross`, and `to_array`. Size-generic
+// methods come in via `impl_vecn_common_methods!` at the end.
+macro_rules! impl_vec3_common_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ /// All zeroes.
+ pub const ZERO: Self = Self(VectorConst::ZERO);
+
+ /// All ones.
+ pub const ONE: Self = Self(VectorConst::ONE);
+
+ /// `[1, 0, 0]`: a unit-length vector pointing along the positive X axis.
+ pub const X: Self = Self(Vector3Const::X);
+
+ /// `[0, 1, 0]`: a unit-length vector pointing along the positive Y axis.
+ pub const Y: Self = Self(Vector3Const::Y);
+
+ /// `[0, 0, 1]`: a unit-length vector pointing along the positive Z axis.
+ pub const Z: Self = Self(Vector3Const::Z);
+
+ /// The unit axes.
+ pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
+
+ /// Creates a new 3D vector.
+ #[inline(always)]
+ pub fn new(x: $t, y: $t, z: $t) -> Self {
+ Self(Vector3::new(x, y, z))
+ }
+
+ /// Creates a 4D vector from `self` and the given `w` value.
+ #[inline(always)]
+ pub fn extend(self, w: $t) -> $vec4 {
+ // TODO: Optimize?
+ $vec4(Vector4::new(self.x, self.y, self.z, w))
+ }
+
+ /// Creates a `Vec2` from the `x` and `y` elements of `self`, discarding `z`.
+ ///
+ /// Truncation may also be performed by using `self.xy()` or `Vec2::from()`.
+ #[inline(always)]
+ pub fn truncate(self) -> $vec2 {
+ $vec2(Vector3::into_xy(self.0))
+ }
+
+ /// Returns the dot product result in all elements of the vector
+ // Crate-internal splatted dot product; `dead_code` allowed because not
+ // every instantiating type uses it.
+ #[inline(always)]
+ #[allow(dead_code)]
+ pub(crate) fn dot_as_vec3(self, other: Self) -> Self {
+ Self(Vector3::dot_into_vec(self.0, other.0))
+ }
+
+ /// Computes the cross product of `self` and `other`.
+ #[inline(always)]
+ pub fn cross(self, other: Self) -> Self {
+ Self(self.0.cross(other.0))
+ }
+
+ /// `[x, y, z]`
+ #[inline(always)]
+ pub fn to_array(&self) -> [$t; 3] {
+ [self.x, self.y, self.z]
+ }
+
+ impl_vecn_common_methods!($t, $vec3, $mask, $inner, Vector3);
+ };
+}
+
+// Trait impls shared by all 3D vector types, plus a free `$new` constructor
+// function (e.g. `vec3(x, y, z)`). Mirrors the 2D version: indexing,
+// Debug/Display, tuple conversions, a `(vec2, z)` constructor, and Deref to
+// the `XYZ` storage struct for `.x`/`.y`/`.z` field access.
+macro_rules! impl_vec3_common_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $inner:ident) => {
+ /// Creates a 3-dimensional vector.
+ #[inline(always)]
+ pub fn $new(x: $t, y: $t, z: $t) -> $vec3 {
+ $vec3::new(x, y, z)
+ }
+
+ // Panics on indices >= 3, matching slice-style indexing behaviour.
+ impl Index<usize> for $vec3 {
+ type Output = $t;
+ #[inline(always)]
+ fn index(&self, index: usize) -> &Self::Output {
+ match index {
+ 0 => &self.x,
+ 1 => &self.y,
+ 2 => &self.z,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+
+ impl IndexMut<usize> for $vec3 {
+ #[inline(always)]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ match index {
+ 0 => &mut self.x,
+ 1 => &mut self.y,
+ 2 => &mut self.z,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec3 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple(stringify!($vec3))
+ .field(&self.x)
+ .field(&self.y)
+ .field(&self.z)
+ .finish()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec3 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
+ }
+ }
+
+ impl From<($vec2, $t)> for $vec3 {
+ #[inline(always)]
+ fn from((v, z): ($vec2, $t)) -> Self {
+ Self::new(v.x, v.y, z)
+ }
+ }
+
+ impl From<($t, $t, $t)> for $vec3 {
+ #[inline(always)]
+ fn from(t: ($t, $t, $t)) -> Self {
+ Self(Vector3::from_tuple(t))
+ }
+ }
+
+ impl From<$vec3> for ($t, $t, $t) {
+ #[inline(always)]
+ fn from(v: $vec3) -> Self {
+ v.into_tuple()
+ }
+ }
+
+ // Deref to the `XYZ` storage struct is what makes `.x`/`.y`/`.z` field
+ // access work on the newtype wrapper.
+ impl Deref for $vec3 {
+ type Target = XYZ<$t>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ self.0.as_ref_xyz()
+ }
+ }
+
+ impl DerefMut for $vec3 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.0.as_mut_xyz()
+ }
+ }
+
+ impl_vecn_common_traits!($t, 3, $vec3, $inner, Vector3);
+ };
+}
+
+// Signed 3D vectors: common methods plus the size-generic signed methods.
+macro_rules! impl_vec3_signed_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl_vec3_common_methods!($t, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_vecn_signed_methods!($t, $vec3, $mask, $inner, SignedVector3);
+ };
+}
+
+// Float 3D vectors: the signed set, generic float methods, `angle_between`,
+// and orthogonal/orthonormal basis helpers (the latter based on the Pixar
+// orthonormal-basis construction referenced below).
+macro_rules! impl_vec3_float_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl_vec3_signed_methods!($t, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_vecn_float_methods!($t, $vec3, $mask, $inner, FloatVector3);
+
+ /// Returns the angle (in radians) between two vectors.
+ ///
+ /// The input vectors do not need to be unit length however they must be non-zero.
+ #[inline(always)]
+ pub fn angle_between(self, other: Self) -> $t {
+ self.0.angle_between(other.0)
+ }
+
+ /// Returns some vector that is orthogonal to the given one.
+ ///
+ /// The input vector must be finite and non-zero.
+ ///
+ /// The output vector is not necessarily unit-length.
+ /// For that use [`Self::any_orthonormal_vector`] instead.
+ #[inline]
+ pub fn any_orthogonal_vector(&self) -> Self {
+ // This can probably be optimized
+ // Branch on the larger of |x|, |y| so the implicit cross product
+ // below cannot be taken against a (near-)parallel axis.
+ if self.x.abs() > self.y.abs() {
+ Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
+ } else {
+ Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
+ }
+ }
+
+ /// Returns any unit-length vector that is orthogonal to the given one.
+ /// The input vector must be finite and non-zero.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `self` is not normalized when `glam_assert` is enabled.
+ #[inline]
+ pub fn any_orthonormal_vector(&self) -> Self {
+ glam_assert!(self.is_normalized());
+ // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
+ // `copysign` needs std; `signum` is the no_std fallback. Note the
+ // two differ for z == -0.0, which the paper's construction tolerates.
+ #[cfg(feature = "std")]
+ let sign = (1.0 as $t).copysign(self.z);
+ #[cfg(not(feature = "std"))]
+ let sign = self.z.signum();
+ let a = -1.0 / (sign + self.z);
+ let b = self.x * self.y * a;
+ Self::new(b, sign + self.y * self.y * a, -self.y)
+ }
+
+ /// Given a unit-length vector return two other vectors that together form an orthonormal
+ /// basis. That is, all three vectors are orthogonal to each other and are normalized.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `self` is not normalized when `glam_assert` is enabled.
+ #[inline]
+ pub fn any_orthonormal_pair(&self) -> (Self, Self) {
+ glam_assert!(self.is_normalized());
+ // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
+ #[cfg(feature = "std")]
+ let sign = (1.0 as $t).copysign(self.z);
+ #[cfg(not(feature = "std"))]
+ let sign = self.z.signum();
+ let a = -1.0 / (sign + self.z);
+ let b = self.x * self.y * a;
+ (
+ Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
+ Self::new(b, sign + self.y * self.y * a, -self.y),
+ )
+ }
+ };
+}
+
+// implements traits that are common between `Vec3`, `Vec3A` and `Vec4` types.
+macro_rules! impl_vec3_float_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $inner:ident) => {
+ impl_vec3_common_traits!($t, $new, $vec2, $vec3, $vec4, $inner);
+ impl_vecn_signed_traits!($t, 3, $vec3, $inner, SignedVector3);
+ };
+}
+
+// implements f32 functionality common between `Vec3` and `Vec3A` types.
+// Emits the full inherent impl (float methods + as_* casts) and the traits
+// in one shot so both types stay in sync.
+macro_rules! impl_f32_vec3 {
+ ($new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl $vec3 {
+ impl_vec3_float_methods!(f32, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_as_dvec3!();
+ impl_as_ivec3!();
+ impl_as_uvec3!();
+ }
+ impl_vec3_float_traits!(f32, $new, $vec2, $vec3, $vec4, $inner);
+ };
+}
+
+// Vec3 always uses scalar XYZ<f32> storage.
+type XYZF32 = XYZ<f32>;
+
+/// A 3-dimensional vector without SIMD support.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct Vec3(pub(crate) XYZF32);
+impl_f32_vec3!(vec3, Vec2, Vec3, Vec4, BVec3, XYZF32);
+
+// Vec3A storage selection: a native SIMD register type when SSE2 or wasm
+// simd128 is available (and "scalar-math" is off), otherwise a 16-byte
+// aligned scalar struct so layout stays identical across configurations.
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type XYZF32A = __m128;
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type XYZF32A = v128;
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+type XYZF32A = crate::core::storage::XYZF32A16;
+
+/// A 3-dimensional vector with SIMD support.
+///
+/// This type is 16 byte aligned. A SIMD vector type is used for storage on supported platforms for
+/// better performance than the `Vec3` type.
+///
+/// It is possible to convert between `Vec3` and `Vec3A` types using `From` trait implementations.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct Vec3A(pub(crate) XYZF32A);
+
+// The mask type differs with the storage: SIMD builds pair Vec3A with the
+// SIMD mask BVec3A, scalar builds with BVec3.
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl_f32_vec3!(vec3a, Vec2, Vec3A, Vec4, BVec3A, XYZF32A);
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+impl_f32_vec3!(vec3a, Vec2, Vec3A, Vec4, BVec3, XYZF32A);
+
+// Lossless conversions between the scalar and aligned storage forms.
+impl From<Vec3> for Vec3A {
+ #[inline(always)]
+ fn from(v: Vec3) -> Self {
+ Self(v.0.into())
+ }
+}
+
+impl From<Vec3A> for Vec3 {
+ #[inline(always)]
+ fn from(v: Vec3A) -> Self {
+ Self(v.0.into())
+ }
+}
+
+// f64 variant; scalar storage only (no SIMD path for f64 3D vectors here).
+type XYZF64 = XYZ<f64>;
+
+/// A 3-dimensional vector.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct DVec3(pub(crate) XYZF64);
+
+impl DVec3 {
+ impl_vec3_float_methods!(f64, DVec2, DVec3, DVec4, BVec3, XYZF64);
+ impl_as_vec3!();
+ impl_as_ivec3!();
+ impl_as_uvec3!();
+}
+impl_vec3_float_traits!(f64, dvec3, DVec2, DVec3, DVec4, XYZF64);
+
+// i32 variant: common + signed methods/traits, Eq/Hash, and the integer-only
+// shift and bitwise operators (by scalar, by IVec3 and by UVec3).
+type XYZI32 = XYZ<i32>;
+
+/// A 3-dimensional vector.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct IVec3(pub(crate) XYZI32);
+
+impl IVec3 {
+ impl_vec3_common_methods!(i32, IVec2, IVec3, IVec4, BVec3, XYZI32);
+ impl_vecn_signed_methods!(i32, IVec3, BVec3, XYZI32, SignedVector3);
+ impl_as_vec3!();
+ impl_as_dvec3!();
+ impl_as_uvec3!();
+}
+impl_vec3_common_traits!(i32, ivec3, IVec2, IVec3, IVec4, XYZI32);
+impl_vecn_signed_traits!(i32, 3, IVec3, XYZI32, SignedVector3);
+impl_vecn_eq_hash_traits!(i32, 3, IVec3);
+
+impl_vecn_scalar_shift_op_traits!(IVec3, i8, XYZI32);
+impl_vecn_scalar_shift_op_traits!(IVec3, i16, XYZI32);
+impl_vecn_scalar_shift_op_traits!(IVec3, i32, XYZI32);
+impl_vecn_scalar_shift_op_traits!(IVec3, u8, XYZI32);
+impl_vecn_scalar_shift_op_traits!(IVec3, u16, XYZI32);
+impl_vecn_scalar_shift_op_traits!(IVec3, u32, XYZI32);
+
+impl_vecn_shift_op_traits!(IVec3, IVec3, XYZI32);
+impl_vecn_shift_op_traits!(IVec3, UVec3, XYZI32);
+
+impl_vecn_scalar_bit_op_traits!(IVec3, i32, XYZI32);
+
+impl_vecn_bit_op_traits!(IVec3, XYZI32);
+
+// u32 variant: common (unsigned) methods only, plus Eq/Hash and the integer
+// shift/bitwise operators.
+type XYZU32 = XYZ<u32>;
+
+/// A 3-dimensional vector.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct UVec3(pub(crate) XYZU32);
+
+impl UVec3 {
+ impl_vec3_common_methods!(u32, UVec2, UVec3, UVec4, BVec3, XYZU32);
+ impl_as_vec3!();
+ impl_as_dvec3!();
+ impl_as_ivec3!();
+}
+impl_vec3_common_traits!(u32, uvec3, UVec2, UVec3, UVec4, XYZU32);
+impl_vecn_eq_hash_traits!(u32, 3, UVec3);
+
+impl_vecn_scalar_shift_op_traits!(UVec3, i8, XYZU32);
+impl_vecn_scalar_shift_op_traits!(UVec3, i16, XYZU32);
+impl_vecn_scalar_shift_op_traits!(UVec3, i32, XYZU32);
+impl_vecn_scalar_shift_op_traits!(UVec3, u8, XYZU32);
+impl_vecn_scalar_shift_op_traits!(UVec3, u16, XYZU32);
+impl_vecn_scalar_shift_op_traits!(UVec3, u32, XYZU32);
+
+impl_vecn_shift_op_traits!(UVec3, IVec3, XYZU32);
+impl_vecn_shift_op_traits!(UVec3, UVec3, XYZU32);
+
+impl_vecn_scalar_bit_op_traits!(UVec3, u32, XYZU32);
+
+impl_vecn_bit_op_traits!(UVec3, XYZU32);
+
+// Compile-time layout checks for the 3D types. Note Vec3A is asserted to be
+// 16-byte aligned and 16 bytes (one padding float) in every configuration.
+mod const_test_vec3 {
+ const_assert_eq!(
+ core::mem::align_of::<f32>(),
+ core::mem::align_of::<super::Vec3>()
+ );
+ const_assert_eq!(12, core::mem::size_of::<super::Vec3>());
+}
+
+mod const_test_vec3a {
+ const_assert_eq!(16, core::mem::align_of::<super::Vec3A>());
+ const_assert_eq!(16, core::mem::size_of::<super::Vec3A>());
+}
+
+mod const_test_dvec3 {
+ const_assert_eq!(
+ core::mem::align_of::<f64>(),
+ core::mem::align_of::<super::DVec3>()
+ );
+ const_assert_eq!(24, core::mem::size_of::<super::DVec3>());
+}
+
+mod const_test_ivec3 {
+ const_assert_eq!(
+ core::mem::align_of::<i32>(),
+ core::mem::align_of::<super::IVec3>()
+ );
+ const_assert_eq!(12, core::mem::size_of::<super::IVec3>());
+}
+
+mod const_test_uvec3 {
+ const_assert_eq!(
+ core::mem::align_of::<u32>(),
+ core::mem::align_of::<super::UVec3>()
+ );
+ const_assert_eq!(12, core::mem::size_of::<super::UVec3>());
+}
diff --git a/src/vec4.rs b/src/vec4.rs
new file mode 100644
index 0000000..9f12b89
--- /dev/null
+++ b/src/vec4.rs
@@ -0,0 +1,434 @@
+use crate::core::traits::vector::*;
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+use crate::BVec4A;
+use crate::{BVec4, DVec2, DVec3, IVec2, IVec3, UVec2, UVec3, Vec2, Vec3, Vec3A, XYZW};
+use core::f32;
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::ops::*;
+
+#[cfg(not(feature = "std"))]
+use num_traits::Float;
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+use core::arch::wasm32::v128;
+
+macro_rules! impl_vec4_common_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ /// All zeroes.
+ pub const ZERO: Self = Self(VectorConst::ZERO);
+
+ /// All ones.
+ pub const ONE: Self = Self(VectorConst::ONE);
+
+ /// `[1, 0, 0, 0]`: a unit-length vector pointing along the positive X axis.
+ pub const X: Self = Self(Vector4Const::X);
+
+ /// `[0, 1, 0, 0]`: a unit-length vector pointing along the positive Y axis.
+ pub const Y: Self = Self(Vector4Const::Y);
+
+ /// `[0, 0, 1, 0]`: a unit-length vector pointing along the positive Z axis.
+ pub const Z: Self = Self(Vector4Const::Z);
+
+ /// `[0, 0, 0, 1]`: a unit-length vector pointing along the positive W axis.
+ pub const W: Self = Self(Vector4Const::W);
+
+ /// The unit axes.
+ pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
+
+ /// Creates a new 4D vector.
+ #[inline(always)]
+ pub fn new(x: $t, y: $t, z: $t, w: $t) -> Self {
+ Self(Vector4::new(x, y, z, w))
+ }
+
+ /// Creates a `Vec3` from the `x`, `y` and `z` elements of `self`, discarding `w`.
+ ///
+ /// Truncation to `Vec3` may also be performed by using `self.xyz()` or `Vec3::from()`.
+ ///
+ /// To truncate to `Vec3A` use `Vec3A::from()`.
+ #[inline(always)]
+ pub fn truncate(self) -> $vec3 {
+ $vec3::new(self.x, self.y, self.z)
+ }
+
+ /// `[x, y, z, w]`
+ #[inline(always)]
+ pub fn to_array(&self) -> [$t; 4] {
+ [self.x, self.y, self.z, self.w]
+ }
+
+ impl_vecn_common_methods!($t, $vec4, $mask, $inner, Vector4);
+ };
+}
+
+macro_rules! impl_vec4_common_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ /// Creates a 4-dimensional vector.
+ #[inline(always)]
+ pub fn $new(x: $t, y: $t, z: $t, w: $t) -> $vec4 {
+ $vec4::new(x, y, z, w)
+ }
+
+ impl Index<usize> for $vec4 {
+ type Output = $t;
+ #[inline(always)]
+ fn index(&self, index: usize) -> &Self::Output {
+ match index {
+ 0 => &self.x,
+ 1 => &self.y,
+ 2 => &self.z,
+ 3 => &self.w,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+
+ impl IndexMut<usize> for $vec4 {
+ #[inline(always)]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ match index {
+ 0 => &mut self.x,
+ 1 => &mut self.y,
+ 2 => &mut self.z,
+ 3 => &mut self.w,
+ _ => panic!("index out of bounds"),
+ }
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec4 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple(stringify!($vec4))
+ .field(&self.x)
+ .field(&self.y)
+ .field(&self.z)
+ .field(&self.w)
+ .finish()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec4 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
+ }
+ }
+
+ impl From<($t, $t, $t, $t)> for $vec4 {
+ #[inline(always)]
+ fn from(t: ($t, $t, $t, $t)) -> Self {
+ Self(Vector4::from_tuple(t))
+ }
+ }
+
+ impl From<$vec4> for ($t, $t, $t, $t) {
+ #[inline(always)]
+ fn from(v: $vec4) -> Self {
+ Vector4::into_tuple(v.0)
+ }
+ }
+
+ impl From<($vec3, $t)> for $vec4 {
+ #[inline(always)]
+ fn from((v, w): ($vec3, $t)) -> Self {
+ Self::new(v.x, v.y, v.z, w)
+ }
+ }
+
+ impl From<($t, $vec3)> for $vec4 {
+ #[inline(always)]
+ fn from((x, v): ($t, $vec3)) -> Self {
+ Self::new(x, v.x, v.y, v.z)
+ }
+ }
+
+ impl From<($vec2, $t, $t)> for $vec4 {
+ #[inline(always)]
+ fn from((v, z, w): ($vec2, $t, $t)) -> Self {
+ Self::new(v.x, v.y, z, w)
+ }
+ }
+
+ impl From<($vec2, $vec2)> for $vec4 {
+ #[inline(always)]
+ fn from((v, u): ($vec2, $vec2)) -> Self {
+ Self::new(v.x, v.y, u.x, u.y)
+ }
+ }
+
+ impl Deref for $vec4 {
+ type Target = XYZW<$t>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ self.0.as_ref_xyzw()
+ }
+ }
+
+ impl DerefMut for $vec4 {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.0.as_mut_xyzw()
+ }
+ }
+
+ impl_vecn_common_traits!($t, 4, $vec4, $inner, Vector4);
+ };
+}
+
+macro_rules! impl_vec4_signed_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl_vec4_common_methods!($t, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_vecn_signed_methods!($t, $vec4, $mask, $inner, SignedVector4);
+ };
+}
+
+macro_rules! impl_vec4_signed_traits {
+ ($t:ty, $new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl_vec4_common_traits!($t, $new, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_vecn_signed_traits!($t, 4, $vec4, $inner, SignedVector4);
+ };
+}
+
+macro_rules! impl_vec4_float_methods {
+ ($t:ty, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl_vec4_signed_methods!($t, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_vecn_float_methods!($t, $vec4, $mask, $inner, FloatVector4);
+ };
+}
+
+// implement `Vec4` functionality
+macro_rules! impl_f32_vec4 {
+ ($new:ident, $vec2:ident, $vec3:ident, $vec4:ident, $mask:ident, $inner:ident) => {
+ impl $vec4 {
+ impl_vec4_float_methods!(f32, $vec2, $vec3, $vec4, $mask, $inner);
+ impl_as_dvec4!();
+ impl_as_ivec4!();
+ impl_as_uvec4!();
+ }
+ impl_vec4_signed_traits!(f32, $new, $vec2, $vec3, $vec4, $mask, $inner);
+ };
+}
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+type XYZWF32 = XYZW<f32>;
+
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type XYZWF32 = __m128;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type XYZWF32 = v128;
+
+/// A 4-dimensional vector.
+///
+/// This type uses 16 byte aligned SIMD vector type for storage on supported platforms.
+#[derive(Clone, Copy)]
+#[cfg_attr(
+ any(
+ not(any(
+ feature = "scalar-math",
+ target_arch = "spirv",
+ target_feature = "sse2",
+ target_feature = "simd128"
+ )),
+ feature = "cuda"
+ ),
+ repr(C, align(16))
+)]
+#[cfg_attr(
+ all(
+ any(
+ feature = "scalar-math",
+ target_arch = "spirv",
+ target_feature = "sse2",
+ target_feature = "simd128"
+ ),
+ not(feature = "cuda")
+ ),
+ repr(transparent)
+)]
+pub struct Vec4(pub(crate) XYZWF32);
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+impl_f32_vec4!(vec4, Vec2, Vec3, Vec4, BVec4, XYZWF32);
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl_f32_vec4!(vec4, Vec2, Vec3, Vec4, BVec4A, XYZWF32);
+
+impl From<Vec4> for Vec3A {
+ /// Creates a `Vec3A` from the `x`, `y` and `z` elements of `self` discarding `w`.
+ ///
+ /// On architectures where SIMD is supported such as SSE2 on `x86_64` this conversion is a noop.
+ #[inline(always)]
+ fn from(v: Vec4) -> Self {
+ #[allow(clippy::useless_conversion)]
+ Self(v.0.into())
+ }
+}
+
+impl From<(Vec3A, f32)> for Vec4 {
+ #[inline(always)]
+ fn from((v, w): (Vec3A, f32)) -> Self {
+ v.extend(w)
+ }
+}
+
+impl From<(f32, Vec3A)> for Vec4 {
+ #[inline(always)]
+ fn from((x, v): (f32, Vec3A)) -> Self {
+ Self::new(x, v.x, v.y, v.z)
+ }
+}
+
+type XYZWF64 = XYZW<f64>;
+
+/// A 4-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+#[cfg_attr(feature = "cuda", repr(C, align(16)))]
+pub struct DVec4(pub(crate) XYZWF64);
+
+impl DVec4 {
+ impl_vec4_float_methods!(f64, DVec2, DVec3, DVec4, BVec4, XYZWF64);
+ impl_as_vec4!();
+ impl_as_ivec4!();
+ impl_as_uvec4!();
+}
+impl_vec4_signed_traits!(f64, dvec4, DVec2, DVec3, DVec4, BVec4, XYZWF64);
+
+type XYZWI32 = XYZW<i32>;
+
+/// A 4-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+#[cfg_attr(feature = "cuda", repr(C, align(16)))]
+pub struct IVec4(pub(crate) XYZWI32);
+
+impl IVec4 {
+ impl_vec4_signed_methods!(i32, IVec2, IVec3, IVec4, BVec4, XYZWI32);
+ impl_as_vec4!();
+ impl_as_dvec4!();
+ impl_as_uvec4!();
+}
+impl_vec4_signed_traits!(i32, ivec4, IVec2, IVec3, IVec4, BVec4, XYZWI32);
+impl_vecn_eq_hash_traits!(i32, 4, IVec4);
+
+impl_vecn_scalar_shift_op_traits!(IVec4, i8, XYZWI32);
+impl_vecn_scalar_shift_op_traits!(IVec4, i16, XYZWI32);
+impl_vecn_scalar_shift_op_traits!(IVec4, i32, XYZWI32);
+impl_vecn_scalar_shift_op_traits!(IVec4, u8, XYZWI32);
+impl_vecn_scalar_shift_op_traits!(IVec4, u16, XYZWI32);
+impl_vecn_scalar_shift_op_traits!(IVec4, u32, XYZWI32);
+
+impl_vecn_shift_op_traits!(IVec4, IVec4, XYZWI32);
+impl_vecn_shift_op_traits!(IVec4, UVec4, XYZWI32);
+
+impl_vecn_scalar_bit_op_traits!(IVec4, i32, XYZWI32);
+
+impl_vecn_bit_op_traits!(IVec4, XYZWI32);
+
+type XYZWU32 = XYZW<u32>;
+
+/// A 4-dimensional vector.
+#[derive(Clone, Copy)]
+#[cfg_attr(not(feature = "cuda"), repr(transparent))]
+#[cfg_attr(feature = "cuda", repr(C, align(16)))]
+pub struct UVec4(pub(crate) XYZWU32);
+
+impl UVec4 {
+ impl_vec4_common_methods!(u32, UVec2, UVec3, UVec4, BVec4, XYZWU32);
+ impl_as_vec4!();
+ impl_as_dvec4!();
+ impl_as_ivec4!();
+}
+impl_vec4_common_traits!(u32, uvec4, UVec2, UVec3, UVec4, BVec4, XYZWU32);
+impl_vecn_eq_hash_traits!(u32, 4, UVec4);
+
+impl_vecn_scalar_shift_op_traits!(UVec4, i8, XYZWU32);
+impl_vecn_scalar_shift_op_traits!(UVec4, i16, XYZWU32);
+impl_vecn_scalar_shift_op_traits!(UVec4, i32, XYZWU32);
+impl_vecn_scalar_shift_op_traits!(UVec4, u8, XYZWU32);
+impl_vecn_scalar_shift_op_traits!(UVec4, u16, XYZWU32);
+impl_vecn_scalar_shift_op_traits!(UVec4, u32, XYZWU32);
+
+impl_vecn_shift_op_traits!(UVec4, IVec4, XYZWU32);
+impl_vecn_shift_op_traits!(UVec4, UVec4, XYZWU32);
+
+impl_vecn_scalar_bit_op_traits!(UVec4, u32, XYZWU32);
+
+impl_vecn_bit_op_traits!(UVec4, XYZWU32);
+
+mod const_test_vec4 {
+ #[cfg(all(
+ any(feature = "scalar-math", target_arch = "spirv"),
+ not(feature = "cuda")
+ ))]
+ const_assert_eq!(
+ core::mem::align_of::<f32>(),
+ core::mem::align_of::<super::Vec4>()
+ );
+ #[cfg(not(any(feature = "scalar-math", target_arch = "spirv")))]
+ const_assert_eq!(16, core::mem::align_of::<super::Vec4>());
+ const_assert_eq!(16, core::mem::size_of::<super::Vec4>());
+}
+
+mod const_test_dvec4 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<f64>(),
+ core::mem::align_of::<super::DVec4>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(16, core::mem::align_of::<super::DVec4>());
+ const_assert_eq!(32, core::mem::size_of::<super::DVec4>());
+}
+
+mod const_test_ivec4 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<i32>(),
+ core::mem::align_of::<super::IVec4>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(16, core::mem::align_of::<super::IVec4>());
+ const_assert_eq!(16, core::mem::size_of::<super::IVec4>());
+}
+
+mod const_test_uvec4 {
+ #[cfg(not(feature = "cuda"))]
+ const_assert_eq!(
+ core::mem::align_of::<u32>(),
+ core::mem::align_of::<super::UVec4>()
+ );
+ #[cfg(feature = "cuda")]
+ const_assert_eq!(16, core::mem::align_of::<super::UVec4>());
+ const_assert_eq!(16, core::mem::size_of::<super::UVec4>());
+}
diff --git a/src/vec_mask.rs b/src/vec_mask.rs
new file mode 100644
index 0000000..882ead3
--- /dev/null
+++ b/src/vec_mask.rs
@@ -0,0 +1,460 @@
+use crate::core::traits::vector::{
+ MaskVector, MaskVector2, MaskVector3, MaskVector4, MaskVectorConst,
+};
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::{hash, ops::*};
+
+#[cfg(all(
+ target_arch = "x86",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86::*;
+#[cfg(all(
+ target_arch = "x86_64",
+ target_feature = "sse2",
+ not(feature = "scalar-math")
+))]
+use core::arch::x86_64::*;
+
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+use core::arch::wasm32::v128;
+
+macro_rules! impl_vecnmask_methods {
+ ($vecnmask:ident, $trait:ident) => {
+ /// Returns a bitmask with the lowest two bits set from the elements of `self`.
+ ///
+ /// A true element results in a `1` bit and a false element in a `0` bit. Element `x` goes
+ /// into the first lowest bit, element `y` into the second, etc.
+ #[inline]
+ pub fn bitmask(self) -> u32 {
+ $trait::bitmask(self.0)
+ }
+
+ /// Returns true if any of the elements are true, false otherwise.
+ #[inline]
+ pub fn any(self) -> bool {
+ $trait::any(self.0)
+ }
+
+ /// Returns true if all the elements are true, false otherwise.
+ #[inline]
+ pub fn all(self) -> bool {
+ $trait::all(self.0)
+ }
+ };
+}
+
+macro_rules! impl_vecnmask_traits {
+ ($vecnmask:ident, $inner:ident) => {
+ impl Default for $vecnmask {
+ #[inline]
+ fn default() -> Self {
+ Self($inner::FALSE)
+ }
+ }
+
+ impl PartialEq for $vecnmask {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.bitmask().eq(&other.bitmask())
+ }
+ }
+
+ impl Eq for $vecnmask {}
+
+ impl hash::Hash for $vecnmask {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.bitmask().hash(state);
+ }
+ }
+
+ impl BitAnd for $vecnmask {
+ type Output = Self;
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self(MaskVector::bitand(self.0, other.0))
+ }
+ }
+
+ impl BitAndAssign for $vecnmask {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ self.0 = MaskVector::bitand(self.0, other.0);
+ }
+ }
+
+ impl BitOr for $vecnmask {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ Self(MaskVector::bitor(self.0, other.0))
+ }
+ }
+
+ impl BitOrAssign for $vecnmask {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.0 = MaskVector::bitor(self.0, other.0);
+ }
+ }
+
+ impl Not for $vecnmask {
+ type Output = Self;
+ #[inline]
+ fn not(self) -> Self {
+ Self(MaskVector::not(self.0))
+ }
+ }
+
+ impl From<$vecnmask> for $inner {
+ #[inline]
+ fn from(t: $vecnmask) -> Self {
+ t.0
+ }
+ }
+ };
+}
+
+macro_rules! impl_vec2mask {
+ ($vec2mask:ident, $t:ty, $inner:ident) => {
+ impl $vec2mask {
+ /// Creates a new vector mask.
+ #[inline]
+ pub fn new(x: bool, y: bool) -> Self {
+ Self(MaskVector2::new(x, y))
+ }
+
+ impl_vecnmask_methods!($vec2mask, MaskVector2);
+ }
+
+ impl_vecnmask_traits!($vec2mask, $inner);
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec2mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = self.0.into_u32_array();
+ write!(f, "{}({:#x}, {:#x})", stringify!($vec2mask), arr[0], arr[1])
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec2mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = self.0.into_bool_array();
+ write!(f, "[{}, {}]", arr[0], arr[1])
+ }
+ }
+
+ impl From<$vec2mask> for [bool; 2] {
+ #[inline]
+ fn from(mask: $vec2mask) -> Self {
+ mask.0.into_bool_array()
+ }
+ }
+
+ impl From<$vec2mask> for [u32; 2] {
+ #[inline]
+ fn from(mask: $vec2mask) -> Self {
+ mask.0.into_u32_array()
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; 2]> for $vec2mask {
+ #[inline]
+ fn as_ref(&self) -> &[$t; 2] {
+ unsafe { &*(self as *const Self as *const [$t; 2]) }
+ }
+ }
+ };
+}
+
+macro_rules! impl_vec3mask {
+ ($vec3mask:ident, $t:ty, $inner:ident) => {
+ impl $vec3mask {
+ /// Creates a new vector mask.
+ #[inline]
+ pub fn new(x: bool, y: bool, z: bool) -> Self {
+ Self(MaskVector3::new(x, y, z))
+ }
+
+ impl_vecnmask_methods!($vec3mask, MaskVector3);
+ }
+
+ impl_vecnmask_traits!($vec3mask, $inner);
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec3mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = MaskVector3::into_u32_array(self.0);
+ write!(
+ f,
+ "{}({:#x}, {:#x}, {:#x})",
+ stringify!($vec3mask),
+ arr[0],
+ arr[1],
+ arr[2]
+ )
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec3mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = MaskVector3::into_bool_array(self.0);
+ write!(f, "[{}, {}, {}]", arr[0], arr[1], arr[2])
+ }
+ }
+
+ impl From<$vec3mask> for [bool; 3] {
+ #[inline]
+ fn from(mask: $vec3mask) -> Self {
+ MaskVector3::into_bool_array(mask.0)
+ }
+ }
+
+ impl From<$vec3mask> for [u32; 3] {
+ #[inline]
+ fn from(mask: $vec3mask) -> Self {
+ MaskVector3::into_u32_array(mask.0)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; 3]> for $vec3mask {
+ #[inline]
+ fn as_ref(&self) -> &[$t; 3] {
+ unsafe { &*(self as *const Self as *const [$t; 3]) }
+ }
+ }
+ };
+}
+
+macro_rules! impl_vec4mask {
+ ($vec4mask:ident, $t:ty, $inner:ident) => {
+ impl $vec4mask {
+ /// Creates a new vector mask.
+ #[inline]
+ pub fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
+ Self(MaskVector4::new(x, y, z, w))
+ }
+
+ impl_vecnmask_methods!($vec4mask, MaskVector4);
+ }
+
+ impl_vecnmask_traits!($vec4mask, $inner);
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Debug for $vec4mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = MaskVector4::into_u32_array(self.0);
+ write!(
+ f,
+ "{}({:#x}, {:#x}, {:#x}, {:#x})",
+ stringify!($vec4mask),
+ arr[0],
+ arr[1],
+ arr[2],
+ arr[3]
+ )
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl fmt::Display for $vec4mask {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let arr = MaskVector4::into_bool_array(self.0);
+ write!(f, "[{}, {}, {}, {}]", arr[0], arr[1], arr[2], arr[3])
+ }
+ }
+
+ impl From<$vec4mask> for [bool; 4] {
+ #[inline]
+ fn from(mask: $vec4mask) -> Self {
+ MaskVector4::into_bool_array(mask.0)
+ }
+ }
+
+ impl From<$vec4mask> for [u32; 4] {
+ #[inline]
+ fn from(mask: $vec4mask) -> Self {
+ MaskVector4::into_u32_array(mask.0)
+ }
+ }
+
+ #[cfg(not(target_arch = "spirv"))]
+ impl AsRef<[$t; 4]> for $vec4mask {
+ #[inline]
+ fn as_ref(&self) -> &[$t; 4] {
+ unsafe { &*(self as *const Self as *const [$t; 4]) }
+ }
+ }
+ };
+}
+
+// BVec3A /////////////////////////////////////////////////////////////////////////////////////////
+
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+type Mask128 = __m128;
+#[cfg(all(target_feature = "simd128", not(feature = "scalar-math")))]
+type Mask128 = v128;
+
+/// A 3-dimensional SIMD vector mask.
+///
+/// This type is 16 byte aligned and is backed by a SIMD vector. If SIMD is not available `BVec3A`
+/// will be a type alias for `BVec3`.
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct BVec3A(pub(crate) Mask128);
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl_vec3mask!(BVec3A, u32, Mask128);
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+pub type BVec3A = BVec3;
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl From<BVec3> for BVec3A {
+ #[inline]
+ fn from(b: BVec3) -> Self {
+ Self::new(b.0.x, b.0.y, b.0.z)
+ }
+}
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl From<BVec3A> for BVec3 {
+ #[inline]
+ fn from(b: BVec3A) -> Self {
+ let b: [bool; 3] = b.into();
+ Self::new(b[0], b[1], b[2])
+ }
+}
+
+// BVec4A ////////////////////////////////////////////////////////////////////////////////////////
+
+/// A 4-dimensional SIMD vector mask.
+///
+/// This type is 16 byte aligned and is backed by a SIMD vector. If SIMD is not available `BVec4A`
+/// will be a type alias for `BVec4`.
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct BVec4A(pub(crate) Mask128);
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl_vec4mask!(BVec4A, u32, Mask128);
+
+#[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+))]
+pub type BVec4A = BVec4;
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl From<BVec4> for BVec4A {
+ #[inline]
+ fn from(b: BVec4) -> Self {
+ Self::new(b.0.x, b.0.y, b.0.z, b.0.w)
+ }
+}
+
+#[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+))]
+impl From<BVec4A> for BVec4 {
+ #[inline]
+ fn from(b: BVec4A) -> Self {
+ let b: [bool; 4] = b.into();
+ Self::new(b[0], b[1], b[2], b[3])
+ }
+}
+
+// boolean vectors ////////////////////////////////////////////////////////////////////////////////
+type XYBool = crate::XY<bool>;
+
+/// A 2-dimensional boolean vector.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct BVec2(pub(crate) XYBool);
+impl_vec2mask!(BVec2, bool, XYBool);
+
+type XYZBool = crate::XYZ<bool>;
+
+/// A 3-dimensional boolean vector.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct BVec3(pub(crate) XYZBool);
+impl_vec3mask!(BVec3, bool, XYZBool);
+
+type XYZWBool = crate::XYZW<bool>;
+
+/// A 4-dimensional boolean vector.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct BVec4(pub(crate) XYZWBool);
+impl_vec4mask!(BVec4, bool, XYZWBool);
+
+mod const_test_bvec2 {
+ const_assert_eq!(
+ core::mem::align_of::<bool>(),
+ core::mem::align_of::<super::BVec2>()
+ );
+ const_assert_eq!(2, core::mem::size_of::<super::BVec2>());
+}
+
+mod const_test_bvec3 {
+ const_assert_eq!(
+ core::mem::align_of::<bool>(),
+ core::mem::align_of::<super::BVec3>()
+ );
+ const_assert_eq!(3, core::mem::size_of::<super::BVec3>());
+}
+
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+mod const_test_bvec3a {
+ const_assert_eq!(16, core::mem::align_of::<super::BVec3A>());
+ const_assert_eq!(16, core::mem::size_of::<super::BVec3A>());
+}
+
+mod const_test_bvec4 {
+ const_assert_eq!(
+ core::mem::align_of::<bool>(),
+ core::mem::align_of::<super::BVec4>()
+ );
+ const_assert_eq!(4, core::mem::size_of::<super::BVec4>());
+}
+
+#[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+mod const_test_bvec4a {
+ const_assert_eq!(16, core::mem::align_of::<super::BVec4A>());
+ const_assert_eq!(16, core::mem::size_of::<super::BVec4A>());
+}
diff --git a/tests/affine2.rs b/tests/affine2.rs
new file mode 100644
index 0000000..35424a1
--- /dev/null
+++ b/tests/affine2.rs
@@ -0,0 +1,217 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_affine2_tests {
+ ($t:ident, $affine2:ident, $vec2:ident) => {
+ const MATRIX2D: [[$t; 2]; 3] = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
+
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_affine2_identity, {
+ assert_eq!($affine2::IDENTITY, $affine2::IDENTITY * $affine2::IDENTITY);
+ assert_eq!($affine2::IDENTITY, $affine2::default());
+ });
+
+ glam_test!(test_affine2_zero, {
+ assert_eq!(
+ $affine2::ZERO.transform_point2($vec2::new(1., 2.)),
+ $vec2::ZERO
+ );
+ });
+
+ glam_test!(test_affine2_nan, {
+ assert!($affine2::NAN.is_nan());
+ assert!(!$affine2::NAN.is_finite());
+ });
+
+ glam_test!(test_affine2_translation, {
+ let translate = $affine2::from_translation($vec2::new(1.0, 2.0));
+ assert_eq!(translate.translation, $vec2::new(1.0, 2.0).into());
+ assert_eq!(
+ translate.transform_point2($vec2::new(2.0, 3.0)),
+ $vec2::new(3.0, 5.0),
+ );
+ });
+
+ glam_test!(test_affine2_mul, {
+ let m = $affine2::from_angle(deg(90.0));
+ let result3 = m.transform_vector2($vec2::Y);
+ assert_approx_eq!($vec2::new(-1.0, 0.0), result3);
+
+ let m = $affine2::from_scale_angle_translation(
+ $vec2::new(0.5, 1.5),
+ deg(90.0),
+ $vec2::new(1.0, 2.0),
+ );
+ let result3 = m.transform_vector2($vec2::Y);
+ assert_approx_eq!($vec2::new(-1.5, 0.0), result3, 1.0e-6);
+
+ let result3 = m.transform_point2($vec2::Y);
+ assert_approx_eq!($vec2::new(-0.5, 2.0), result3, 1.0e-6);
+ });
+
+ glam_test!(test_from_scale, {
+ let m = $affine2::from_scale($vec2::new(2.0, 4.0));
+ assert_approx_eq!(
+ m.transform_point2($vec2::new(1.0, 1.0)),
+ $vec2::new(2.0, 4.0)
+ );
+ });
+
+ glam_test!(test_affine2_inverse, {
+ let inv = $affine2::IDENTITY.inverse();
+ assert_approx_eq!($affine2::IDENTITY, inv);
+
+ let rot = $affine2::from_angle(deg(90.0));
+ let rot_inv = rot.inverse();
+ assert_approx_eq!($affine2::IDENTITY, rot * rot_inv);
+ assert_approx_eq!($affine2::IDENTITY, rot_inv * rot);
+
+ let trans = $affine2::from_translation($vec2::new(1.0, 2.0));
+ let trans_inv = trans.inverse();
+ assert_approx_eq!($affine2::IDENTITY, trans * trans_inv);
+ assert_approx_eq!($affine2::IDENTITY, trans_inv * trans);
+
+ let scale = $affine2::from_scale($vec2::new(4.0, 5.0));
+ let scale_inv = scale.inverse();
+ assert_approx_eq!($affine2::IDENTITY, scale * scale_inv);
+ assert_approx_eq!($affine2::IDENTITY, scale_inv * scale);
+
+ let m = scale * rot * trans;
+ let m_inv = m.inverse();
+ assert_approx_eq!($affine2::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($affine2::IDENTITY, m_inv * m, 1.0e-5);
+ assert_approx_eq!(m_inv, trans_inv * rot_inv * scale_inv, 1.0e-6);
+
+ // Make sure we can invert a shear matrix:
+ let m = $affine2::from_angle(0.5)
+ * $affine2::from_scale($vec2::new(1.0, 0.5))
+ * $affine2::from_angle(-0.5);
+ let m_inv = m.inverse();
+ assert_approx_eq!($affine2::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($affine2::IDENTITY, m_inv * m, 1.0e-5);
+
+ should_glam_assert!({ $affine2::ZERO.inverse() });
+ });
+
+ glam_test!(test_affine2_ops, {
+ let m0 = $affine2::from_cols_array_2d(&MATRIX2D);
+ let m0x2 = $affine2::from_cols_array_2d(&[[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]]);
+ assert_eq!(m0x2, m0 * 2.0);
+ assert_eq!(m0x2, 2.0 * m0);
+ assert_eq!(m0x2, m0 + m0);
+ assert_eq!($affine2::ZERO, m0 - m0);
+ assert_approx_eq!(m0, m0 * $affine2::IDENTITY);
+ assert_approx_eq!(m0, $affine2::IDENTITY * m0);
+ });
+
+ glam_test!(test_affine2_fmt, {
+ let a = $affine2::from_cols_array_2d(&MATRIX2D);
+ assert_eq!(format!("{}", a), "[[1, 2], [3, 4], [5, 6]]");
+ });
+
+ glam_test!(test_affine2_to_from_slice, {
+ const MATRIX1D: [$t; 6] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+ let m = $affine2::from_cols_slice(&MATRIX1D);
+ assert_eq!($affine2::from_cols_array(&MATRIX1D), m);
+ assert_eq!(MATRIX1D, m.to_cols_array());
+ assert_eq!(MATRIX2D, m.to_cols_array_2d());
+ let mut out: [$t; 6] = Default::default();
+ m.write_cols_to_slice(&mut out);
+ assert_eq!(MATRIX1D, out);
+ assert_eq!(
+ m,
+ $affine2::from_cols(MATRIX2D[0].into(), MATRIX2D[1].into(), MATRIX2D[2].into())
+ );
+
+ should_panic!({ $affine2::from_cols_slice(&[0.0; 5]) });
+ should_panic!({ $affine2::IDENTITY.write_cols_to_slice(&mut [0.0; 5]) });
+ });
+
+ glam_test!(test_product, {
+ let ident = $affine2::IDENTITY;
+ assert_eq!(
+ vec![ident, ident].iter().product::<$affine2>(),
+ ident * ident
+ );
+ });
+
+ glam_test!(test_affine2_is_finite, {
+ assert!($affine2::from_scale($vec2::new(1.0, 1.0)).is_finite());
+ assert!($affine2::from_scale($vec2::new(0.0, 1.0)).is_finite());
+ assert!(!$affine2::from_scale($vec2::new(1.0, NAN)).is_finite());
+ assert!(!$affine2::from_scale($vec2::new(1.0, NEG_INFINITY)).is_finite());
+ });
+ };
+}
+
+mod affine2 {
+ use super::support::{deg, FloatCompare};
+ use glam::{Affine2, Vec2};
+
+ impl FloatCompare for Affine2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self {
+ matrix2: self.matrix2.abs_diff(&other.matrix2),
+ translation: self.translation.abs_diff(&other.translation),
+ }
+ }
+ }
+
+ glam_test!(test_align, {
+ use std::mem;
+ if cfg!(not(feature = "scalar-math")) {
+ assert_eq!(32, mem::size_of::<Affine2>());
+ assert_eq!(16, mem::align_of::<Affine2>());
+ } else if cfg!(feature = "cuda") {
+ assert_eq!(24, mem::size_of::<Affine2>());
+ assert_eq!(8, mem::align_of::<Affine2>());
+ } else {
+ assert_eq!(24, mem::size_of::<Affine2>());
+ assert_eq!(4, mem::align_of::<Affine2>());
+ }
+ });
+
+ impl_affine2_tests!(f32, Affine2, Vec2);
+}
+
+mod daffine2 {
+ use super::support::{deg, FloatCompare};
+ use glam::{DAffine2, DVec2};
+
+ impl FloatCompare for DAffine2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self {
+ matrix2: self.matrix2.abs_diff(&other.matrix2),
+ translation: self.translation.abs_diff(&other.translation),
+ }
+ }
+ }
+
+ #[cfg(not(feature = "cuda"))]
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(48, mem::size_of::<DAffine2>());
+ assert_eq!(mem::align_of::<f64>(), mem::align_of::<DAffine2>());
+ });
+
+ #[cfg(feature = "cuda")]
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(48, mem::size_of::<DAffine2>());
+ assert_eq!(16, mem::align_of::<DAffine2>());
+ });
+
+ impl_affine2_tests!(f64, DAffine2, DVec2);
+}
diff --git a/tests/affine3.rs b/tests/affine3.rs
new file mode 100644
index 0000000..06fd7ca
--- /dev/null
+++ b/tests/affine3.rs
@@ -0,0 +1,344 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_affine3_tests {
+ ($t:ident, $affine3:ident, $quat:ident, $vec3:ident) => {
+ const MATRIX2D: [[$t; 3]; 4] = [
+ [1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0],
+ [7.0, 8.0, 9.0],
+ [10.0, 11.0, 12.0],
+ ];
+
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_affine3_identity, {
+ assert_eq!($affine3::IDENTITY, $affine3::IDENTITY * $affine3::IDENTITY);
+ assert_eq!($affine3::IDENTITY, $affine3::default());
+ });
+
+ glam_test!(test_affine3_zero, {
+ assert_eq!(
+ $affine3::ZERO.transform_point3($vec3::new(1., 2., 3.)),
+ $vec3::ZERO
+ );
+ });
+
+ glam_test!(test_affine3_nan, {
+ assert!($affine3::NAN.is_nan());
+ assert!(!$affine3::NAN.is_finite());
+ });
+
+ glam_test!(test_affine3_translation, {
+ let translate = $affine3::from_translation($vec3::new(1.0, 2.0, 3.0));
+ assert_eq!(translate.translation, $vec3::new(1.0, 2.0, 3.0).into());
+ assert_eq!(
+ translate.transform_point3($vec3::new(2.0, 3.0, 4.0)),
+ $vec3::new(3.0, 5.0, 7.0),
+ );
+ });
+
+ glam_test!(test_from_rotation, {
+ let eps = 2.0 * core::f32::EPSILON;
+ let rot_x1 = $affine3::from_rotation_x(deg(180.0));
+ let rot_x2 = $affine3::from_axis_angle($vec3::X, deg(180.0));
+ assert_approx_eq!(rot_x1, rot_x2, eps);
+ let rot_y1 = $affine3::from_rotation_y(deg(180.0));
+ let rot_y2 = $affine3::from_axis_angle($vec3::Y, deg(180.0));
+ assert_approx_eq!(rot_y1, rot_y2, eps);
+ let rot_z1 = $affine3::from_rotation_z(deg(180.0));
+ let rot_z2 = $affine3::from_axis_angle($vec3::Z, deg(180.0));
+ assert_approx_eq!(rot_z1, rot_z2, eps);
+
+ should_glam_assert!({ $affine3::from_axis_angle($vec3::ZERO, 0.0) });
+ should_glam_assert!({ $affine3::from_quat($quat::IDENTITY * 2.0) });
+ });
+
+ glam_test!(test_affine3_mul, {
+ let m = $affine3::from_axis_angle($vec3::Z, deg(90.0));
+ let result3 = m.transform_vector3($vec3::Y);
+ assert_approx_eq!($vec3::new(-1.0, 0.0, 0.0), result3);
+
+ let m = $affine3::from_scale_rotation_translation(
+ $vec3::new(0.5, 1.5, 2.0),
+ $quat::from_rotation_x(deg(90.0)),
+ $vec3::new(1.0, 2.0, 3.0),
+ );
+ let result3 = m.transform_vector3($vec3::Y);
+ assert_approx_eq!($vec3::new(0.0, 0.0, 1.5), result3, 1.0e-6);
+
+ let result3 = m.transform_point3($vec3::Y);
+ assert_approx_eq!($vec3::new(1.0, 2.0, 4.5), result3, 1.0e-6);
+ });
+
+ glam_test!(test_from_scale, {
+ let m = $affine3::from_scale($vec3::new(2.0, 4.0, 8.0));
+ assert_approx_eq!(
+ m.transform_point3($vec3::new(1.0, 1.0, 1.0)),
+ $vec3::new(2.0, 4.0, 8.0)
+ );
+ });
+
+ glam_test!(test_affine3_inverse, {
+ let inv = $affine3::IDENTITY.inverse();
+ assert_approx_eq!($affine3::IDENTITY, inv);
+
+ let rotz = $affine3::from_rotation_z(deg(90.0));
+ let rotz_inv = rotz.inverse();
+ assert_approx_eq!($affine3::IDENTITY, rotz * rotz_inv);
+ assert_approx_eq!($affine3::IDENTITY, rotz_inv * rotz);
+
+ let trans = $affine3::from_translation($vec3::new(1.0, 2.0, 3.0));
+ let trans_inv = trans.inverse();
+ assert_approx_eq!($affine3::IDENTITY, trans * trans_inv);
+ assert_approx_eq!($affine3::IDENTITY, trans_inv * trans);
+
+ let scale = $affine3::from_scale($vec3::new(4.0, 5.0, 6.0));
+ let scale_inv = scale.inverse();
+ assert_approx_eq!($affine3::IDENTITY, scale * scale_inv);
+ assert_approx_eq!($affine3::IDENTITY, scale_inv * scale);
+
+ let m = scale * rotz * trans;
+ let m_inv = m.inverse();
+ assert_approx_eq!($affine3::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($affine3::IDENTITY, m_inv * m, 1.0e-5);
+ assert_approx_eq!(m_inv, trans_inv * rotz_inv * scale_inv, 1.0e-6);
+
+ // Make sure we can invert a shear matrix:
+ let m = $affine3::from_axis_angle($vec3::X, 0.5)
+ * $affine3::from_scale($vec3::new(1.0, 0.5, 2.0))
+ * $affine3::from_axis_angle($vec3::X, -0.5);
+ let m_inv = m.inverse();
+ assert_approx_eq!($affine3::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($affine3::IDENTITY, m_inv * m, 1.0e-5);
+
+ should_glam_assert!({ $affine3::ZERO.inverse() });
+ });
+
+ glam_test!(test_affine3_decompose, {
+ // identity
+ let (out_scale, out_rotation, out_translation) =
+ $affine3::IDENTITY.to_scale_rotation_translation();
+ assert_approx_eq!($vec3::ONE, out_scale);
+ assert!(out_rotation.is_near_identity());
+ assert_approx_eq!($vec3::ZERO, out_translation);
+
+ // no scale
+ let in_scale = $vec3::ONE;
+ let in_translation = $vec3::new(-2.0, 4.0, -0.125);
+ let in_rotation = $quat::from_euler(
+ glam::EulerRot::YXZ,
+ $t::to_radians(-45.0),
+ $t::to_radians(180.0),
+ $t::to_radians(270.0),
+ );
+ let in_mat =
+ $affine3::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $affine3::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+
+ // positive scale
+ let in_scale = $vec3::new(1.0, 2.0, 4.0);
+ let in_mat =
+ $affine3::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $affine3::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+
+ // negative scale
+ let in_scale = $vec3::new(-4.0, 1.0, 2.0);
+ let in_mat =
+ $affine3::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $affine3::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-5
+ );
+
+ // negative scale
+ let in_scale = $vec3::new(4.0, -1.0, -2.0);
+ let in_mat =
+ $affine3::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ // out_scale and out_rotation are different but they produce the same matrix
+ // assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $affine3::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+ });
+
+ glam_test!(test_affine3_look_at, {
+ let eye = $vec3::new(0.0, 0.0, -5.0);
+ let center = $vec3::new(0.0, 0.0, 0.0);
+ let up = $vec3::new(1.0, 0.0, 0.0);
+ let lh = $affine3::look_at_lh(eye, center, up);
+ let rh = $affine3::look_at_rh(eye, center, up);
+ let point = $vec3::new(1.0, 0.0, 0.0);
+ assert_approx_eq!(lh.transform_point3(point), $vec3::new(0.0, 1.0, 5.0));
+ assert_approx_eq!(rh.transform_point3(point), $vec3::new(0.0, 1.0, -5.0));
+
+ should_glam_assert!({ $affine3::look_at_lh($vec3::ONE, $vec3::ZERO, $vec3::ZERO) });
+ should_glam_assert!({ $affine3::look_at_rh($vec3::ONE, $vec3::ZERO, $vec3::ZERO) });
+ });
+
+ glam_test!(test_affine3_ops, {
+ let m0 = $affine3::from_cols_array_2d(&MATRIX2D);
+ let m0x2 = $affine3::from_cols_array_2d(&[
+ [2.0, 4.0, 6.0],
+ [8.0, 10.0, 12.0],
+ [14.0, 16.0, 18.0],
+ [20.0, 22.0, 24.0],
+ ]);
+ assert_eq!(m0x2, m0 * 2.0);
+ assert_eq!(m0x2, 2.0 * m0);
+ assert_eq!(m0x2, m0 + m0);
+ assert_eq!($affine3::ZERO, m0 - m0);
+ assert_approx_eq!(m0, m0 * $affine3::IDENTITY);
+ assert_approx_eq!(m0, $affine3::IDENTITY * m0);
+ });
+
+ glam_test!(test_affine3_fmt, {
+ let a = $affine3::from_cols_array_2d(&MATRIX2D);
+ assert_eq!(
+ format!("{}", a),
+ "[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]"
+ );
+ });
+
+ glam_test!(test_affine3_to_from_slice, {
+ const MATRIX1D: [$t; 12] = [
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ ];
+ let m = $affine3::from_cols_slice(&MATRIX1D);
+ assert_eq!($affine3::from_cols_array(&MATRIX1D), m);
+ assert_eq!(MATRIX1D, m.to_cols_array());
+ assert_eq!(MATRIX2D, m.to_cols_array_2d());
+ let mut out: [$t; 12] = Default::default();
+ m.write_cols_to_slice(&mut out);
+ assert_eq!(MATRIX1D, out);
+ assert_eq!(
+ m,
+ $affine3::from_cols(
+ MATRIX2D[0].into(),
+ MATRIX2D[1].into(),
+ MATRIX2D[2].into(),
+ MATRIX2D[3].into()
+ )
+ );
+
+ should_panic!({ $affine3::from_cols_slice(&[0.0; 11]) });
+ should_panic!({ $affine3::IDENTITY.write_cols_to_slice(&mut [0.0; 11]) });
+ });
+
+ glam_test!(test_product, {
+ let ident = $affine3::IDENTITY;
+ assert_eq!(
+ vec![ident, ident].iter().product::<$affine3>(),
+ ident * ident
+ );
+ });
+
+ glam_test!(test_affine3_is_finite, {
+ assert!($affine3::from_scale($vec3::new(1.0, 1.0, 1.0)).is_finite());
+ assert!($affine3::from_scale($vec3::new(0.0, 1.0, 1.0)).is_finite());
+ assert!(!$affine3::from_scale($vec3::new(1.0, NAN, 1.0)).is_finite());
+ assert!(!$affine3::from_scale($vec3::new(1.0, 1.0, NEG_INFINITY)).is_finite());
+ });
+ };
+}
+
+mod affine3a {
+ use super::support::{deg, FloatCompare};
+ use glam::{Affine3A, Quat, Vec3, Vec3A};
+
+ impl FloatCompare for Affine3A {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self {
+ matrix3: self.matrix3.abs_diff(&other.matrix3),
+ translation: self.translation.abs_diff(&other.translation),
+ }
+ }
+ }
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(64, mem::size_of::<Affine3A>());
+ assert_eq!(mem::align_of::<Vec3A>(), mem::align_of::<Affine3A>());
+ });
+
+ glam_test!(test_affine3_mul_vec3a, {
+ let m = Affine3A::from_axis_angle(Vec3::Z, deg(90.0));
+ let result3 = m.transform_vector3a(Vec3A::Y);
+ assert_approx_eq!(Vec3A::new(-1.0, 0.0, 0.0), result3);
+
+ let m = Affine3A::from_scale_rotation_translation(
+ Vec3::new(0.5, 1.5, 2.0),
+ Quat::from_rotation_x(deg(90.0)),
+ Vec3::new(1.0, 2.0, 3.0),
+ );
+ let result3 = m.transform_vector3a(Vec3A::Y);
+ assert_approx_eq!(Vec3A::new(0.0, 0.0, 1.5), result3, 1.0e-6);
+
+ let result3 = m.transform_point3a(Vec3A::Y);
+ assert_approx_eq!(Vec3A::new(1.0, 2.0, 4.5), result3, 1.0e-6);
+ });
+
+ impl_affine3_tests!(f32, Affine3A, Quat, Vec3);
+}
+
+mod daffine3 {
+ use super::support::{deg, FloatCompare};
+ use glam::{DAffine3, DQuat, DVec3};
+
+ impl FloatCompare for DAffine3 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self {
+ matrix3: self.matrix3.abs_diff(&other.matrix3),
+ translation: self.translation.abs_diff(&other.translation),
+ }
+ }
+ }
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(96, mem::size_of::<DAffine3>());
+ assert_eq!(mem::align_of::<f64>(), mem::align_of::<DAffine3>());
+ });
+
+ impl_affine3_tests!(f64, DAffine3, DQuat, DVec3);
+}
diff --git a/tests/euler.rs b/tests/euler.rs
new file mode 100644
index 0000000..f6cab9e
--- /dev/null
+++ b/tests/euler.rs
@@ -0,0 +1,209 @@
+#[macro_use]
+mod support;
+
+use glam::{DQuat, Quat};
+
+/// Helper to calculate the minimal absolute difference between two angles, in the range [0, PI]
+trait AngleDiff {
+ type Output;
+ fn angle_diff(self, other: Self) -> Self::Output;
+}
+#[macro_export]
+macro_rules! impl_angle_diff {
+ ($t:ty, $pi:expr) => {
+ impl AngleDiff for $t {
+ type Output = $t;
+ fn angle_diff(self, other: $t) -> $t {
+ const PI2: $t = $pi + $pi;
+ let s = self.rem_euclid(PI2);
+ let o = other.rem_euclid(PI2);
+ if s > o {
+ (s - o).min(PI2 + o - s)
+ } else {
+ (o - s).min(PI2 + s - o)
+ }
+ }
+ }
+ };
+}
+impl_angle_diff!(f32, std::f32::consts::PI);
+impl_angle_diff!(f64, std::f64::consts::PI);
+
+#[macro_export]
+macro_rules! assert_approx_angle {
+ ($a:expr, $b:expr, $eps:expr) => {{
+ let (a, b) = ($a, $b);
+ let eps = $eps;
+ let diff = a.angle_diff(b);
+ assert!(
+ diff < $eps,
+ "assertion failed: `(left !== right)` \
+ (left: `{:?}`, right: `{:?}`, expect diff: `{:?}`, real diff: `{:?}`)",
+ a,
+ b,
+ eps,
+ diff
+ );
+ }};
+}
+
+trait EqApprox {
+ type EPS;
+ fn eq_approx(self, other: Self, axis_eps: Self::EPS, rot_axis: Self::EPS) -> bool;
+}
+
+macro_rules! impl_eq_approx {
+ ($t:ty, $quat:ident, $pi:expr) => {
+ impl EqApprox for $quat {
+ type EPS = $t;
+ fn eq_approx(self, other: Self, axis_eps: Self::EPS, rot_axis: Self::EPS) -> bool {
+ let (s_axis, s_angle) = self.to_axis_angle();
+ let (o_axis, o_angle) = other.to_axis_angle();
+ if s_angle.abs() < rot_axis && o_angle.abs() < rot_axis {
+ // No rotation
+ true
+ } else {
+ let a = s_axis.angle_between(o_axis);
+ if a < axis_eps {
+ // Same axes
+ (s_angle - o_angle).abs() < rot_axis
+ } else if ($pi - a).abs() < axis_eps {
+ // Inverted axes (180°)
+ (s_angle + o_angle).abs() < rot_axis
+ } else {
+ // Other
+ false
+ }
+ }
+ }
+ }
+ };
+}
+impl_eq_approx!(f32, Quat, std::f32::consts::PI);
+impl_eq_approx!(f64, DQuat, std::f64::consts::PI);
+
+#[macro_export]
+macro_rules! impl_3axis_test {
+ ($name:ident, $t:ty, $quat:ident, $euler:path, $U:path, $V:path, $W:path, $vec:ident) => {
+ glam_test!($name, {
+ let euler = $euler;
+ assert!($U != $W); // First and last axis must be different for three axis
+ for u in (-176..=176).step_by(44) {
+ for v in (-88..=88).step_by(44) {
+ for w in (-176..=176).step_by(44) {
+ let u1 = (u as $t).to_radians();
+ let v1 = (v as $t).to_radians();
+ let w1 = (w as $t).to_radians();
+
+ let q1: $quat = ($quat::from_axis_angle($U, u1)
+ * $quat::from_axis_angle($V, v1)
+ * $quat::from_axis_angle($W, w1))
+ .normalize();
+
+ // Test if the rotation is the expected
+ let q2: $quat = $quat::from_euler(euler, u1, v1, w1).normalize();
+ assert_approx_eq!(q1, q2, 1e-5);
+
+ // Test angle reconstruction
+ let (u2, v2, w2) = q1.to_euler(euler);
+ let q3 = $quat::from_euler(euler, u2, v2, w2).normalize();
+
+ assert_approx_angle!(u1, u2, 1e-4 as $t);
+ assert_approx_angle!(v1, v2, 1e-4 as $t);
+ assert_approx_angle!(w1, w2, 1e-4 as $t);
+
+ assert_approx_eq!(q1 * $vec::X, q3 * $vec::X, 1e-4);
+ assert_approx_eq!(q1 * $vec::Y, q3 * $vec::Y, 1e-4);
+ assert_approx_eq!(q1 * $vec::Z, q3 * $vec::Z, 1e-4);
+ }
+ }
+ }
+ });
+ };
+}
+
+#[macro_export]
+macro_rules! impl_2axis_test {
+ ($name:ident, $t:ty, $quat:ident, $euler:path, $U:path, $V:path, $W:path, $vec:ident) => {
+ glam_test!($name, {
+ #[allow(deprecated)]
+ let euler = $euler;
+            assert!($U == $W); // First and last axis must be the same for a two-axis test
+ for u in (-176..=176).step_by(44) {
+ for v in (-176..=176).step_by(44) {
+ for w in (-176..=176).step_by(44) {
+ let u1 = (u as $t).to_radians();
+ let v1 = (v as $t).to_radians();
+ let w1 = (w as $t).to_radians();
+
+ let q1: $quat = ($quat::from_axis_angle($U, u1)
+ * $quat::from_axis_angle($V, v1)
+ * $quat::from_axis_angle($W, w1))
+ .normalize();
+
+ // Test if the rotation is the expected
+ let q2 = $quat::from_euler(euler, u1, v1, w1).normalize();
+ assert_approx_eq!(q1, q2, 1e-5);
+
+ // Test angle reconstruction
+ let (u2, v2, w2) = q1.to_euler(euler);
+ let _q3 = $quat::from_euler(euler, u2, v2, w2).normalize();
+
+                        // Assertions disabled: two-axis Euler decompositions are ambiguous, so there is no generic expected value to compare against.
+ // assert_approx_angle!(u1, u2, 1e-4 as $t);
+ // assert_approx_angle!(v1, v2, 1e-4 as $t);
+ // assert_approx_angle!(w1, w2, 1e-4 as $t);
+
+ // assert_approx_eq!(q1 * $vec::X, q3 * $vec::X, 1e-4);
+ // assert_approx_eq!(q1 * $vec::Y, q3 * $vec::Y, 1e-4);
+ // assert_approx_eq!(q1 * $vec::Z, q3 * $vec::Z, 1e-4);
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_all_quat_tests_three_axis {
+ ($t:ty, $q:ident, $v:ident) => {
+ impl_3axis_test!(test_euler_zyx, $t, $q, ER::ZYX, $v::Z, $v::Y, $v::X, $v);
+ impl_3axis_test!(test_euler_zxy, $t, $q, ER::ZXY, $v::Z, $v::X, $v::Y, $v);
+ impl_3axis_test!(test_euler_yxz, $t, $q, ER::YXZ, $v::Y, $v::X, $v::Z, $v);
+ impl_3axis_test!(test_euler_yzx, $t, $q, ER::YZX, $v::Y, $v::Z, $v::X, $v);
+ impl_3axis_test!(test_euler_xyz, $t, $q, ER::XYZ, $v::X, $v::Y, $v::Z, $v);
+ impl_3axis_test!(test_euler_xzy, $t, $q, ER::XZY, $v::X, $v::Z, $v::Y, $v);
+ };
+}
+
+macro_rules! impl_all_quat_tests_two_axis {
+ ($t:ty, $q:ident, $v:ident) => {
+ impl_2axis_test!(test_euler_zyz, $t, $q, ER::ZYZ, $v::Z, $v::Y, $v::Z, $v);
+ impl_2axis_test!(test_euler_zxz, $t, $q, ER::ZXZ, $v::Z, $v::X, $v::Z, $v);
+ impl_2axis_test!(test_euler_yxy, $t, $q, ER::YXY, $v::Y, $v::X, $v::Y, $v);
+ impl_2axis_test!(test_euler_yzy, $t, $q, ER::YZY, $v::Y, $v::Z, $v::Y, $v);
+ impl_2axis_test!(test_euler_xyx, $t, $q, ER::XYX, $v::X, $v::Y, $v::X, $v);
+ impl_2axis_test!(test_euler_xzx, $t, $q, ER::XZX, $v::X, $v::Z, $v::X, $v);
+ };
+}
+
+mod euler {
+ use super::AngleDiff;
+ use glam::*;
+ type ER = EulerRot;
+
+ mod quat {
+ use super::*;
+
+ impl_all_quat_tests_three_axis!(f32, Quat, Vec3);
+
+ impl_all_quat_tests_two_axis!(f32, Quat, Vec3);
+ }
+
+ mod dquat {
+ use super::*;
+
+ impl_all_quat_tests_three_axis!(f64, DQuat, DVec3);
+
+ impl_all_quat_tests_two_axis!(f64, DQuat, DVec3);
+ }
+}
diff --git a/tests/mat2.rs b/tests/mat2.rs
new file mode 100644
index 0000000..860f25a
--- /dev/null
+++ b/tests/mat2.rs
@@ -0,0 +1,262 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_mat2_tests {
+ ($t:ident, $const_new:ident, $newmat2:ident, $mat2:ident, $mat3:ident, $newvec2:ident, $vec2:ident) => {
+ const IDENTITY: [[$t; 2]; 2] = [[1.0, 0.0], [0.0, 1.0]];
+
+ const MATRIX: [[$t; 2]; 2] = [[1.0, 2.0], [3.0, 4.0]];
+
+ glam_test!(test_const, {
+ const M0: $mat2 = $const_new!([0.0; 4]);
+ const M1: $mat2 = $const_new!([1.0, 2.0, 3.0, 4.0]);
+ const M2: $mat2 = $const_new!([1.0, 2.0], [3.0, 4.0]);
+ assert_eq!($mat2::ZERO, M0);
+ assert_eq!($mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]), M1);
+ assert_eq!($mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]), M2);
+ });
+
+ glam_test!(test_mat2_identity, {
+ assert_eq!($mat2::IDENTITY, $mat2::from_cols_array(&[1., 0., 0., 1.]));
+ let identity = $mat2::IDENTITY;
+ assert_eq!(IDENTITY, identity.to_cols_array_2d());
+ assert_eq!($mat2::from_cols_array_2d(&IDENTITY), identity);
+ assert_eq!(identity, identity * identity);
+ assert_eq!(identity, $mat2::default());
+ });
+
+ glam_test!(test_mat2_zero, {
+ assert_eq!($mat2::ZERO, $mat2::from_cols_array(&[0., 0., 0., 0.]));
+ });
+
+ glam_test!(test_mat2_nan, {
+ assert!($mat2::NAN.is_nan());
+ assert!(!$mat2::NAN.is_finite());
+ });
+
+ glam_test!(test_mat2_accessors, {
+ let mut m = $mat2::ZERO;
+ m.x_axis = $vec2::new(1.0, 2.0);
+ m.y_axis = $vec2::new(3.0, 4.0);
+ assert_eq!($mat2::from_cols_array_2d(&MATRIX), m);
+ assert_eq!($vec2::new(1.0, 2.0), m.x_axis);
+ assert_eq!($vec2::new(3.0, 4.0), m.y_axis);
+
+ assert_eq!($vec2::new(1.0, 2.0), m.col(0));
+ assert_eq!($vec2::new(3.0, 4.0), m.col(1));
+
+ assert_eq!($newvec2(1.0, 3.0), m.row(0));
+ assert_eq!($newvec2(2.0, 4.0), m.row(1));
+
+ *m.col_mut(0) = m.col(0).yx();
+ *m.col_mut(1) = m.col(1).yx();
+ assert_eq!($vec2::new(2.0, 1.0), m.col(0));
+ assert_eq!($vec2::new(4.0, 3.0), m.col(1));
+
+ should_panic!({ $mat2::ZERO.col(2) });
+ should_panic!({
+ let mut m = $mat2::ZERO;
+ m.col_mut(2);
+ });
+ should_panic!({ $mat2::ZERO.row(2) });
+ });
+
+ glam_test!(test_mat2_from_axes, {
+ let a = $mat2::from_cols_array_2d(&[[1.0, 2.0], [3.0, 4.0]]);
+ assert_eq!(MATRIX, a.to_cols_array_2d());
+ let b = $mat2::from_cols($newvec2(1.0, 2.0), $newvec2(3.0, 4.0));
+ assert_eq!(a, b);
+ let c = $newmat2($newvec2(1.0, 2.0), $newvec2(3.0, 4.0));
+ assert_eq!(a, c);
+ let d = b.to_cols_array();
+ let f = $mat2::from_cols_array(&d);
+ assert_eq!(b, f);
+ });
+
+ glam_test!(test_mat2_mul, {
+ let mat_a = $mat2::from_angle(deg(90.0));
+ let res_a = mat_a * $vec2::Y;
+ assert_approx_eq!($newvec2(-1.0, 0.0), res_a);
+ let res_b = mat_a * $vec2::X;
+ assert_approx_eq!($newvec2(0.0, 1.0), res_b);
+ });
+
+ glam_test!(test_from_scale_angle, {
+ let rot = $mat2::from_scale_angle($vec2::new(4.0, 2.0), deg(180.0));
+ assert_approx_eq!($vec2::X * -4.0, rot * $vec2::X, 1.0e-6);
+ assert_approx_eq!($vec2::Y * -2.0, rot * $vec2::Y, 1.0e-6);
+ });
+
+ glam_test!(test_from_diagonal, {
+ let m = $mat2::from_diagonal($vec2::new(2 as $t, 4 as $t));
+ assert_eq!(
+ $mat2::from_cols_array_2d(&[[2 as $t, 0 as $t], [0 as $t, 4 as $t]]),
+ m
+ );
+ assert_approx_eq!(m * $vec2::new(1.0, 1.0), $vec2::new(2.0, 4.0));
+ assert_approx_eq!($vec2::X * 2.0, m.x_axis);
+ assert_approx_eq!($vec2::Y * 4.0, m.y_axis);
+ });
+
+ glam_test!(test_from_mat3, {
+ let m3 =
+ $mat3::from_cols_array_2d(&[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]);
+ let m2 = $mat2::from_mat3(m3);
+ assert_eq!($mat2::from_cols_array_2d(&[[1.0, 2.0], [4.0, 5.0]]), m2);
+ });
+
+ glam_test!(test_mat2_transpose, {
+ let m = $newmat2($newvec2(1.0, 2.0), $newvec2(3.0, 4.0));
+ let mt = m.transpose();
+ assert_eq!($newvec2(1.0, 3.0), mt.x_axis);
+ assert_eq!($newvec2(2.0, 4.0), mt.y_axis);
+ });
+
+ glam_test!(test_mat2_det, {
+ assert_eq!(0.0, $mat2::ZERO.determinant());
+ assert_eq!(1.0, $mat2::IDENTITY.determinant());
+ assert_eq!(1.0, $mat2::from_angle(deg(90.0)).determinant());
+ assert_eq!(1.0, $mat2::from_angle(deg(180.0)).determinant());
+ assert_eq!(1.0, $mat2::from_angle(deg(270.0)).determinant());
+ assert_eq!(
+ 2.0 * 2.0,
+ $mat2::from_diagonal($newvec2(2.0, 2.0)).determinant()
+ );
+ assert_eq!(
+ 1.0 * 4.0 - 2.0 * 3.0,
+ $mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]).determinant()
+ );
+ });
+
+ glam_test!(test_mat2_inverse, {
+ let inv = $mat2::IDENTITY.inverse();
+ assert_approx_eq!($mat2::IDENTITY, inv);
+
+ let rot = $mat2::from_angle(deg(90.0));
+ let rot_inv = rot.inverse();
+ assert_approx_eq!($mat2::IDENTITY, rot * rot_inv);
+ assert_approx_eq!($mat2::IDENTITY, rot_inv * rot);
+
+ let scale = $mat2::from_diagonal($newvec2(4.0, 5.0));
+ let scale_inv = scale.inverse();
+ assert_approx_eq!($mat2::IDENTITY, scale * scale_inv);
+ assert_approx_eq!($mat2::IDENTITY, scale_inv * scale);
+
+ let m = scale * rot;
+ let m_inv = m.inverse();
+ assert_approx_eq!($mat2::IDENTITY, m * m_inv);
+ assert_approx_eq!($mat2::IDENTITY, m_inv * m);
+ assert_approx_eq!(m_inv, rot_inv * scale_inv);
+
+ should_glam_assert!({ $mat2::ZERO.inverse() });
+ });
+
+ glam_test!(test_mat2_ops, {
+ let m0 = $mat2::from_cols_array_2d(&MATRIX);
+ let m0x2 = $mat2::from_cols_array_2d(&[[2.0, 4.0], [6.0, 8.0]]);
+ let m0_neg = $mat2::from_cols_array_2d(&[[-1.0, -2.0], [-3.0, -4.0]]);
+ assert_eq!(m0x2, m0 * 2.0);
+ assert_eq!(m0x2, 2.0 * m0);
+ assert_eq!(m0x2, m0 + m0);
+ assert_eq!($mat2::ZERO, m0 - m0);
+ assert_eq!(m0_neg, -m0);
+ assert_approx_eq!(m0, m0 * $mat2::IDENTITY);
+ assert_approx_eq!(m0, $mat2::IDENTITY * m0);
+
+ let mut m1 = m0;
+ m1 *= 2.0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 += m0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 -= m0;
+ assert_eq!($mat2::ZERO, m1);
+
+ let mut m1 = $mat2::IDENTITY;
+ m1 *= m0;
+ assert_approx_eq!(m0, m1);
+ });
+
+ glam_test!(test_mat2_fmt, {
+ let a = $mat2::from_cols_array_2d(&MATRIX);
+ assert_eq!(format!("{}", a), "[[1, 2], [3, 4]]");
+ });
+
+ glam_test!(test_mat2_to_from_slice, {
+ const MATRIX1D: [$t; 4] = [1.0, 2.0, 3.0, 4.0];
+ let m = $mat2::from_cols_slice(&MATRIX1D);
+ assert_eq!($mat2::from_cols_array(&MATRIX1D), m);
+ let mut out: [$t; 4] = Default::default();
+ m.write_cols_to_slice(&mut out);
+ assert_eq!(MATRIX1D, out);
+
+ should_panic!({ $mat2::from_cols_slice(&[0.0; 3]) });
+ should_panic!({ $mat2::IDENTITY.write_cols_to_slice(&mut [0.0; 3]) });
+ });
+
+ glam_test!(test_sum, {
+ let id = $mat2::IDENTITY;
+ assert_eq!(vec![id, id].iter().sum::<$mat2>(), id + id);
+ });
+
+ glam_test!(test_product, {
+ let two = $mat2::IDENTITY + $mat2::IDENTITY;
+ assert_eq!(vec![two, two].iter().product::<$mat2>(), two * two);
+ });
+
+ glam_test!(test_mat2_is_finite, {
+ use std::$t::INFINITY;
+ use std::$t::NAN;
+ use std::$t::NEG_INFINITY;
+ assert!($mat2::IDENTITY.is_finite());
+ assert!(!($mat2::IDENTITY * INFINITY).is_finite());
+ assert!(!($mat2::IDENTITY * NEG_INFINITY).is_finite());
+ assert!(!($mat2::IDENTITY * NAN).is_finite());
+ });
+ };
+}
+
+mod mat2 {
+ use super::support::deg;
+ use glam::{const_mat2, mat2, swizzles::*, vec2, Mat2, Mat3, Vec2};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<Mat2>());
+ if cfg!(feature = "scalar-math") {
+ assert_eq!(mem::align_of::<Vec2>(), mem::align_of::<Mat2>());
+ } else {
+ assert_eq!(16, mem::align_of::<Mat2>());
+ }
+ });
+
+ glam_test!(test_as, {
+ use glam::DMat2;
+ assert_eq!(
+ DMat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]),
+ Mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]).as_dmat2()
+ );
+ assert_eq!(
+ Mat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]),
+ DMat2::from_cols_array(&[1.0, 2.0, 3.0, 4.0]).as_mat2()
+ );
+ });
+
+ impl_mat2_tests!(f32, const_mat2, mat2, Mat2, Mat3, vec2, Vec2);
+}
+
+mod dmat2 {
+ use super::support::deg;
+ use glam::{const_dmat2, dmat2, dvec2, swizzles::*, DMat2, DMat3, DVec2};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(32, mem::size_of::<DMat2>());
+ assert_eq!(mem::align_of::<DVec2>(), mem::align_of::<DMat2>());
+ });
+
+ impl_mat2_tests!(f64, const_dmat2, dmat2, DMat2, DMat3, dvec2, DVec2);
+}
diff --git a/tests/mat3.rs b/tests/mat3.rs
new file mode 100644
index 0000000..0f015df
--- /dev/null
+++ b/tests/mat3.rs
@@ -0,0 +1,455 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_mat3_tests {
+ ($t:ident, $const_new:ident, $newmat3:ident, $mat3:ident, $mat2:ident, $mat4:ident, $quat:ident, $newvec3:ident, $vec3:ident, $vec2:ident) => {
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ const IDENTITY: [[$t; 3]; 3] = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]];
+
+ const MATRIX: [[$t; 3]; 3] = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]];
+
+ glam_test!(test_const, {
+ const M0: $mat3 = $const_new!([0.0; 9]);
+ const M1: $mat3 = $const_new!([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]);
+ const M2: $mat3 = $const_new!([1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]);
+ assert_eq!($mat3::ZERO, M0);
+ assert_eq!(
+ $mat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
+ M1
+ );
+ assert_eq!(
+ $mat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
+ M2
+ );
+ });
+
+ glam_test!(test_mat3_identity, {
+ assert_eq!(
+ $mat3::IDENTITY,
+ $mat3::from_cols_array(&[
+ 1., 0., 0., //
+ 0., 1., 0., //
+ 0., 0., 1., //
+ ])
+ );
+ let identity = $mat3::IDENTITY;
+ assert_eq!(IDENTITY, identity.to_cols_array_2d());
+ assert_eq!($mat3::from_cols_array_2d(&IDENTITY), identity);
+ assert_eq!(identity, identity * identity);
+ assert_eq!(identity, $mat3::default());
+ });
+
+ glam_test!(test_mat3_zero, {
+ assert_eq!(
+ $mat3::ZERO,
+ $mat3::from_cols_array(&[0., 0., 0., 0., 0., 0., 0., 0., 0.])
+ );
+ });
+
+ glam_test!(test_mat3_nan, {
+ assert!($mat3::NAN.is_nan());
+ assert!(!$mat3::NAN.is_finite());
+ });
+
+ glam_test!(test_mat3_accessors, {
+ let mut m = $mat3::ZERO;
+ m.x_axis = $newvec3(1.0, 2.0, 3.0);
+ m.y_axis = $newvec3(4.0, 5.0, 6.0);
+ m.z_axis = $newvec3(7.0, 8.0, 9.0);
+ assert_eq!($mat3::from_cols_array_2d(&MATRIX), m);
+ assert_eq!($newvec3(1.0, 2.0, 3.0), m.x_axis);
+ assert_eq!($newvec3(4.0, 5.0, 6.0), m.y_axis);
+ assert_eq!($newvec3(7.0, 8.0, 9.0), m.z_axis);
+
+ assert_eq!($newvec3(1.0, 2.0, 3.0), m.col(0));
+ assert_eq!($newvec3(4.0, 5.0, 6.0), m.col(1));
+ assert_eq!($newvec3(7.0, 8.0, 9.0), m.col(2));
+
+ assert_eq!($newvec3(1.0, 4.0, 7.0), m.row(0));
+ assert_eq!($newvec3(2.0, 5.0, 8.0), m.row(1));
+ assert_eq!($newvec3(3.0, 6.0, 9.0), m.row(2));
+
+ *m.col_mut(0) = m.col(0).zyx();
+ *m.col_mut(1) = m.col(1).zyx();
+ *m.col_mut(2) = m.col(2).zyx();
+ assert_eq!($newvec3(3.0, 2.0, 1.0), m.col(0));
+ assert_eq!($newvec3(6.0, 5.0, 4.0), m.col(1));
+ assert_eq!($newvec3(9.0, 8.0, 7.0), m.col(2));
+
+ should_panic!({ $mat3::ZERO.col(3) });
+ should_panic!({
+ let mut m = $mat3::ZERO;
+ m.col_mut(3);
+ });
+ should_panic!({ $mat3::ZERO.row(3) });
+ });
+
+ glam_test!(test_mat3_from_axes, {
+ let a = $mat3::from_cols_array_2d(&[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]);
+ assert_eq!(MATRIX, a.to_cols_array_2d());
+ let b = $mat3::from_cols(
+ $newvec3(1.0, 2.0, 3.0),
+ $newvec3(4.0, 5.0, 6.0),
+ $newvec3(7.0, 8.0, 9.0),
+ );
+ assert_eq!(a, b);
+ let c = $newmat3(
+ $newvec3(1.0, 2.0, 3.0),
+ $newvec3(4.0, 5.0, 6.0),
+ $newvec3(7.0, 8.0, 9.0),
+ );
+ assert_eq!(a, c);
+ let d = b.to_cols_array();
+ let f = $mat3::from_cols_array(&d);
+ assert_eq!(b, f);
+ });
+
+ glam_test!(test_from_rotation, {
+ let rot_x1 = $mat3::from_rotation_x(deg(180.0));
+ let rot_x2 = $mat3::from_axis_angle($vec3::X, deg(180.0));
+ assert_approx_eq!(rot_x1, rot_x2);
+ let rot_x3 = $mat3::from_quat($quat::from_rotation_x(deg(180.0)));
+ assert_approx_eq!(rot_x1, rot_x3);
+
+ let rot_y1 = $mat3::from_rotation_y(deg(180.0));
+ let rot_y2 = $mat3::from_axis_angle($vec3::Y, deg(180.0));
+ assert_approx_eq!(rot_y1, rot_y2);
+ let rot_y3 = $mat3::from_quat($quat::from_rotation_y(deg(180.0)));
+ assert_approx_eq!(rot_y1, rot_y3);
+
+ let rot_z1 = $mat3::from_rotation_z(deg(180.0));
+ let rot_z2 = $mat3::from_axis_angle($vec3::Z, deg(180.0));
+ assert_approx_eq!(rot_z1, rot_z2);
+ let rot_z3 = $mat3::from_quat($quat::from_rotation_z(deg(180.0)));
+ assert_approx_eq!(rot_z1, rot_z3);
+
+ should_glam_assert!({ $mat3::from_axis_angle($vec3::ZERO, 0.0) });
+ should_glam_assert!({ $mat3::from_quat($quat::from_xyzw(0.0, 0.0, 0.0, 0.0)) });
+ });
+
+ glam_test!(test_mat3_mul, {
+ let mat_a = $mat3::from_axis_angle($vec3::Z, deg(90.0));
+ assert_approx_eq!($newvec3(-1.0, 0.0, 0.0), mat_a * $newvec3(0.0, 1.0, 0.0));
+ assert_approx_eq!(
+ $vec3::new(-1.0, 0.0, 0.0),
+ mat_a.mul_vec3($vec3::new(0.0, 1.0, 0.0))
+ );
+ });
+
+ glam_test!(test_mat3_transform2d, {
+ let m = $mat3::from_translation($vec2::new(2.0, 4.0));
+ assert_eq!($vec2::ZERO, m.transform_vector2($vec2::ZERO));
+ assert_eq!($vec2::new(2.0, 4.0), m.transform_point2($vec2::ZERO));
+ assert_eq!($vec2::ZERO, m.transform_point2($vec2::new(-2.0, -4.0)));
+
+ let m = $mat3::from_angle($t::to_radians(90.0));
+ assert_approx_eq!($vec2::Y, m.transform_vector2($vec2::X), 1e-7);
+ assert_approx_eq!($vec2::Y, m.transform_point2($vec2::X), 1e-7);
+
+ let m = $mat3::from_scale($vec2::new(2.0, 4.0));
+ assert_eq!($vec2::new(2.0, 0.0), m.transform_vector2($vec2::X));
+ assert_eq!($vec2::new(0.0, 4.0), m.transform_vector2($vec2::Y));
+ assert_eq!($vec2::new(2.0, 0.0), m.transform_point2($vec2::X));
+ assert_eq!($vec2::new(0.0, 4.0), m.transform_point2($vec2::Y));
+
+ should_glam_assert!({ $mat3::from_scale($vec2::ZERO) });
+
+ let m = $mat3::from_scale_angle_translation(
+ $vec2::new(0.5, 1.5),
+ $t::to_radians(90.0),
+ $vec2::new(1.0, 2.0),
+ );
+ let result2 = m.transform_vector2($vec2::Y);
+ assert_approx_eq!($vec2::new(-1.5, 0.0), result2, 1.0e-6);
+ assert_approx_eq!(result2, (m * $vec2::Y.extend(0.0)).truncate());
+
+ let result2 = m.transform_point2($vec2::Y);
+ assert_approx_eq!($vec2::new(-0.5, 2.0), result2, 1.0e-6);
+ assert_approx_eq!(result2, (m * $vec2::Y.extend(1.0)).truncate());
+ });
+
+ glam_test!(test_from_ypr, {
+ use glam::EulerRot;
+ let zero = deg(0.0);
+ let yaw = deg(30.0);
+ let pitch = deg(60.0);
+ let roll = deg(90.0);
+ let y0 = $mat3::from_rotation_y(yaw);
+ let y1 = $mat3::from_euler(EulerRot::YXZ, yaw, zero, zero);
+ assert_approx_eq!(y0, y1);
+
+ let x0 = $mat3::from_rotation_x(pitch);
+ let x1 = $mat3::from_euler(EulerRot::YXZ, zero, pitch, zero);
+ assert_approx_eq!(x0, x1);
+
+ let z0 = $mat3::from_rotation_z(roll);
+ let z1 = $mat3::from_euler(EulerRot::YXZ, zero, zero, roll);
+ assert_approx_eq!(z0, z1);
+
+ let yx0 = y0 * x0;
+ let yx1 = $mat3::from_euler(EulerRot::YXZ, yaw, pitch, zero);
+ assert_approx_eq!(yx0, yx1);
+
+ let yxz0 = y0 * x0 * z0;
+ let yxz1 = $mat3::from_euler(EulerRot::YXZ, yaw, pitch, roll);
+ assert_approx_eq!(yxz0, yxz1, 1e-6);
+ });
+
+ glam_test!(test_from_diagonal, {
+ let m = $mat3::from_diagonal($vec3::new(2.0, 4.0, 8.0));
+ assert_approx_eq!(m * $vec3::new(1.0, 1.0, 1.0), $vec3::new(2.0, 4.0, 8.0));
+ assert_approx_eq!($newvec3(2.0, 0.0, 0.0), m.x_axis);
+ assert_approx_eq!($newvec3(0.0, 4.0, 0.0), m.y_axis);
+ assert_approx_eq!($newvec3(0.0, 0.0, 8.0), m.z_axis);
+ });
+
+ glam_test!(test_from_mat2, {
+ let m2 = $mat2::from_cols_array_2d(&[[1.0, 2.0], [3.0, 4.0]]);
+ let m3 = $mat3::from_mat2(m2);
+ assert_eq!(
+ $mat3::from_cols_array_2d(&[[1.0, 2.0, 0.0], [3.0, 4.0, 0.0], [0.0, 0.0, 1.0]]),
+ m3
+ );
+ });
+
+ glam_test!(test_from_mat4, {
+ let m4 = $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0],
+ ]);
+ let m3 = $mat3::from_mat4(m4);
+ assert_eq!(
+ $mat3::from_cols_array_2d(&[[1.0, 2.0, 3.0], [5.0, 6.0, 7.0], [9.0, 10.0, 11.0]]),
+ m3
+ );
+ });
+
+ glam_test!(test_mat3_transpose, {
+ let m = $newmat3(
+ $newvec3(1.0, 2.0, 3.0),
+ $newvec3(4.0, 5.0, 6.0),
+ $newvec3(7.0, 8.0, 9.0),
+ );
+ let mt = m.transpose();
+ assert_eq!($newvec3(1.0, 4.0, 7.0), mt.x_axis);
+ assert_eq!($newvec3(2.0, 5.0, 8.0), mt.y_axis);
+ assert_eq!($newvec3(3.0, 6.0, 9.0), mt.z_axis);
+ });
+
+ glam_test!(test_mat3_det, {
+ assert_eq!(0.0, $mat3::ZERO.determinant());
+ assert_eq!(1.0, $mat3::IDENTITY.determinant());
+ assert_eq!(1.0, $mat3::from_rotation_x(deg(90.0)).determinant());
+ assert_eq!(1.0, $mat3::from_rotation_y(deg(180.0)).determinant());
+ assert_eq!(1.0, $mat3::from_rotation_z(deg(270.0)).determinant());
+ assert_eq!(
+ 2.0 * 2.0 * 2.0,
+ $mat3::from_diagonal($vec3::new(2.0, 2.0, 2.0)).determinant()
+ );
+ });
+
+ glam_test!(test_mat3_inverse, {
+ // assert_eq!(None, $mat3::ZERO.inverse());
+ let inv = $mat3::IDENTITY.inverse();
+ // assert_ne!(None, inv);
+ assert_approx_eq!($mat3::IDENTITY, inv);
+
+ let rotz = $mat3::from_rotation_z(deg(90.0));
+ let rotz_inv = rotz.inverse();
+ // assert_ne!(None, rotz_inv);
+ // let rotz_inv = rotz_inv.unwrap();
+ assert_approx_eq!($mat3::IDENTITY, rotz * rotz_inv);
+ assert_approx_eq!($mat3::IDENTITY, rotz_inv * rotz);
+
+ let scale = $mat3::from_diagonal($vec3::new(4.0, 5.0, 6.0));
+ let scale_inv = scale.inverse();
+ // assert_ne!(None, scale_inv);
+ // let scale_inv = scale_inv.unwrap();
+ assert_approx_eq!($mat3::IDENTITY, scale * scale_inv);
+ assert_approx_eq!($mat3::IDENTITY, scale_inv * scale);
+
+ let m = scale * rotz;
+ let m_inv = m.inverse();
+ // assert_ne!(None, m_inv);
+ // let m_inv = m_inv.unwrap();
+ assert_approx_eq!($mat3::IDENTITY, m * m_inv);
+ assert_approx_eq!($mat3::IDENTITY, m_inv * m);
+ assert_approx_eq!(m_inv, rotz_inv * scale_inv);
+
+ should_glam_assert!({ $mat3::ZERO.inverse() });
+ });
+
+ glam_test!(test_mat3_ops, {
+ let m0 = $mat3::from_cols_array_2d(&MATRIX);
+ let m0x2 = $mat3::from_cols_array_2d(&[
+ [2.0, 4.0, 6.0],
+ [8.0, 10.0, 12.0],
+ [14.0, 16.0, 18.0],
+ ]);
+ let m0_neg = $mat3::from_cols_array_2d(&[
+ [-1.0, -2.0, -3.0],
+ [-4.0, -5.0, -6.0],
+ [-7.0, -8.0, -9.0],
+ ]);
+ assert_eq!(m0x2, m0 * 2.0);
+ assert_eq!(m0x2, 2.0 * m0);
+ assert_eq!(m0x2, m0 + m0);
+ assert_eq!($mat3::ZERO, m0 - m0);
+ assert_eq!(m0_neg, -m0);
+ assert_approx_eq!(m0, m0 * $mat3::IDENTITY);
+ assert_approx_eq!(m0, $mat3::IDENTITY * m0);
+
+ let mut m1 = m0;
+ m1 *= 2.0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 += m0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 -= m0;
+ assert_eq!($mat3::ZERO, m1);
+
+ let mut m1 = $mat3::IDENTITY;
+ m1 *= m0;
+ assert_approx_eq!(m0, m1);
+ });
+
+ glam_test!(test_mat3_fmt, {
+ let a = $mat3::from_cols_array_2d(&MATRIX);
+ assert_eq!(format!("{}", a), "[[1, 2, 3], [4, 5, 6], [7, 8, 9]]");
+ });
+
+ glam_test!(test_mat3_to_from_slice, {
+ const MATRIX1D: [$t; 9] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
+ let m = $mat3::from_cols_slice(&MATRIX1D);
+ assert_eq!($mat3::from_cols_array(&MATRIX1D), m);
+ let mut out: [$t; 9] = Default::default();
+ m.write_cols_to_slice(&mut out);
+ assert_eq!(MATRIX1D, out);
+
+ should_panic!({ $mat3::from_cols_slice(&[0.0; 8]) });
+ should_panic!({ $mat3::IDENTITY.write_cols_to_slice(&mut [0.0; 8]) });
+ });
+
+ glam_test!(test_sum, {
+ let id = $mat3::IDENTITY;
+ assert_eq!(vec![id, id].iter().sum::<$mat3>(), id + id);
+ });
+
+ glam_test!(test_product, {
+ let two = $mat3::IDENTITY + $mat3::IDENTITY;
+ assert_eq!(vec![two, two].iter().product::<$mat3>(), two * two);
+ });
+
+ glam_test!(test_mat3_is_finite, {
+ assert!($mat3::IDENTITY.is_finite());
+ assert!(!($mat3::IDENTITY * INFINITY).is_finite());
+ assert!(!($mat3::IDENTITY * NEG_INFINITY).is_finite());
+ assert!(!($mat3::IDENTITY * NAN).is_finite());
+ });
+ };
+}
+
+mod mat3 {
+ use super::support::deg;
+ use glam::{
+ const_mat3, mat3, swizzles::*, vec3, vec3a, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec3A,
+ };
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(36, mem::size_of::<Mat3>());
+ assert_eq!(mem::align_of::<Vec3>(), mem::align_of::<Mat3>());
+ });
+
+ glam_test!(test_mul_vec3a, {
+ let mat_a = Mat3::from_axis_angle(Vec3::Z, deg(90.0));
+ assert_approx_eq!(vec3a(-1.0, 0.0, 0.0), mat_a * Vec3A::Y);
+ assert_approx_eq!(vec3a(-1.0, 0.0, 0.0), mat_a.mul_vec3a(Vec3A::Y));
+ });
+
+ glam_test!(test_as, {
+ use glam::DMat3;
+ assert_eq!(
+ DMat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
+ Mat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]).as_dmat3()
+ );
+ assert_eq!(
+ Mat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
+ DMat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]).as_mat3()
+ );
+ });
+
+ impl_mat3_tests!(f32, const_mat3, mat3, Mat3, Mat2, Mat4, Quat, vec3, Vec3, Vec2);
+}
+
+mod mat3a {
+ use super::support::deg;
+ use glam::{
+ const_mat3a, mat3a, swizzles::*, vec3a, Mat2, Mat3A, Mat4, Quat, Vec2, Vec3, Vec3A,
+ };
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(48, mem::size_of::<Mat3A>());
+ assert_eq!(mem::align_of::<Vec3A>(), mem::align_of::<Mat3A>());
+ });
+
+ glam_test!(test_mul_vec3a, {
+ let mat_a = Mat3A::from_axis_angle(Vec3::Z, deg(90.0));
+ assert_approx_eq!(vec3a(-1.0, 0.0, 0.0), mat_a * Vec3A::Y);
+ assert_approx_eq!(vec3a(-1.0, 0.0, 0.0), mat_a.mul_vec3a(Vec3A::Y));
+ });
+
+ glam_test!(test_as, {
+ use glam::DMat3;
+ assert_eq!(
+ DMat3::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
+ Mat3A::from_cols_array(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]).as_dmat3()
+ );
+ });
+
+ impl_mat3_tests!(
+ f32,
+ const_mat3a,
+ mat3a,
+ Mat3A,
+ Mat2,
+ Mat4,
+ Quat,
+ vec3a,
+ Vec3,
+ Vec2
+ );
+}
+
+mod dmat3 {
+ use super::support::deg;
+ use glam::{const_dmat3, dmat3, dvec3, swizzles::*, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(72, mem::size_of::<DMat3>());
+ assert_eq!(mem::align_of::<DVec3>(), mem::align_of::<DMat3>());
+ });
+
+ impl_mat3_tests!(
+ f64,
+ const_dmat3,
+ dmat3,
+ DMat3,
+ DMat2,
+ DMat4,
+ DQuat,
+ dvec3,
+ DVec3,
+ DVec2
+ );
+}
diff --git a/tests/mat4.rs b/tests/mat4.rs
new file mode 100644
index 0000000..85d670d
--- /dev/null
+++ b/tests/mat4.rs
@@ -0,0 +1,745 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_mat4_tests {
+ ($t:ident, $const_new:ident, $newmat4:ident, $newvec4:ident, $newvec3:ident, $mat4:ident, $mat3:ident, $quat:ident, $vec4:ident, $vec3:ident) => {
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ const IDENTITY: [[$t; 4]; 4] = [
+ [1.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0],
+ [0.0, 0.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 1.0],
+ ];
+ const MATRIX: [[$t; 4]; 4] = [
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0],
+ ];
+
+ glam_test!(test_const, {
+ const M0: $mat4 = $const_new!([0.0; 16]);
+ const M1: $mat4 = $const_new!([
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
+ 16.0
+ ]);
+ const M2: $mat4 = $const_new!(
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0]
+ );
+ assert_eq!($mat4::ZERO, M0);
+ assert_eq!(
+ $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0]
+ ]),
+ M1
+ );
+ assert_eq!(
+ $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0]
+ ]),
+ M2
+ );
+ });
+
+ glam_test!(test_mat4_identity, {
+ assert_eq!(
+ $mat4::IDENTITY,
+ $mat4::from_cols_array(&[
+ 1., 0., 0., 0., //
+ 0., 1., 0., 0., //
+ 0., 0., 1., 0., //
+ 0., 0., 0., 1., //
+ ])
+ );
+ let identity = $mat4::IDENTITY;
+ assert_eq!(IDENTITY, identity.to_cols_array_2d());
+ assert_eq!($mat4::from_cols_array_2d(&IDENTITY), identity);
+ assert_eq!(identity, identity * identity);
+ assert_eq!(identity, $mat4::default());
+ });
+
+ glam_test!(test_mat4_zero, {
+ assert_eq!(
+ $mat4::ZERO,
+ $mat4::from_cols_array(&[
+ 0., 0., 0., 0., //
+ 0., 0., 0., 0., //
+ 0., 0., 0., 0., //
+ 0., 0., 0., 0., //
+ ])
+ );
+ });
+
+ glam_test!(test_mat4_nan, {
+ assert!($mat4::NAN.is_nan());
+ assert!(!$mat4::NAN.is_finite());
+ });
+
+ glam_test!(test_mat4_accessors, {
+ let mut m = $mat4::ZERO;
+ m.x_axis = $vec4::new(1.0, 2.0, 3.0, 4.0);
+ m.y_axis = $vec4::new(5.0, 6.0, 7.0, 8.0);
+ m.z_axis = $vec4::new(9.0, 10.0, 11.0, 12.0);
+ m.w_axis = $vec4::new(13.0, 14.0, 15.0, 16.0);
+ assert_eq!($mat4::from_cols_array_2d(&MATRIX), m);
+ assert_eq!($vec4::new(1.0, 2.0, 3.0, 4.0), m.x_axis);
+ assert_eq!($vec4::new(5.0, 6.0, 7.0, 8.0), m.y_axis);
+ assert_eq!($vec4::new(9.0, 10.0, 11.0, 12.0), m.z_axis);
+ assert_eq!($vec4::new(13.0, 14.0, 15.0, 16.0), m.w_axis);
+
+ assert_eq!($vec4::new(1.0, 2.0, 3.0, 4.0), m.col(0));
+ assert_eq!($vec4::new(5.0, 6.0, 7.0, 8.0), m.col(1));
+ assert_eq!($vec4::new(9.0, 10.0, 11.0, 12.0), m.col(2));
+ assert_eq!($vec4::new(13.0, 14.0, 15.0, 16.0), m.col(3));
+
+ assert_eq!($newvec4(1.0, 5.0, 9.0, 13.0), m.row(0));
+ assert_eq!($newvec4(2.0, 6.0, 10.0, 14.0), m.row(1));
+ assert_eq!($newvec4(3.0, 7.0, 11.0, 15.0), m.row(2));
+ assert_eq!($newvec4(4.0, 8.0, 12.0, 16.0), m.row(3));
+
+ *m.col_mut(0) = m.col(0).wzyx();
+ *m.col_mut(1) = m.col(1).wzyx();
+ *m.col_mut(2) = m.col(2).wzyx();
+ *m.col_mut(3) = m.col(3).wzyx();
+ assert_eq!($newvec4(4.0, 3.0, 2.0, 1.0), m.col(0));
+ assert_eq!($newvec4(8.0, 7.0, 6.0, 5.0), m.col(1));
+ assert_eq!($newvec4(12.0, 11.0, 10.0, 9.0), m.col(2));
+ assert_eq!($newvec4(16.0, 15.0, 14.0, 13.0), m.col(3));
+
+ should_panic!({ $mat4::ZERO.col(4) });
+ should_panic!({
+ let mut m = $mat4::ZERO;
+ m.col_mut(4);
+ });
+ should_panic!({ $mat4::ZERO.row(4) });
+ });
+
+ glam_test!(test_mat4_from_axes, {
+ let a = $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0, 12.0],
+ [13.0, 14.0, 15.0, 16.0],
+ ]);
+ assert_eq!(MATRIX, a.to_cols_array_2d());
+ let b = $mat4::from_cols(
+ $newvec4(1.0, 2.0, 3.0, 4.0),
+ $newvec4(5.0, 6.0, 7.0, 8.0),
+ $newvec4(9.0, 10.0, 11.0, 12.0),
+ $newvec4(13.0, 14.0, 15.0, 16.0),
+ );
+ assert_eq!(a, b);
+ let c = $newmat4(
+ $newvec4(1.0, 2.0, 3.0, 4.0),
+ $newvec4(5.0, 6.0, 7.0, 8.0),
+ $newvec4(9.0, 10.0, 11.0, 12.0),
+ $newvec4(13.0, 14.0, 15.0, 16.0),
+ );
+ assert_eq!(a, c);
+ let d = b.to_cols_array();
+ let f = $mat4::from_cols_array(&d);
+ assert_eq!(b, f);
+ });
+
+ glam_test!(test_mat4_translation, {
+ let translate = $mat4::from_translation($newvec3(1.0, 2.0, 3.0));
+ assert_eq!(
+ $mat4::from_cols(
+ $newvec4(1.0, 0.0, 0.0, 0.0),
+ $newvec4(0.0, 1.0, 0.0, 0.0),
+ $newvec4(0.0, 0.0, 1.0, 0.0),
+ $newvec4(1.0, 2.0, 3.0, 1.0)
+ ),
+ translate
+ );
+ });
+
+ glam_test!(test_from_rotation, {
+ let rot_x1 = $mat4::from_rotation_x(deg(180.0));
+ let rot_x2 = $mat4::from_axis_angle($vec3::X, deg(180.0));
+ assert_approx_eq!(rot_x1, rot_x2);
+ let rot_y1 = $mat4::from_rotation_y(deg(180.0));
+ let rot_y2 = $mat4::from_axis_angle($vec3::Y, deg(180.0));
+ assert_approx_eq!(rot_y1, rot_y2);
+ let rot_z1 = $mat4::from_rotation_z(deg(180.0));
+ let rot_z2 = $mat4::from_axis_angle($vec3::Z, deg(180.0));
+ assert_approx_eq!(rot_z1, rot_z2);
+
+ assert_approx_eq!($mat4::IDENTITY, $mat4::from_quat($quat::IDENTITY));
+
+ should_glam_assert!({ $mat4::from_axis_angle($vec3::ZERO, 0.0) });
+ should_glam_assert!({ $mat4::from_quat($quat::from_xyzw(0.0, 0.0, 0.0, 0.0)) });
+ });
+
+ glam_test!(test_from_mat3, {
+ let m3 =
+ $mat3::from_cols_array_2d(&[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]);
+ let m4 = $mat4::from_mat3(m3);
+ assert_eq!(
+ $mat4::from_cols_array_2d(&[
+ [1.0, 2.0, 3.0, 0.0],
+ [4.0, 5.0, 6.0, 0.0],
+ [7.0, 8.0, 9.0, 0.0],
+ [0.0, 0.0, 0.0, 1.0]
+ ]),
+ m4
+ );
+ });
+
+ glam_test!(test_mat4_mul, {
+ let m = $mat4::from_axis_angle($vec3::Z, deg(90.0));
+ let result3 = m.transform_vector3($vec3::Y);
+ assert_approx_eq!($newvec3(-1.0, 0.0, 0.0), result3);
+ assert_approx_eq!(result3, (m * $vec3::Y.extend(0.0)).truncate().into());
+ let result4 = m * $vec4::Y;
+ assert_approx_eq!($newvec4(-1.0, 0.0, 0.0, 0.0), result4);
+ assert_approx_eq!(result4, m * $vec4::Y);
+
+ let m = $mat4::from_scale_rotation_translation(
+ $vec3::new(0.5, 1.5, 2.0),
+ $quat::from_rotation_x(deg(90.0)),
+ $vec3::new(1.0, 2.0, 3.0),
+ );
+ let result3 = m.transform_vector3($vec3::Y);
+ assert_approx_eq!($newvec3(0.0, 0.0, 1.5), result3, 1.0e-6);
+ assert_approx_eq!(result3, (m * $vec3::Y.extend(0.0)).truncate().into());
+
+ let result3 = m.transform_point3($vec3::Y);
+ assert_approx_eq!($newvec3(1.0, 2.0, 4.5), result3, 1.0e-6);
+ assert_approx_eq!(result3, (m * $vec3::Y.extend(1.0)).truncate().into());
+
+ let m = $mat4::from_cols(
+ $newvec4(8.0, 0.0, 0.0, 0.0),
+ $newvec4(0.0, 4.0, 0.0, 0.0),
+ $newvec4(0.0, 0.0, 2.0, 2.0),
+ $newvec4(0.0, 0.0, 0.0, 0.0),
+ );
+ assert_approx_eq!(
+ $newvec3(4.0, 2.0, 1.0),
+ m.project_point3($newvec3(2.0, 2.0, 2.0))
+ );
+
+ should_glam_assert!({ $mat4::ZERO.transform_vector3($vec3::X) });
+ should_glam_assert!({ $mat4::ZERO.transform_point3($vec3::X) });
+ });
+
+ glam_test!(test_from_ypr, {
+ use glam::EulerRot;
+ let zero = deg(0.0);
+ let yaw = deg(30.0);
+ let pitch = deg(60.0);
+ let roll = deg(90.0);
+ let y0 = $mat4::from_rotation_y(yaw);
+ let y1 = $mat4::from_euler(EulerRot::YXZ, yaw, zero, zero);
+ assert_approx_eq!(y0, y1);
+
+ let x0 = $mat4::from_rotation_x(pitch);
+ let x1 = $mat4::from_euler(EulerRot::YXZ, zero, pitch, zero);
+ assert_approx_eq!(x0, x1);
+
+ let z0 = $mat4::from_rotation_z(roll);
+ let z1 = $mat4::from_euler(EulerRot::YXZ, zero, zero, roll);
+ assert_approx_eq!(z0, z1);
+
+ let yx0 = y0 * x0;
+ let yx1 = $mat4::from_euler(EulerRot::YXZ, yaw, pitch, zero);
+ assert_approx_eq!(yx0, yx1);
+
+ let yxz0 = y0 * x0 * z0;
+ let yxz1 = $mat4::from_euler(EulerRot::YXZ, yaw, pitch, roll);
+ assert_approx_eq!(yxz0, yxz1, 1e-6);
+ });
+
+ glam_test!(test_from_scale, {
+ let m = $mat4::from_scale($vec3::new(2.0, 4.0, 8.0));
+ assert_approx_eq!($vec4::X * 2.0, m.x_axis);
+ assert_approx_eq!($vec4::Y * 4.0, m.y_axis);
+ assert_approx_eq!($vec4::Z * 8.0, m.z_axis);
+ assert_approx_eq!($vec4::W, m.w_axis);
+ assert_approx_eq!(
+ m.transform_point3($vec3::new(1.0, 1.0, 1.0)),
+ $vec3::new(2.0, 4.0, 8.0)
+ );
+
+ should_glam_assert!({ $mat4::from_scale($vec3::ZERO) });
+ });
+
+ glam_test!(test_mat4_transpose, {
+ let m = $newmat4(
+ $newvec4(1.0, 2.0, 3.0, 4.0),
+ $newvec4(5.0, 6.0, 7.0, 8.0),
+ $newvec4(9.0, 10.0, 11.0, 12.0),
+ $newvec4(13.0, 14.0, 15.0, 16.0),
+ );
+ let mt = m.transpose();
+ assert_eq!($newvec4(1.0, 5.0, 9.0, 13.0), mt.x_axis);
+ assert_eq!($newvec4(2.0, 6.0, 10.0, 14.0), mt.y_axis);
+ assert_eq!($newvec4(3.0, 7.0, 11.0, 15.0), mt.z_axis);
+ assert_eq!($newvec4(4.0, 8.0, 12.0, 16.0), mt.w_axis);
+ });
+
+ glam_test!(test_mat4_det, {
+ assert_eq!(0.0, $mat4::ZERO.determinant());
+ assert_eq!(1.0, $mat4::IDENTITY.determinant());
+ assert_eq!(1.0, $mat4::from_rotation_x(deg(90.0)).determinant());
+ assert_eq!(1.0, $mat4::from_rotation_y(deg(180.0)).determinant());
+ assert_eq!(1.0, $mat4::from_rotation_z(deg(270.0)).determinant());
+ assert_eq!(
+ 2.0 * 2.0 * 2.0,
+ $mat4::from_scale($newvec3(2.0, 2.0, 2.0)).determinant()
+ );
+ assert_eq!(
+ 1.0,
+ $newmat4(
+ $newvec4(0.0, 0.0, 0.0, 1.0),
+ $newvec4(1.0, 0.0, 0.0, 0.0),
+ $newvec4(0.0, 0.0, 1.0, 0.0),
+ $newvec4(0.0, 1.0, 0.0, 0.0),
+ )
+ .determinant()
+ );
+ });
+
+ glam_test!(test_mat4_inverse, {
+ // assert_eq!(None, $mat4::ZERO.inverse());
+ let inv = $mat4::IDENTITY.inverse();
+ // assert_ne!(None, inv);
+ assert_approx_eq!($mat4::IDENTITY, inv);
+
+ let rotz = $mat4::from_rotation_z(deg(90.0));
+ let rotz_inv = rotz.inverse();
+ // assert_ne!(None, rotz_inv);
+ // let rotz_inv = rotz_inv.unwrap();
+ assert_approx_eq!($mat4::IDENTITY, rotz * rotz_inv);
+ assert_approx_eq!($mat4::IDENTITY, rotz_inv * rotz);
+
+ let trans = $mat4::from_translation($newvec3(1.0, 2.0, 3.0));
+ let trans_inv = trans.inverse();
+ // assert_ne!(None, trans_inv);
+ // let trans_inv = trans_inv.unwrap();
+ assert_approx_eq!($mat4::IDENTITY, trans * trans_inv);
+ assert_approx_eq!($mat4::IDENTITY, trans_inv * trans);
+
+ let scale = $mat4::from_scale($newvec3(4.0, 5.0, 6.0));
+ let scale_inv = scale.inverse();
+ // assert_ne!(None, scale_inv);
+ // let scale_inv = scale_inv.unwrap();
+ assert_approx_eq!($mat4::IDENTITY, scale * scale_inv);
+ assert_approx_eq!($mat4::IDENTITY, scale_inv * scale);
+
+ let m = scale * rotz * trans;
+ let m_inv = m.inverse();
+ // assert_ne!(None, m_inv);
+ // let m_inv = m_inv.unwrap();
+ assert_approx_eq!($mat4::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($mat4::IDENTITY, m_inv * m, 1.0e-5);
+ assert_approx_eq!(m_inv, trans_inv * rotz_inv * scale_inv, 1.0e-6);
+
+ // Make sure we can invert a "random" matrix:
+ let m = $mat4::from_cols(
+ $newvec4(1.0, -0.3, 1.0, 1.0),
+ $newvec4(0.5, 0.6, 0.7, 0.8),
+ $newvec4(-0.9, -0.3, 0.0, 12.0),
+ $newvec4(0.13, 0.14, 0.15, 0.16),
+ );
+ let m_inv = m.inverse();
+ assert_approx_eq!($mat4::IDENTITY, m * m_inv, 1.0e-5);
+ assert_approx_eq!($mat4::IDENTITY, m_inv * m, 1.0e-5);
+
+ should_glam_assert!({ $mat4::ZERO.inverse() });
+ });
+
+ glam_test!(test_mat4_decompose, {
+ // identity
+ let (out_scale, out_rotation, out_translation) =
+ $mat4::IDENTITY.to_scale_rotation_translation();
+ assert_approx_eq!($vec3::ONE, out_scale);
+ assert!(out_rotation.is_near_identity());
+ assert_approx_eq!($vec3::ZERO, out_translation);
+
+ // no scale
+ let in_scale = $vec3::ONE;
+ let in_translation = $vec3::new(-2.0, 4.0, -0.125);
+ let in_rotation = $quat::from_euler(
+ glam::EulerRot::YXZ,
+ $t::to_radians(-45.0),
+ $t::to_radians(180.0),
+ $t::to_radians(270.0),
+ );
+ let in_mat =
+ $mat4::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $mat4::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+
+ // positive scale
+ let in_scale = $vec3::new(1.0, 2.0, 4.0);
+ let in_mat =
+ $mat4::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $mat4::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+
+ // negative scale
+ let in_scale = $vec3::new(-4.0, 1.0, 2.0);
+ let in_mat =
+ $mat4::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // out_rotation is different but produces the same matrix
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $mat4::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-5
+ );
+
+ // negative scale
+ let in_scale = $vec3::new(4.0, -1.0, -2.0);
+ let in_mat =
+ $mat4::from_scale_rotation_translation(in_scale, in_rotation, in_translation);
+ let (out_scale, out_rotation, out_translation) = in_mat.to_scale_rotation_translation();
+ // out_scale and out_rotation are different but they produce the same matrix
+ // assert_approx_eq!(in_scale, out_scale, 1e-6);
+ // assert_approx_eq!(in_rotation, out_rotation);
+ assert_approx_eq!(in_translation, out_translation);
+ assert_approx_eq!(
+ in_mat,
+ $mat4::from_scale_rotation_translation(out_scale, out_rotation, out_translation),
+ 1e-6
+ );
+
+ should_glam_assert!({
+ $mat4::from_scale_rotation_translation(
+ $vec3::ONE,
+ $quat::from_xyzw(0.0, 0.0, 0.0, 0.0),
+ $vec3::ZERO,
+ )
+ });
+ should_glam_assert!({
+ $mat4::from_rotation_translation($quat::from_xyzw(0.0, 0.0, 0.0, 0.0), $vec3::ZERO)
+ });
+ // TODO: should check scale
+ // should_glam_assert!({ $mat4::from_scale_rotation_translation($vec3::ZERO, $quat::IDENTITY, $vec3::ZERO) });
+ should_glam_assert!({ $mat4::ZERO.to_scale_rotation_translation() });
+ });
+
+ glam_test!(test_mat4_look_at, {
+ let eye = $vec3::new(0.0, 0.0, -5.0);
+ let center = $vec3::new(0.0, 0.0, 0.0);
+ let up = $vec3::new(1.0, 0.0, 0.0);
+ let lh = $mat4::look_at_lh(eye, center, up);
+ let rh = $mat4::look_at_rh(eye, center, up);
+ let point = $vec3::new(1.0, 0.0, 0.0);
+ assert_approx_eq!(lh.transform_point3(point), $vec3::new(0.0, 1.0, 5.0));
+ assert_approx_eq!(rh.transform_point3(point), $vec3::new(0.0, 1.0, -5.0));
+
+ should_glam_assert!({ $mat4::look_at_lh($vec3::ONE, $vec3::ZERO, $vec3::ZERO) });
+ should_glam_assert!({ $mat4::look_at_rh($vec3::ONE, $vec3::ZERO, $vec3::ZERO) });
+ });
+
+ glam_test!(test_mat4_perspective_gl_rh, {
+ let projection = $mat4::perspective_rh_gl($t::to_radians(90.0), 2.0, 5.0, 15.0);
+
+ let original = $vec3::new(5.0, 5.0, -15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 15.0, 15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, -5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, -5.0, 5.0), projected);
+ });
+
+ glam_test!(test_mat4_perspective_lh, {
+ let projection = $mat4::perspective_lh($t::to_radians(90.0), 2.0, 5.0, 15.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 15.0, 15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 0.0, 5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_lh(0.0, 1.0, 1.0, 0.0) });
+ should_glam_assert!({ $mat4::perspective_lh(0.0, 1.0, 0.0, 1.0) });
+ });
+
+ glam_test!(test_mat4_perspective_infinite_lh, {
+ let projection = $mat4::perspective_infinite_lh($t::to_radians(90.0), 2.0, 5.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 10.0, 15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 0.0, 5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_infinite_lh(0.0, 1.0, 0.0) });
+ });
+
+ glam_test!(test_mat4_perspective_infinite_reverse_lh, {
+ let projection = $mat4::perspective_infinite_reverse_lh($t::to_radians(90.0), 2.0, 5.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 5.0, 15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 5.0, 5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_infinite_reverse_lh(0.0, 1.0, 0.0) });
+ });
+
+ glam_test!(test_mat4_perspective_rh, {
+ let projection = $mat4::perspective_rh($t::to_radians(90.0), 2.0, 5.0, 15.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, -30.0, -15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, -15.0, -5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_rh(0.0, 1.0, 1.0, 0.0) });
+ should_glam_assert!({ $mat4::perspective_rh(0.0, 1.0, 0.0, 1.0) });
+ });
+
+ glam_test!(test_mat4_perspective_infinite_rh, {
+ let projection = $mat4::perspective_infinite_rh($t::to_radians(90.0), 2.0, 5.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, -20.0, -15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, -10.0, -5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_infinite_rh(0.0, 1.0, 0.0) });
+ });
+
+ glam_test!(test_mat4_perspective_infinite_reverse_rh, {
+ let projection = $mat4::perspective_infinite_reverse_rh($t::to_radians(90.0), 2.0, 5.0);
+
+ let original = $vec3::new(5.0, 5.0, 15.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 5.0, -15.0), projected);
+
+ let original = $vec3::new(5.0, 5.0, 5.0);
+ let projected = projection * original.extend(1.0);
+ assert_approx_eq!($vec4::new(2.5, 5.0, 5.0, -5.0), projected);
+
+ should_glam_assert!({ $mat4::perspective_infinite_reverse_rh(0.0, 1.0, 0.0) });
+ });
+
        // GL-style right-handed orthographic projection: project a sample
        // point and check the expected clip-space result.
        glam_test!(test_mat4_orthographic_gl_rh, {
            let projection = $mat4::orthographic_rh_gl(-10.0, 10.0, -5.0, 5.0, 0.0, -10.0);
            let original = $vec4::new(5.0, 5.0, -5.0, 1.0);
            let projected = projection.mul_vec4(original);
            assert_approx_eq!(projected, $vec4::new(0.5, 1.0, -2.0, 1.0));
        });

        // Right-handed orthographic projection: points in front of the camera
        // (-z) map to smaller depth values than points behind (+z).
        glam_test!(test_mat4_orthographic_rh, {
            let projection = $mat4::orthographic_rh(-10.0, 10.0, -5.0, 5.0, -10.0, 10.0);
            let original = $vec4::new(5.0, 5.0, -5.0, 1.0);
            let projected = projection.mul_vec4(original);
            assert_approx_eq!(projected, $vec4::new(0.5, 1.0, 0.75, 1.0));

            let original = $vec4::new(5.0, 5.0, 5.0, 1.0);
            let projected = projection.mul_vec4(original);
            assert_approx_eq!(projected, $vec4::new(0.5, 1.0, 0.25, 1.0));
        });

        // Left-handed orthographic projection: same sample points as the
        // right-handed case, with the two depth results swapped.
        glam_test!(test_mat4_orthographic_lh, {
            let projection = $mat4::orthographic_lh(-10.0, 10.0, -5.0, 5.0, -10.0, 10.0);
            let original = $vec4::new(5.0, 5.0, -5.0, 1.0);
            let projected = projection.mul_vec4(original);
            assert_approx_eq!(projected, $vec4::new(0.5, 1.0, 0.25, 1.0));

            let original = $vec4::new(5.0, 5.0, 5.0, 1.0);
            let projected = projection.mul_vec4(original);
            assert_approx_eq!(projected, $vec4::new(0.5, 1.0, 0.75, 1.0));
        });
+
+ glam_test!(test_mat4_ops, {
+ let m0 = $mat4::from_cols_array_2d(&MATRIX);
+ let m0x2 = $mat4::from_cols_array_2d(&[
+ [2.0, 4.0, 6.0, 8.0],
+ [10.0, 12.0, 14.0, 16.0],
+ [18.0, 20.0, 22.0, 24.0],
+ [26.0, 28.0, 30.0, 32.0],
+ ]);
+ let m0_neg = $mat4::from_cols_array_2d(&[
+ [-1.0, -2.0, -3.0, -4.0],
+ [-5.0, -6.0, -7.0, -8.0],
+ [-9.0, -10.0, -11.0, -12.0],
+ [-13.0, -14.0, -15.0, -16.0],
+ ]);
+ assert_eq!(m0x2, m0 * 2.0);
+ assert_eq!(m0x2, 2.0 * m0);
+ assert_eq!(m0x2, m0 + m0);
+ assert_eq!($mat4::ZERO, m0 - m0);
+ assert_eq!(m0_neg, -m0);
+ assert_approx_eq!(m0, m0 * $mat4::IDENTITY);
+ assert_approx_eq!(m0, $mat4::IDENTITY * m0);
+
+ let mut m1 = m0;
+ m1 *= 2.0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 += m0;
+ assert_eq!(m0x2, m1);
+
+ let mut m1 = m0;
+ m1 -= m0;
+ assert_eq!($mat4::ZERO, m1);
+
+ let mut m1 = $mat4::IDENTITY;
+ m1 *= m0;
+ assert_approx_eq!(m0, m1);
+ });
+
        // Checks the `Display` format of the shared `MATRIX` fixture
        // (columns printed as nested lists).
        glam_test!(test_mat4_fmt, {
            let a = $mat4::from_cols_array_2d(&MATRIX);
            assert_eq!(
                format!("{}", a),
                "[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]"
            );
        });
+
        // Round-trips a matrix through a flat 16-element slice (column
        // order, per `from_cols_slice`); slices shorter than 16 must panic.
        glam_test!(test_mat4_to_from_slice, {
            const MATRIX1D: [$t; 16] = [
                1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
                16.0,
            ];
            let m = $mat4::from_cols_slice(&MATRIX1D);
            assert_eq!($mat4::from_cols_array(&MATRIX1D), m);
            let mut out: [$t; 16] = Default::default();
            m.write_cols_to_slice(&mut out);
            assert_eq!(MATRIX1D, out);

            should_panic!({ $mat4::from_cols_slice(&[0.0; 15]) });
            should_panic!({ $mat4::IDENTITY.write_cols_to_slice(&mut [0.0; 15]) });
        });
+
        // Iterator `Sum` impl: summing two identities equals element-wise add.
        glam_test!(test_sum, {
            let id = $mat4::IDENTITY;
            assert_eq!(vec![id, id].iter().sum::<$mat4>(), id + id);
        });

        // Iterator `Product` impl: matrix product of the iterated values.
        glam_test!(test_product, {
            let two = $mat4::IDENTITY + $mat4::IDENTITY;
            assert_eq!(vec![two, two].iter().product::<$mat4>(), two * two);
        });

        // `is_finite` must reject matrices containing inf, -inf or NaN.
        glam_test!(test_mat4_is_finite, {
            assert!($mat4::IDENTITY.is_finite());
            assert!(!($mat4::IDENTITY * INFINITY).is_finite());
            assert!(!($mat4::IDENTITY * NEG_INFINITY).is_finite());
            assert!(!($mat4::IDENTITY * NAN).is_finite());
        });
+ };
+}
+
mod mat4 {
    use super::support::deg;
    use glam::{const_mat4, mat4, swizzles::*, vec3, vec4, Mat3, Mat4, Quat, Vec3, Vec4};

    // Mat4 is four Vec4 columns: 64 bytes with Vec4's alignment.
    glam_test!(test_align, {
        use std::mem;
        assert_eq!(mem::align_of::<Vec4>(), mem::align_of::<Mat4>());
        assert_eq!(64, mem::size_of::<Mat4>());
    });

    // Element-preserving casts between the f32 and f64 matrix types.
    glam_test!(test_as, {
        use glam::DMat4;
        assert_eq!(
            DMat4::from_cols_array_2d(&[
                [1.0, 2.0, 3.0, 4.0],
                [5.0, 6.0, 7.0, 8.0],
                [9.0, 10.0, 11.0, 12.0],
                [13.0, 14.0, 15.0, 16.0]
            ]),
            Mat4::from_cols_array_2d(&[
                [1.0, 2.0, 3.0, 4.0],
                [5.0, 6.0, 7.0, 8.0],
                [9.0, 10.0, 11.0, 12.0],
                [13.0, 14.0, 15.0, 16.0]
            ])
            .as_dmat4()
        );
        assert_eq!(
            Mat4::from_cols_array_2d(&[
                [1.0, 2.0, 3.0, 4.0],
                [5.0, 6.0, 7.0, 8.0],
                [9.0, 10.0, 11.0, 12.0],
                [13.0, 14.0, 15.0, 16.0]
            ]),
            DMat4::from_cols_array_2d(&[
                [1.0, 2.0, 3.0, 4.0],
                [5.0, 6.0, 7.0, 8.0],
                [9.0, 10.0, 11.0, 12.0],
                [13.0, 14.0, 15.0, 16.0]
            ])
            .as_mat4()
        );
    });

    // Instantiate the shared Mat4 test-suite for the f32 types.
    impl_mat4_tests!(f32, const_mat4, mat4, vec4, vec3, Mat4, Mat3, Quat, Vec4, Vec3);
}
+
mod dmat4 {
    use super::support::deg;
    use glam::{const_dmat4, dmat4, dvec3, dvec4, swizzles::*, DMat3, DMat4, DQuat, DVec3, DVec4};

    // DMat4 is four DVec4 columns of f64: 128 bytes with DVec4's alignment.
    glam_test!(test_align, {
        use std::mem;
        assert_eq!(mem::align_of::<DVec4>(), mem::align_of::<DMat4>());
        assert_eq!(128, mem::size_of::<DMat4>());
    });

    // Instantiate the shared Mat4 test-suite for the f64 types.
    impl_mat4_tests!(
        f64,
        const_dmat4,
        dmat4,
        dvec4,
        dvec3,
        DMat4,
        DMat3,
        DQuat,
        DVec4,
        DVec3
    );
}
diff --git a/tests/quat.rs b/tests/quat.rs
new file mode 100644
index 0000000..831bf0f
--- /dev/null
+++ b/tests/quat.rs
@@ -0,0 +1,584 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_quat_tests {
+ ($t:ident, $const_new:ident, $new:ident, $mat3:ident, $mat4:ident, $quat:ident, $vec2:ident, $vec3:ident, $vec4:ident) => {
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_const, {
+ const Q: $quat = $const_new!([1.0, 2.0, 3.0, 4.0]);
+ assert_eq!($quat::from_xyzw(1.0, 2.0, 3.0, 4.0), Q);
+ });
+
+ glam_test!(test_nan, {
+ assert!($quat::NAN.is_nan());
+ assert!(!$quat::NAN.is_finite());
+ });
+
+ glam_test!(test_new, {
+ let ytheta = deg(45.0);
+ let q0 = $quat::from_rotation_y(ytheta);
+
+ let v1 = $vec4::new(0.0, (ytheta * 0.5).sin(), 0.0, (ytheta * 0.5).cos());
+ assert_eq!(q0, $quat::from_vec4(v1));
+ let q1 = $quat::from_vec4(v1);
+ assert_eq!(v1, q1.into());
+
+ assert_eq!(q0, $new(v1.x, v1.y, v1.z, v1.w));
+
+ let a1: [$t; 4] = q1.into();
+ assert_eq!([v1.x, v1.y, v1.z, v1.w], a1);
+
+ assert_eq!(q1, $quat::from_array(a1));
+ });
+
+ glam_test!(test_funcs, {
+ let q0 = $quat::from_euler(EulerRot::YXZ, deg(45.0), deg(180.0), deg(90.0));
+ assert!(q0.is_normalized());
+ assert_approx_eq!(q0.length_squared(), 1.0);
+ assert_approx_eq!(q0.length(), 1.0);
+ assert_approx_eq!(q0.length_recip(), 1.0);
+ assert_approx_eq!(q0, q0.normalize());
+
+ assert_approx_eq!(q0.dot(q0), 1.0);
+ assert_approx_eq!(q0.dot(q0), 1.0);
+
+ let q1 = $quat::from_vec4($vec4::from(q0) * 2.0);
+ assert!(!q1.is_normalized());
+ assert_approx_eq!(q1.length_squared(), 4.0, 1.0e-6);
+ assert_approx_eq!(q1.length(), 2.0);
+ assert_approx_eq!(q1.length_recip(), 0.5);
+ assert_approx_eq!(q0, q1.normalize());
+ assert_approx_eq!(q0.dot(q1), 2.0, 1.0e-6);
+
+ should_glam_assert!({ ($quat::IDENTITY * 0.0).normalize() });
+ });
+
+ glam_test!(test_rotation, {
+ let zero = deg(0.0);
+ let yaw = deg(30.0);
+ let pitch = deg(60.0);
+ let roll = deg(90.0);
+ let y0 = $quat::from_rotation_y(yaw);
+ assert!(y0.is_normalized());
+ let (axis, angle) = y0.to_axis_angle();
+ assert_approx_eq!(axis, $vec3::Y, 1.0e-6);
+ assert_approx_eq!(angle, yaw);
+ let y1 = $quat::from_euler(EulerRot::YXZ, yaw, zero, zero);
+ assert_approx_eq!(y0, y1);
+ let y2 = $quat::from_axis_angle($vec3::Y, yaw);
+ assert_approx_eq!(y0, y2);
+ let y3 = $quat::from_mat3(&$mat3::from_rotation_y(yaw));
+ assert_approx_eq!(y0, y3);
+ let y4 = $quat::from_mat3(&$mat3::from_quat(y0));
+ assert_approx_eq!(y0, y4);
+
+ let x0 = $quat::from_rotation_x(pitch);
+ assert!(x0.is_normalized());
+ let (axis, angle) = x0.to_axis_angle();
+ assert_approx_eq!(axis, $vec3::X);
+ assert_approx_eq!(angle, pitch);
+ let x1 = $quat::from_euler(EulerRot::YXZ, zero, pitch, zero);
+ assert_approx_eq!(x0, x1);
+ let x2 = $quat::from_axis_angle($vec3::X, pitch);
+ assert_approx_eq!(x0, x2);
+ let x3 = $quat::from_mat4(&$mat4::from_rotation_x(deg(180.0)));
+ assert!(x3.is_normalized());
+ assert_approx_eq!($quat::from_rotation_x(deg(180.0)), x3);
+
+ let z0 = $quat::from_rotation_z(roll);
+ assert!(z0.is_normalized());
+ let (axis, angle) = z0.to_axis_angle();
+ assert_approx_eq!(axis, $vec3::Z);
+ assert_approx_eq!(angle, roll);
+ let z1 = $quat::from_euler(EulerRot::YXZ, zero, zero, roll);
+ assert_approx_eq!(z0, z1);
+ let z2 = $quat::from_axis_angle($vec3::Z, roll);
+ assert_approx_eq!(z0, z2);
+ let z3 = $quat::from_mat4(&$mat4::from_rotation_z(roll));
+ assert_approx_eq!(z0, z3);
+
+ let yx0 = y0 * x0;
+ assert!(yx0.is_normalized());
+ let yx1 = $quat::from_euler(EulerRot::YXZ, yaw, pitch, zero);
+ assert_approx_eq!(yx0, yx1);
+
+ let yxz0 = y0 * x0 * z0;
+ assert!(yxz0.is_normalized());
+ let yxz1 = $quat::from_euler(EulerRot::YXZ, yaw, pitch, roll);
+ assert_approx_eq!(yxz0, yxz1);
+
+ // use the conjugate of z0 to remove the rotation from yxz0
+ let yx2 = yxz0 * z0.conjugate();
+ assert_approx_eq!(yx0, yx2);
+ assert!((yxz0 * yxz0.conjugate()).is_near_identity());
+
+ // test inverse does the same
+ let yx2 = yxz0 * z0.inverse();
+ assert_approx_eq!(yx0, yx2);
+ assert!((yxz0 * yxz0.inverse()).is_near_identity());
+
+ let yxz2 = $quat::from_mat4(&$mat4::from_quat(yxz0));
+ assert_approx_eq!(yxz0, yxz2);
+
+ // if near identity, just returns x axis and 0 rotation
+ let (axis, angle) = $quat::IDENTITY.to_axis_angle();
+ assert_eq!(axis, $vec3::X);
+ assert_eq!(angle, rad(0.0));
+
+ should_glam_assert!({ ($quat::IDENTITY * 2.0).inverse() });
+ should_glam_assert!({ $quat::from_axis_angle($vec3::ZERO, 0.0) });
+ });
+
+ glam_test!(test_from_scaled_axis, {
+ assert_eq!($quat::from_scaled_axis($vec3::ZERO), $quat::IDENTITY);
+ assert_eq!(
+ $quat::from_scaled_axis($vec3::Y * 1e-10),
+ $quat::from_axis_angle($vec3::Y, 1e-10)
+ );
+ assert_eq!(
+ $quat::from_scaled_axis($vec3::X * 1.0),
+ $quat::from_axis_angle($vec3::X, 1.0)
+ );
+ assert_eq!(
+ $quat::from_scaled_axis($vec3::Z * 2.0),
+ $quat::from_axis_angle($vec3::Z, 2.0)
+ );
+
+ assert_eq!(
+ $quat::from_scaled_axis($vec3::ZERO).to_scaled_axis(),
+ $vec3::ZERO
+ );
+ for &v in &vec3_float_test_vectors!($vec3) {
+ if v.length() < core::$t::consts::PI {
+ assert!(($quat::from_scaled_axis(v).to_scaled_axis() - v).length() < 1e-6,);
+ }
+ }
+ });
+
+ glam_test!(test_mul_vec3, {
+ let qrz = $quat::from_rotation_z(deg(90.0));
+ assert_approx_eq!($vec3::Y, qrz * $vec3::X);
+ assert_approx_eq!($vec3::Y, qrz.mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::Y, -qrz * $vec3::X);
+ assert_approx_eq!($vec3::Y, qrz.neg().mul_vec3($vec3::X));
+ assert_approx_eq!(-$vec3::X, qrz * $vec3::Y);
+ assert_approx_eq!(-$vec3::X, qrz.mul_vec3($vec3::Y));
+ assert_approx_eq!(-$vec3::X, -qrz * $vec3::Y);
+ assert_approx_eq!(-$vec3::X, qrz.neg().mul_vec3($vec3::Y));
+
+ // check vec3 * mat3 is the same
+ let mrz = $mat3::from_quat(qrz);
+ assert_approx_eq!($vec3::Y, mrz * $vec3::X);
+ assert_approx_eq!($vec3::Y, mrz.mul_vec3($vec3::X));
+ // assert_approx_eq!($vec3::Y, -mrz * $vec3::X);
+ assert_approx_eq!(-$vec3::X, mrz * $vec3::Y);
+ assert_approx_eq!(-$vec3::X, mrz.mul_vec3($vec3::Y));
+
+ let qrx = $quat::from_rotation_x(deg(90.0));
+ assert_approx_eq!($vec3::X, qrx * $vec3::X);
+ assert_approx_eq!($vec3::X, qrx.mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::X, -qrx * $vec3::X);
+ assert_approx_eq!($vec3::X, qrx.neg().mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::Z, qrx * $vec3::Y);
+ assert_approx_eq!($vec3::Z, qrx.mul_vec3($vec3::Y));
+ assert_approx_eq!($vec3::Z, -qrx * $vec3::Y);
+ assert_approx_eq!($vec3::Z, qrx.neg().mul_vec3($vec3::Y));
+
+ // check vec3 * mat3 is the same
+ let mrx = $mat3::from_quat(qrx);
+ assert_approx_eq!($vec3::X, mrx * $vec3::X);
+ assert_approx_eq!($vec3::X, mrx.mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::Z, mrx * $vec3::Y);
+ assert_approx_eq!($vec3::Z, mrx.mul_vec3($vec3::Y));
+
+ let qrxz = qrz * qrx;
+ assert_approx_eq!($vec3::Y, qrxz * $vec3::X);
+ assert_approx_eq!($vec3::Y, qrxz.mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::Z, qrxz * $vec3::Y);
+ assert_approx_eq!($vec3::Z, qrxz.mul_vec3($vec3::Y));
+
+ let mrxz = mrz * mrx;
+ assert_approx_eq!($vec3::Y, mrxz * $vec3::X);
+ assert_approx_eq!($vec3::Y, mrxz.mul_vec3($vec3::X));
+ assert_approx_eq!($vec3::Z, mrxz * $vec3::Y);
+ assert_approx_eq!($vec3::Z, mrxz.mul_vec3($vec3::Y));
+
+ let qrzx = qrx * qrz;
+ assert_approx_eq!($vec3::Z, qrzx * $vec3::X);
+ assert_approx_eq!($vec3::Z, qrzx.mul_vec3($vec3::X));
+ assert_approx_eq!(-$vec3::X, qrzx * $vec3::Y);
+ assert_approx_eq!(-$vec3::X, qrzx.mul_vec3($vec3::Y));
+
+ let mrzx = qrx * qrz;
+ assert_approx_eq!($vec3::Z, mrzx * $vec3::X);
+ assert_approx_eq!($vec3::Z, mrzx.mul_vec3($vec3::X));
+ assert_approx_eq!(-$vec3::X, mrzx * $vec3::Y);
+ assert_approx_eq!(-$vec3::X, mrzx.mul_vec3($vec3::Y));
+
+ should_glam_assert!({ ($quat::IDENTITY * 0.5).mul_vec3($vec3::X) });
+ should_glam_assert!({ ($quat::IDENTITY * 0.5) * $vec3::X });
+ should_glam_assert!({ ($quat::IDENTITY * 0.5).mul_quat($quat::IDENTITY) });
+ should_glam_assert!({ ($quat::IDENTITY * 0.5) * $quat::IDENTITY });
+ });
+
+ glam_test!(test_angle_between, {
+ const TAU: $t = 2.0 * core::$t::consts::PI;
+ let eps = 10.0 * core::$t::EPSILON as f32;
+ let q1 = $quat::from_euler(EulerRot::YXZ, 0.0, 0.0, 0.0);
+ let q2 = $quat::from_euler(EulerRot::YXZ, TAU * 0.25, 0.0, 0.0);
+ let q3 = $quat::from_euler(EulerRot::YXZ, TAU * 0.5, 0.0, 0.0);
+ let q4 = $quat::from_euler(EulerRot::YXZ, 0.0, 0.0, TAU * 0.25);
+ let q5 = $quat::from_axis_angle($vec3::new(1.0, 2.0, 3.0).normalize(), TAU * 0.3718);
+ let q6 = $quat::from_axis_angle($vec3::new(-1.0, 5.0, 3.0).normalize(), TAU * 0.94);
+ assert_approx_eq!(q1.angle_between(q2), TAU * 0.25, eps);
+ assert_approx_eq!(q1.angle_between(q3), TAU * 0.5, eps);
+ assert_approx_eq!(q3.angle_between(q3), 0.0, eps);
+ assert_approx_eq!(q3.angle_between(-q3), 0.0, eps);
+ assert_approx_eq!((q4 * q2 * q2).angle_between(q4 * q2), TAU * 0.25, eps);
+ assert_approx_eq!(q1.angle_between(q5), TAU * 0.3718, eps);
+ assert_approx_eq!(
+ (q5 * q2 * q1).angle_between(q5 * q2 * q5),
+ TAU * 0.3718,
+ eps
+ );
+ assert_approx_eq!((q3 * q3).angle_between(q1), 0.0, eps);
+ assert_approx_eq!((q3 * q3 * q3).angle_between(q3), 0.0, eps);
+ assert_approx_eq!((q3 * q3 * q3 * q3).angle_between(q1), 0.0, eps);
+ assert_approx_eq!(q1.angle_between(q6), TAU - TAU * 0.94, eps);
+ assert_approx_eq!((q5 * q1).angle_between(q5 * q6), TAU - TAU * 0.94, eps);
+ assert_approx_eq!((q1 * q5).angle_between(q6 * q5), TAU - TAU * 0.94, eps);
+ });
+
+ glam_test!(test_lerp, {
+ let q0 = $quat::from_rotation_y(deg(0.0));
+ let q1 = $quat::from_rotation_y(deg(90.0));
+ assert_approx_eq!(q0, q0.lerp(q1, 0.0));
+ assert_approx_eq!(q1, q0.lerp(q1, 1.0));
+ assert_approx_eq!($quat::from_rotation_y(deg(45.0)), q0.lerp(q1, 0.5));
+
+ should_glam_assert!({ $quat::lerp($quat::IDENTITY * 2.0, $quat::IDENTITY, 1.0) });
+ should_glam_assert!({ $quat::lerp($quat::IDENTITY, $quat::IDENTITY * 0.5, 1.0) });
+ });
+
+ glam_test!(test_slerp, {
+ let q0 = $quat::from_rotation_y(deg(0.0));
+ let q1 = $quat::from_rotation_y(deg(90.0));
+ assert_approx_eq!(q0, q0.slerp(q1, 0.0), 1.0e-3);
+ assert_approx_eq!(q1, q0.slerp(q1, 1.0), 1.0e-3);
+ assert_approx_eq!($quat::from_rotation_y(deg(45.0)), q0.slerp(q1, 0.5), 1.0e-3);
+
+ should_glam_assert!({ $quat::lerp($quat::IDENTITY * 2.0, $quat::IDENTITY, 1.0) });
+ should_glam_assert!({ $quat::lerp($quat::IDENTITY, $quat::IDENTITY * 0.5, 1.0) });
+ });
+
+ glam_test!(test_slerp_constant_speed, {
+ let step = 0.01;
+ let mut s = 0.0;
+ while s <= 1.0 {
+ let q0 = $quat::from_rotation_y(deg(0.0));
+ let q1 = $quat::from_rotation_y(deg(90.0));
+ assert_approx_eq!(
+ $quat::from_rotation_y(deg(s * 90.0)),
+ q0.slerp(q1, s),
+ 1.0e-3
+ );
+ s += step;
+ }
+ });
+
+ glam_test!(test_fmt, {
+ let a = $quat::IDENTITY;
+ assert_eq!(
+ format!("{:?}", a),
+ format!("{}(0.0, 0.0, 0.0, 1.0)", stringify!($quat))
+ );
+ // assert_eq!(
+ // format!("{:#?}", a),
+ // "$quat(\n 1.0,\n 2.0,\n 3.0,\n 4.0\n)"
+ // );
+ assert_eq!(format!("{}", a), "[0, 0, 0, 1]");
+ });
+
+ glam_test!(test_identity, {
+ let identity = $quat::IDENTITY;
+ assert!(identity.is_near_identity());
+ assert!(identity.is_normalized());
+ assert_eq!(identity, $quat::from_xyzw(0.0, 0.0, 0.0, 1.0));
+ assert_eq!(identity, identity * identity);
+ let q = $quat::from_euler(EulerRot::YXZ, deg(10.0), deg(-10.0), deg(45.0));
+ assert_eq!(q, q * identity);
+ assert_eq!(q, identity * q);
+ assert_eq!(identity, $quat::default());
+ });
+
+ glam_test!(test_slice, {
+ let a: [$t; 4] =
+ $quat::from_euler(EulerRot::YXZ, deg(30.0), deg(60.0), deg(90.0)).into();
+ let b = $quat::from_slice(&a);
+ let c: [$t; 4] = b.into();
+ assert_eq!(a, c);
+ let mut d = [0.0, 0.0, 0.0, 0.0];
+ b.write_to_slice(&mut d[..]);
+ assert_eq!(a, d);
+
+ should_panic!({ $quat::IDENTITY.write_to_slice(&mut [0 as $t; 3]) });
+ should_panic!({ $quat::from_slice(&[0 as $t; 3]) });
+ });
+
+ glam_test!(test_elements, {
+ let x = 1.0;
+ let y = 2.0;
+ let z = 3.0;
+ let w = 4.0;
+
+ let a = $quat::from_xyzw(x, y, z, w);
+ assert!(a.x == x);
+ assert!(a.y == y);
+ assert!(a.z == z);
+ assert!(a.w == w);
+
+ assert_eq!($vec3::new(1.0, 2.0, 3.0), a.xyz());
+ });
+
        // Component-wise quaternion arithmetic operators.
        glam_test!(test_addition, {
            let a = $quat::from_xyzw(1.0, 2.0, 3.0, 4.0);
            let b = $quat::from_xyzw(5.0, 6.0, 7.0, -9.0);
            assert_eq!(a + b, $quat::from_xyzw(6.0, 8.0, 10.0, -5.0));
        });

        glam_test!(test_subtraction, {
            let a = $quat::from_xyzw(6.0, 8.0, 10.0, -5.0);
            let b = $quat::from_xyzw(5.0, 6.0, 7.0, -9.0);
            assert_eq!(a - b, $quat::from_xyzw(1.0, 2.0, 3.0, 4.0));
        });

        glam_test!(test_scalar_multiplication, {
            let a = $quat::from_xyzw(1.0, 2.0, 3.0, 4.0);
            assert_eq!(a * 2.0, $quat::from_xyzw(2.0, 4.0, 6.0, 8.0));
        });

        glam_test!(test_scalar_division, {
            let a = $quat::from_xyzw(2.0, 4.0, 6.0, 8.0);
            assert_eq!(a / 2.0, $quat::from_xyzw(1.0, 2.0, 3.0, 4.0));
        });

        // Iterator `Sum` impl: component-wise addition over the iterator.
        glam_test!(test_sum, {
            let two = $new(2.0, 2.0, 2.0, 2.0);
            assert_eq!(vec![two, two].iter().sum::<$quat>(), two + two);
        });

        // Iterator `Product` impl: quaternion multiplication, so the input
        // is normalized first (mul_quat asserts normalized inputs).
        glam_test!(test_product, {
            let two = $new(2.0, 2.0, 2.0, 2.0).normalize();
            assert_eq!(vec![two, two].iter().product::<$quat>(), two * two);
        });

        // `is_finite` must reject any non-finite component.
        glam_test!(test_is_finite, {
            assert!($quat::from_xyzw(0.0, 0.0, 0.0, 0.0).is_finite());
            assert!($quat::from_xyzw(-1e-10, 1.0, 1e10, 42.0).is_finite());
            assert!(!$quat::from_xyzw(INFINITY, 0.0, 0.0, 0.0).is_finite());
            assert!(!$quat::from_xyzw(0.0, NAN, 0.0, 0.0).is_finite());
            assert!(!$quat::from_xyzw(0.0, 0.0, NEG_INFINITY, 0.0).is_finite());
            assert!(!$quat::from_xyzw(0.0, 0.0, 0.0, NAN).is_finite());
        });
+
+ glam_test!(test_rotation_arc, {
+ let eps = 2.0 * core::$t::EPSILON.sqrt();
+
+ for &from in &vec3_float_test_vectors!($vec3) {
+ let from = from.normalize();
+
+ {
+ let q = $quat::from_rotation_arc(from, from);
+ assert!(q.is_near_identity(), "from: {}, q: {}", from, q);
+ }
+
+ {
+ let q = $quat::from_rotation_arc_colinear(from, from);
+ assert!(q.is_near_identity(), "from: {}, q: {}", from, q);
+ }
+
+ {
+ let to = -from;
+ let q = $quat::from_rotation_arc(from, to);
+ assert!(q.is_normalized());
+ assert!((q * from - to).length() < eps);
+ }
+
+ {
+ let to = -from;
+ let q = $quat::from_rotation_arc_colinear(from, to);
+ assert!(q.is_near_identity(), "from: {}, q: {}", from, q);
+ }
+
+ for &to in &vec3_float_test_vectors!($vec3) {
+ let to = to.normalize();
+
+ let q = $quat::from_rotation_arc(from, to);
+ assert!(q.is_normalized());
+ assert!((q * from - to).length() < eps);
+
+ let q = $quat::from_rotation_arc_colinear(from, to);
+ assert!(q.is_normalized());
+ let transformed = q * from;
+ assert!(
+ (transformed - to).length() < eps || (-transformed - to).length() < eps
+ );
+ }
+ }
+
+ for &from in &vec2_float_test_vectors!($vec2) {
+ let from = from.normalize();
+
+ {
+ let q = $quat::from_rotation_arc_2d(from, from);
+ assert!(q.is_near_identity(), "from: {}, q: {}", from, q);
+ }
+
+ {
+ let to = -from;
+ let q = $quat::from_rotation_arc_2d(from, to);
+ assert!(q.is_normalized());
+ assert!((q * from.extend(0.0) - to.extend(0.0)).length() < eps);
+ }
+
+ for &to in &vec2_float_test_vectors!($vec2) {
+ let to = to.normalize();
+
+ let q = $quat::from_rotation_arc_2d(from, to);
+ assert!(q.is_normalized());
+ assert!((q * from.extend(0.0) - to.extend(0.0)).length() < eps);
+ }
+ }
+
+ should_glam_assert!({ $quat::from_rotation_arc($vec3::ZERO, $vec3::X) });
+ should_glam_assert!({ $quat::from_rotation_arc($vec3::X, $vec3::ZERO) });
+ should_glam_assert!({ $quat::from_rotation_arc_colinear($vec3::ZERO, $vec3::X) });
+ should_glam_assert!({ $quat::from_rotation_arc_colinear($vec3::X, $vec3::ZERO) });
+
+ should_glam_assert!({ $quat::from_rotation_arc_2d($vec2::ZERO, $vec2::X) });
+ should_glam_assert!({ $quat::from_rotation_arc_2d($vec2::X, $vec2::ZERO) });
+ });
+
+ glam_test!(test_to_array, {
+ assert!($new(1.0, 2.0, 3.0, 4.0).to_array() == [1.0, 2.0, 3.0, 4.0]);
+ });
+ };
+}
+
+mod quat {
+ use crate::support::{deg, rad};
+ use core::ops::Neg;
+ use glam::{const_quat, quat, EulerRot, Mat3, Mat4, Quat, Vec2, Vec3, Vec3A, Vec4};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<Quat>());
+ if cfg!(feature = "scalar-math") {
+ assert_eq!(4, mem::align_of::<Quat>());
+ } else {
+ assert_eq!(16, mem::align_of::<Quat>());
+ }
+ });
+
+ glam_test!(test_mul_vec3a, {
+ let qrz = Quat::from_rotation_z(deg(90.0));
+ assert_approx_eq!(Vec3A::Y, qrz * Vec3A::X);
+ assert_approx_eq!(Vec3A::Y, qrz.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::Y, -qrz * Vec3A::X);
+ assert_approx_eq!(Vec3A::Y, qrz.neg().mul_vec3a(Vec3A::X));
+ assert_approx_eq!(-Vec3A::X, qrz * Vec3A::Y);
+ assert_approx_eq!(-Vec3A::X, qrz.mul_vec3a(Vec3A::Y));
+ assert_approx_eq!(-Vec3A::X, -qrz * Vec3A::Y);
+ assert_approx_eq!(-Vec3A::X, qrz.neg().mul_vec3a(Vec3A::Y));
+
+ // check vec3 * mat3 is the same
+ let mrz = Mat3::from_quat(qrz);
+ assert_approx_eq!(Vec3A::Y, mrz * Vec3A::X);
+ assert_approx_eq!(Vec3A::Y, mrz.mul_vec3a(Vec3A::X));
+ // assert_approx_eq!(Vec3A::Y, -mrz * Vec3A::X);
+ assert_approx_eq!(-Vec3A::X, mrz * Vec3A::Y);
+ assert_approx_eq!(-Vec3A::X, mrz.mul_vec3a(Vec3A::Y));
+
+ let qrx = Quat::from_rotation_x(deg(90.0));
+ assert_approx_eq!(Vec3A::X, qrx * Vec3A::X);
+ assert_approx_eq!(Vec3A::X, qrx.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::X, -qrx * Vec3A::X);
+ assert_approx_eq!(Vec3A::X, qrx.neg().mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::Z, qrx * Vec3A::Y);
+ assert_approx_eq!(Vec3A::Z, qrx.mul_vec3a(Vec3A::Y));
+ assert_approx_eq!(Vec3A::Z, -qrx * Vec3A::Y);
+ assert_approx_eq!(Vec3A::Z, qrx.neg().mul_vec3a(Vec3A::Y));
+
+ // check vec3 * mat3 is the same
+ let mrx = Mat3::from_quat(qrx);
+ assert_approx_eq!(Vec3A::X, mrx * Vec3A::X);
+ assert_approx_eq!(Vec3A::X, mrx.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::Z, mrx * Vec3A::Y);
+ assert_approx_eq!(Vec3A::Z, mrx.mul_vec3a(Vec3A::Y));
+
+ let qrxz = qrz * qrx;
+ assert_approx_eq!(Vec3A::Y, qrxz * Vec3A::X);
+ assert_approx_eq!(Vec3A::Y, qrxz.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::Z, qrxz * Vec3A::Y);
+ assert_approx_eq!(Vec3A::Z, qrxz.mul_vec3a(Vec3A::Y));
+
+ let mrxz = mrz * mrx;
+ assert_approx_eq!(Vec3A::Y, mrxz * Vec3A::X);
+ assert_approx_eq!(Vec3A::Y, mrxz.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(Vec3A::Z, mrxz * Vec3A::Y);
+ assert_approx_eq!(Vec3A::Z, mrxz.mul_vec3a(Vec3A::Y));
+
+ let qrzx = qrx * qrz;
+ assert_approx_eq!(Vec3A::Z, qrzx * Vec3A::X);
+ assert_approx_eq!(Vec3A::Z, qrzx.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(-Vec3A::X, qrzx * Vec3A::Y);
+ assert_approx_eq!(-Vec3A::X, qrzx.mul_vec3a(Vec3A::Y));
+
+ let mrzx = qrx * qrz;
+ assert_approx_eq!(Vec3A::Z, mrzx * Vec3A::X);
+ assert_approx_eq!(Vec3A::Z, mrzx.mul_vec3a(Vec3A::X));
+ assert_approx_eq!(-Vec3A::X, mrzx * Vec3A::Y);
+ assert_approx_eq!(-Vec3A::X, mrzx.mul_vec3a(Vec3A::Y));
+ });
+
+ glam_test!(test_as, {
+ use glam::DQuat;
+ assert_approx_eq!(
+ DQuat::from_euler(EulerRot::YXZ, 1.0, 2.0, 3.0),
+ Quat::from_euler(EulerRot::YXZ, 1.0, 2.0, 3.0).as_f64()
+ );
+ assert_approx_eq!(
+ Quat::from_euler(EulerRot::YXZ, 1.0, 2.0, 3.0),
+ DQuat::from_euler(EulerRot::YXZ, 1.0, 2.0, 3.0).as_f32()
+ );
+ });
+
+ impl_quat_tests!(f32, const_quat, quat, Mat3, Mat4, Quat, Vec2, Vec3, Vec4);
+}
+
mod dquat {
    use crate::support::{deg, rad};
    use core::ops::Neg;
    use glam::{const_dquat, dquat, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, EulerRot};

    // DQuat is plain f64 storage: 32 bytes with f64's alignment (no SIMD).
    glam_test!(test_align, {
        use std::mem;
        assert_eq!(32, mem::size_of::<DQuat>());
        assert_eq!(mem::align_of::<f64>(), mem::align_of::<DQuat>());
    });

    // Instantiate the shared quaternion test-suite for the f64 types.
    impl_quat_tests!(
        f64,
        const_dquat,
        dquat,
        DMat3,
        DMat4,
        DQuat,
        DVec2,
        DVec3,
        DVec4
    );
}
diff --git a/tests/support/macros.rs b/tests/support/macros.rs
new file mode 100644
index 0000000..01adc4b
--- /dev/null
+++ b/tests/support/macros.rs
@@ -0,0 +1,204 @@
/// Declares a test function that runs under the native `#[test]` harness,
/// or under `wasm_bindgen_test` when targeting wasm32.
#[macro_export]
macro_rules! glam_test {
    ($name:ident, $block:block) => {
        #[cfg_attr(not(target_arch = "wasm32"), test)]
        #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
        fn $name() {
            $block
        }
    };
}
+
/// Asserts that evaluating `$block` panics. Compiled out when `std` is
/// unavailable or on wasm32, where `catch_unwind` cannot be used.
#[macro_export]
macro_rules! should_panic {
    ($block:block) => {{
        #[cfg(all(feature = "std", not(target_arch = "wasm32")))]
        assert!(std::panic::catch_unwind(|| $block).is_err());
    }};
}
+
/// Asserts that `$block` panics, but only when one of the glam-assert
/// features is enabled — otherwise the internal asserts are compiled out
/// and the block is not evaluated at all.
#[macro_export]
macro_rules! should_glam_assert {
    ($block:block) => {{
        #[cfg(any(feature = "glam-assert", feature = "debug-glam-assert"))]
        should_panic!($block);
    }};
}
+
/// Asserts that two values compare approximately equal via `FloatCompare`.
///
/// The two-argument form uses `f32::EPSILON` as the tolerance; the
/// three-argument form takes an explicit maximum absolute difference.
#[macro_export]
macro_rules! assert_approx_eq {
    ($a:expr, $b:expr) => {{
        #[allow(unused_imports)]
        use crate::support::FloatCompare;
        let eps = core::f32::EPSILON;
        let (a, b) = (&$a, &$b);
        assert!(
            a.approx_eq(b, eps),
            "assertion failed: `(left !== right)` \
             (left: `{:?}`, right: `{:?}`, expect diff: `{:?}`, real diff: `{:?}`)",
            *a,
            *b,
            eps,
            a.abs_diff(b)
        );
    }};
    ($a:expr, $b:expr, $eps:expr) => {{
        #[allow(unused_imports)]
        use crate::support::FloatCompare;
        let (a, b) = (&$a, &$b);
        // Bind the tolerance once: the original passed `$eps` to `approx_eq`
        // directly, evaluating the caller's expression a second time.
        let eps = $eps;
        assert!(
            a.approx_eq(b, eps),
            "assertion failed: `(left !== right)` \
             (left: `{:?}`, right: `{:?}`, expect diff: `{:?}`, real diff: `{:?}`)",
            *a,
            *b,
            eps,
            a.abs_diff(b)
        );
    }};
}
+
/// Test vector normalization for float vector
///
/// Instantiated once per (scalar, vector) pair. Relies on `INFINITY` and
/// `NAN` being in scope at the invocation site.
#[macro_export]
macro_rules! impl_vec_float_normalize_tests {
    ($t:ident, $vec:ident) => {
        use core::$t::MAX;
        use core::$t::MIN_POSITIVE;

        /// Works for vec2, vec3, vec4
        fn from_x_y(x: $t, y: $t) -> $vec {
            let mut v = $vec::ZERO;
            v.x = x;
            v.y = y;
            v
        }

        glam_test!(test_normalize, {
            assert_eq!(from_x_y(-42.0, 0.0).normalize(), from_x_y(-1.0, 0.0));
            assert_eq!(from_x_y(MAX.sqrt(), 0.0).normalize(), from_x_y(1.0, 0.0));
            // assert_eq!(from_x_y(MAX, 0.0).normalize(), from_x_y(1.0, 0.0)); // normalize fails for huge vectors and returns zero

            // We expect not to be able to normalize small numbers:
            should_glam_assert!({ from_x_y(0.0, 0.0).normalize() });
            should_glam_assert!({ from_x_y(MIN_POSITIVE, 0.0).normalize() });

            // We expect not to be able to normalize non-finite vectors:
            should_glam_assert!({ from_x_y(INFINITY, 0.0).normalize() });
            should_glam_assert!({ from_x_y(NAN, 0.0).normalize() });
        });

        // Without the glam-assert features, normalizing a degenerate vector
        // does not panic but must yield a non-finite result.
        #[cfg(not(any(feature = "debug-glam-assert", feature = "glam-assert")))]
        glam_test!(test_normalize_no_glam_assert, {
            // We expect not to be able to normalize small numbers:
            assert!(!from_x_y(0.0, 0.0).normalize().is_finite());
            assert!(!from_x_y(MIN_POSITIVE, 0.0).normalize().is_finite());

            // We expect not to be able to normalize non-finite vectors:
            assert!(!from_x_y(INFINITY, 0.0).normalize().is_finite());
            assert!(!from_x_y(NAN, 0.0).normalize().is_finite());
        });

        glam_test!(test_try_normalize, {
            assert_eq!(
                from_x_y(-42.0, 0.0).try_normalize(),
                Some(from_x_y(-1.0, 0.0))
            );
            assert_eq!(
                from_x_y(MAX.sqrt(), 0.0).try_normalize(),
                Some(from_x_y(1.0, 0.0))
            );

            // We expect `try_normalize` to return None when inputs are very small:
            assert_eq!(from_x_y(0.0, 0.0).try_normalize(), None);
            assert_eq!(from_x_y(MIN_POSITIVE, 0.0).try_normalize(), None);

            // We expect `try_normalize` to return None when inputs are non-finite:
            assert_eq!(from_x_y(INFINITY, 0.0).try_normalize(), None);
            assert_eq!(from_x_y(NAN, 0.0).try_normalize(), None);

            // We expect `try_normalize` to return None when inputs are very large:
            assert_eq!(from_x_y(MAX, 0.0).try_normalize(), None);
            assert_eq!(from_x_y(MAX, MAX).try_normalize(), None);
        });

        glam_test!(test_normalize_or_zero, {
            assert_eq!(
                from_x_y(-42.0, 0.0).normalize_or_zero(),
                from_x_y(-1.0, 0.0)
            );
            assert_eq!(
                from_x_y(MAX.sqrt(), 0.0).normalize_or_zero(),
                from_x_y(1.0, 0.0)
            );

            // We expect `normalize_or_zero` to return zero when inputs are very small:
            assert_eq!(from_x_y(0.0, 0.0).normalize_or_zero(), $vec::ZERO);
            assert_eq!(from_x_y(MIN_POSITIVE, 0.0).normalize_or_zero(), $vec::ZERO);

            // We expect `normalize_or_zero` to return zero when inputs are non-finite:
            assert_eq!(from_x_y(INFINITY, 0.0).normalize_or_zero(), $vec::ZERO);
            assert_eq!(from_x_y(NAN, 0.0).normalize_or_zero(), $vec::ZERO);

            // We expect `normalize_or_zero` to return zero when inputs are very large:
            assert_eq!(from_x_y(MAX, 0.0).normalize_or_zero(), $vec::ZERO);
            assert_eq!(from_x_y(MAX, MAX).normalize_or_zero(), $vec::ZERO);
        });
    };
}
+
/// Useful test vectors
///
/// Covers the signed axes, near-axis perturbations spanning the float
/// precision range, and a few arbitrary directions.
#[macro_export]
macro_rules! vec3_float_test_vectors {
    ($vec3:ident) => {
        [
            $vec3::X,
            $vec3::Y,
            $vec3::Z,
            -$vec3::X,
            -$vec3::Y,
            -$vec3::Z,
            $vec3::new(1.0, 1e-3, 0.0),
            $vec3::new(1.0, 1e-4, 0.0),
            $vec3::new(1.0, 1e-5, 0.0),
            $vec3::new(1.0, 1e-6, 0.0),
            $vec3::new(1.0, 1e-7, 0.0),
            $vec3::new(1.0, 1e-14, 0.0),
            $vec3::new(1.0, 1e-15, 0.0),
            $vec3::new(1.0, 1e-16, 0.0),
            $vec3::new(0.1, 0.2, 0.3),
            $vec3::new(0.2, 0.3, 0.4),
            $vec3::new(4.0, -5.0, 6.0),
            $vec3::new(-2.0, 0.5, -1.0),
            // Pathological cases from <https://graphics.pixar.com/library/OrthonormalB/paper.pdf>:
            $vec3::new(0.00038527316, 0.00038460016, -0.99999988079),
            $vec3::new(-0.00019813581, -0.00008946839, -0.99999988079),
        ]
    };
}
+
/// 2D analogue of `vec3_float_test_vectors!`: signed axes, near-axis
/// perturbations, and a few arbitrary directions.
#[macro_export]
macro_rules! vec2_float_test_vectors {
    ($vec2:ident) => {
        [
            $vec2::X,
            $vec2::Y,
            -$vec2::X,
            -$vec2::Y,
            $vec2::new(1.0, 1e-3),
            $vec2::new(1.0, 1e-4),
            $vec2::new(1.0, 1e-5),
            $vec2::new(1.0, 1e-6),
            $vec2::new(1.0, 1e-7),
            $vec2::new(1.0, 1e-14),
            $vec2::new(1.0, 1e-15),
            $vec2::new(1.0, 1e-16),
            $vec2::new(0.1, 0.2),
            $vec2::new(0.2, 0.3),
            $vec2::new(4.0, -5.0),
            $vec2::new(-2.0, 0.5),
            // Pathological cases from <https://graphics.pixar.com/library/OrthonormalB/paper.pdf>:
            $vec2::new(0.00038527316, 0.00038460016),
            $vec2::new(-0.00019813581, -0.00008946839),
        ]
    };
}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
new file mode 100644
index 0000000..865aa5e
--- /dev/null
+++ b/tests/support/mod.rs
@@ -0,0 +1,280 @@
+#[macro_use]
+mod macros;
+
+#[cfg(target_arch = "wasm32")]
+wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
+
+use glam::{
+ DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3,
+ Vec3A, Vec4,
+};
+
+pub trait Deg {
+ fn to_radians(self) -> Self;
+}
+
+impl Deg for f32 {
+ fn to_radians(self) -> f32 {
+ f32::to_radians(self)
+ }
+}
+
+impl Deg for f64 {
+ fn to_radians(self) -> f64 {
+ f64::to_radians(self)
+ }
+}
+
+/// Helper function for migrating away from `glam::angle::deg`.
+#[allow(dead_code)]
+#[inline]
+pub fn deg<T: Deg>(angle: T) -> T {
+ angle.to_radians()
+}
+
+/// Helper function for migrating away from `glam::angle::rad`.
+#[allow(dead_code)]
+#[inline]
+pub fn rad<T>(angle: T) -> T {
+ angle
+}
+
+/// Trait used by the `assert_approx_eq` macro for floating point comparisons.
+pub trait FloatCompare<Rhs: ?Sized = Self> {
+ /// Return true if the absolute difference between `self` and `other` is
+ /// less then or equal to `max_abs_diff`.
+ fn approx_eq(&self, other: &Rhs, max_abs_diff: f32) -> bool;
+ /// Returns the absolute difference of `self` and `other` which is printed
+ /// if `assert_approx_eq` fails.
+ fn abs_diff(&self, other: &Rhs) -> Rhs;
+}
+
+impl FloatCompare for f32 {
+ #[inline]
+ fn approx_eq(&self, other: &f32, max_abs_diff: f32) -> bool {
+ (self - other).abs() <= max_abs_diff
+ }
+ #[inline]
+ fn abs_diff(&self, other: &f32) -> f32 {
+ (self - other).abs()
+ }
+}
+
+impl FloatCompare for f64 {
+ #[inline]
+ fn approx_eq(&self, other: &f64, max_abs_diff: f32) -> bool {
+ (self - other).abs() <= max_abs_diff as f64
+ }
+ #[inline]
+ fn abs_diff(&self, other: &f64) -> f64 {
+ (self - other).abs()
+ }
+}
+
+impl FloatCompare for Mat2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for DMat2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for Mat3 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ (self.z_axis - other.z_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for Mat3A {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ (self.z_axis - other.z_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for DMat3 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ (self.z_axis - other.z_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for DMat4 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ (self.z_axis - other.z_axis).abs(),
+ (self.w_axis - other.w_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for Mat4 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_cols(
+ (self.x_axis - other.x_axis).abs(),
+ (self.y_axis - other.y_axis).abs(),
+ (self.z_axis - other.z_axis).abs(),
+ (self.w_axis - other.w_axis).abs(),
+ )
+ }
+}
+
+impl FloatCompare for Quat {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ let a: Vec4 = (*self).into();
+ let b: Vec4 = (*other).into();
+ Quat::from_vec4((a - b).abs())
+ }
+}
+
+impl FloatCompare for Vec2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for Vec3 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for Vec3A {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for Vec4 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for DQuat {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ let a: DVec4 = (*self).into();
+ let b: DVec4 = (*other).into();
+ DQuat::from_vec4((a - b).abs())
+ }
+}
+
+impl FloatCompare for DVec2 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for DVec3 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
+
+impl FloatCompare for DVec4 {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff as f64)
+ }
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ (*self - *other).abs()
+ }
+}
diff --git a/tests/swizzles_f32.rs b/tests/swizzles_f32.rs
new file mode 100644
index 0000000..8e61518
--- /dev/null
+++ b/tests/swizzles_f32.rs
@@ -0,0 +1,618 @@
+// Generated by swizzlegen. Do not edit.
+#[macro_use]
+mod support;
+use glam::*;
+
+glam_test!(test_vec4_swizzles, {
+ let v = vec4(1_f32, 2_f32, 3_f32, 4_f32);
+ assert_eq!(v, v.xyzw());
+ assert_eq!(v.xxxx(), vec4(1_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxxy(), vec4(1_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxxz(), vec4(1_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xxxw(), vec4(1_f32, 1_f32, 1_f32, 4_f32));
+ assert_eq!(v.xxyx(), vec4(1_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xxyy(), vec4(1_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xxyz(), vec4(1_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.xxyw(), vec4(1_f32, 1_f32, 2_f32, 4_f32));
+ assert_eq!(v.xxzx(), vec4(1_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xxzy(), vec4(1_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xxzz(), vec4(1_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.xxzw(), vec4(1_f32, 1_f32, 3_f32, 4_f32));
+ assert_eq!(v.xxwx(), vec4(1_f32, 1_f32, 4_f32, 1_f32));
+ assert_eq!(v.xxwy(), vec4(1_f32, 1_f32, 4_f32, 2_f32));
+ assert_eq!(v.xxwz(), vec4(1_f32, 1_f32, 4_f32, 3_f32));
+ assert_eq!(v.xxww(), vec4(1_f32, 1_f32, 4_f32, 4_f32));
+ assert_eq!(v.xyxx(), vec4(1_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.xyxy(), vec4(1_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.xyxz(), vec4(1_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.xyxw(), vec4(1_f32, 2_f32, 1_f32, 4_f32));
+ assert_eq!(v.xyyx(), vec4(1_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyyy(), vec4(1_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.xyyz(), vec4(1_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.xyyw(), vec4(1_f32, 2_f32, 2_f32, 4_f32));
+ assert_eq!(v.xyzx(), vec4(1_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.xyzy(), vec4(1_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.xyzz(), vec4(1_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.xywx(), vec4(1_f32, 2_f32, 4_f32, 1_f32));
+ assert_eq!(v.xywy(), vec4(1_f32, 2_f32, 4_f32, 2_f32));
+ assert_eq!(v.xywz(), vec4(1_f32, 2_f32, 4_f32, 3_f32));
+ assert_eq!(v.xyww(), vec4(1_f32, 2_f32, 4_f32, 4_f32));
+ assert_eq!(v.xzxx(), vec4(1_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.xzxy(), vec4(1_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.xzxz(), vec4(1_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.xzxw(), vec4(1_f32, 3_f32, 1_f32, 4_f32));
+ assert_eq!(v.xzyx(), vec4(1_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.xzyy(), vec4(1_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.xzyz(), vec4(1_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.xzyw(), vec4(1_f32, 3_f32, 2_f32, 4_f32));
+ assert_eq!(v.xzzx(), vec4(1_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzzy(), vec4(1_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzzz(), vec4(1_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.xzzw(), vec4(1_f32, 3_f32, 3_f32, 4_f32));
+ assert_eq!(v.xzwx(), vec4(1_f32, 3_f32, 4_f32, 1_f32));
+ assert_eq!(v.xzwy(), vec4(1_f32, 3_f32, 4_f32, 2_f32));
+ assert_eq!(v.xzwz(), vec4(1_f32, 3_f32, 4_f32, 3_f32));
+ assert_eq!(v.xzww(), vec4(1_f32, 3_f32, 4_f32, 4_f32));
+ assert_eq!(v.xwxx(), vec4(1_f32, 4_f32, 1_f32, 1_f32));
+ assert_eq!(v.xwxy(), vec4(1_f32, 4_f32, 1_f32, 2_f32));
+ assert_eq!(v.xwxz(), vec4(1_f32, 4_f32, 1_f32, 3_f32));
+ assert_eq!(v.xwxw(), vec4(1_f32, 4_f32, 1_f32, 4_f32));
+ assert_eq!(v.xwyx(), vec4(1_f32, 4_f32, 2_f32, 1_f32));
+ assert_eq!(v.xwyy(), vec4(1_f32, 4_f32, 2_f32, 2_f32));
+ assert_eq!(v.xwyz(), vec4(1_f32, 4_f32, 2_f32, 3_f32));
+ assert_eq!(v.xwyw(), vec4(1_f32, 4_f32, 2_f32, 4_f32));
+ assert_eq!(v.xwzx(), vec4(1_f32, 4_f32, 3_f32, 1_f32));
+ assert_eq!(v.xwzy(), vec4(1_f32, 4_f32, 3_f32, 2_f32));
+ assert_eq!(v.xwzz(), vec4(1_f32, 4_f32, 3_f32, 3_f32));
+ assert_eq!(v.xwzw(), vec4(1_f32, 4_f32, 3_f32, 4_f32));
+ assert_eq!(v.xwwx(), vec4(1_f32, 4_f32, 4_f32, 1_f32));
+ assert_eq!(v.xwwy(), vec4(1_f32, 4_f32, 4_f32, 2_f32));
+ assert_eq!(v.xwwz(), vec4(1_f32, 4_f32, 4_f32, 3_f32));
+ assert_eq!(v.xwww(), vec4(1_f32, 4_f32, 4_f32, 4_f32));
+ assert_eq!(v.yxxx(), vec4(2_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxxy(), vec4(2_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxxz(), vec4(2_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.yxxw(), vec4(2_f32, 1_f32, 1_f32, 4_f32));
+ assert_eq!(v.yxyx(), vec4(2_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.yxyy(), vec4(2_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.yxyz(), vec4(2_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.yxyw(), vec4(2_f32, 1_f32, 2_f32, 4_f32));
+ assert_eq!(v.yxzx(), vec4(2_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.yxzy(), vec4(2_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.yxzz(), vec4(2_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.yxzw(), vec4(2_f32, 1_f32, 3_f32, 4_f32));
+ assert_eq!(v.yxwx(), vec4(2_f32, 1_f32, 4_f32, 1_f32));
+ assert_eq!(v.yxwy(), vec4(2_f32, 1_f32, 4_f32, 2_f32));
+ assert_eq!(v.yxwz(), vec4(2_f32, 1_f32, 4_f32, 3_f32));
+ assert_eq!(v.yxww(), vec4(2_f32, 1_f32, 4_f32, 4_f32));
+ assert_eq!(v.yyxx(), vec4(2_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yyxy(), vec4(2_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yyxz(), vec4(2_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yyxw(), vec4(2_f32, 2_f32, 1_f32, 4_f32));
+ assert_eq!(v.yyyx(), vec4(2_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyyy(), vec4(2_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyyz(), vec4(2_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yyyw(), vec4(2_f32, 2_f32, 2_f32, 4_f32));
+ assert_eq!(v.yyzx(), vec4(2_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yyzy(), vec4(2_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yyzz(), vec4(2_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.yyzw(), vec4(2_f32, 2_f32, 3_f32, 4_f32));
+ assert_eq!(v.yywx(), vec4(2_f32, 2_f32, 4_f32, 1_f32));
+ assert_eq!(v.yywy(), vec4(2_f32, 2_f32, 4_f32, 2_f32));
+ assert_eq!(v.yywz(), vec4(2_f32, 2_f32, 4_f32, 3_f32));
+ assert_eq!(v.yyww(), vec4(2_f32, 2_f32, 4_f32, 4_f32));
+ assert_eq!(v.yzxx(), vec4(2_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.yzxy(), vec4(2_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.yzxz(), vec4(2_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.yzxw(), vec4(2_f32, 3_f32, 1_f32, 4_f32));
+ assert_eq!(v.yzyx(), vec4(2_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.yzyy(), vec4(2_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.yzyz(), vec4(2_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.yzyw(), vec4(2_f32, 3_f32, 2_f32, 4_f32));
+ assert_eq!(v.yzzx(), vec4(2_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzzy(), vec4(2_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzzz(), vec4(2_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.yzzw(), vec4(2_f32, 3_f32, 3_f32, 4_f32));
+ assert_eq!(v.yzwx(), vec4(2_f32, 3_f32, 4_f32, 1_f32));
+ assert_eq!(v.yzwy(), vec4(2_f32, 3_f32, 4_f32, 2_f32));
+ assert_eq!(v.yzwz(), vec4(2_f32, 3_f32, 4_f32, 3_f32));
+ assert_eq!(v.yzww(), vec4(2_f32, 3_f32, 4_f32, 4_f32));
+ assert_eq!(v.ywxx(), vec4(2_f32, 4_f32, 1_f32, 1_f32));
+ assert_eq!(v.ywxy(), vec4(2_f32, 4_f32, 1_f32, 2_f32));
+ assert_eq!(v.ywxz(), vec4(2_f32, 4_f32, 1_f32, 3_f32));
+ assert_eq!(v.ywxw(), vec4(2_f32, 4_f32, 1_f32, 4_f32));
+ assert_eq!(v.ywyx(), vec4(2_f32, 4_f32, 2_f32, 1_f32));
+ assert_eq!(v.ywyy(), vec4(2_f32, 4_f32, 2_f32, 2_f32));
+ assert_eq!(v.ywyz(), vec4(2_f32, 4_f32, 2_f32, 3_f32));
+ assert_eq!(v.ywyw(), vec4(2_f32, 4_f32, 2_f32, 4_f32));
+ assert_eq!(v.ywzx(), vec4(2_f32, 4_f32, 3_f32, 1_f32));
+ assert_eq!(v.ywzy(), vec4(2_f32, 4_f32, 3_f32, 2_f32));
+ assert_eq!(v.ywzz(), vec4(2_f32, 4_f32, 3_f32, 3_f32));
+ assert_eq!(v.ywzw(), vec4(2_f32, 4_f32, 3_f32, 4_f32));
+ assert_eq!(v.ywwx(), vec4(2_f32, 4_f32, 4_f32, 1_f32));
+ assert_eq!(v.ywwy(), vec4(2_f32, 4_f32, 4_f32, 2_f32));
+ assert_eq!(v.ywwz(), vec4(2_f32, 4_f32, 4_f32, 3_f32));
+ assert_eq!(v.ywww(), vec4(2_f32, 4_f32, 4_f32, 4_f32));
+ assert_eq!(v.zxxx(), vec4(3_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxxy(), vec4(3_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxxz(), vec4(3_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.zxxw(), vec4(3_f32, 1_f32, 1_f32, 4_f32));
+ assert_eq!(v.zxyx(), vec4(3_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.zxyy(), vec4(3_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.zxyz(), vec4(3_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.zxyw(), vec4(3_f32, 1_f32, 2_f32, 4_f32));
+ assert_eq!(v.zxzx(), vec4(3_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.zxzy(), vec4(3_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.zxzz(), vec4(3_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.zxzw(), vec4(3_f32, 1_f32, 3_f32, 4_f32));
+ assert_eq!(v.zxwx(), vec4(3_f32, 1_f32, 4_f32, 1_f32));
+ assert_eq!(v.zxwy(), vec4(3_f32, 1_f32, 4_f32, 2_f32));
+ assert_eq!(v.zxwz(), vec4(3_f32, 1_f32, 4_f32, 3_f32));
+ assert_eq!(v.zxww(), vec4(3_f32, 1_f32, 4_f32, 4_f32));
+ assert_eq!(v.zyxx(), vec4(3_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.zyxy(), vec4(3_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.zyxz(), vec4(3_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.zyxw(), vec4(3_f32, 2_f32, 1_f32, 4_f32));
+ assert_eq!(v.zyyx(), vec4(3_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyyy(), vec4(3_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyyz(), vec4(3_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.zyyw(), vec4(3_f32, 2_f32, 2_f32, 4_f32));
+ assert_eq!(v.zyzx(), vec4(3_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.zyzy(), vec4(3_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.zyzz(), vec4(3_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.zyzw(), vec4(3_f32, 2_f32, 3_f32, 4_f32));
+ assert_eq!(v.zywx(), vec4(3_f32, 2_f32, 4_f32, 1_f32));
+ assert_eq!(v.zywy(), vec4(3_f32, 2_f32, 4_f32, 2_f32));
+ assert_eq!(v.zywz(), vec4(3_f32, 2_f32, 4_f32, 3_f32));
+ assert_eq!(v.zyww(), vec4(3_f32, 2_f32, 4_f32, 4_f32));
+ assert_eq!(v.zzxx(), vec4(3_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zzxy(), vec4(3_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zzxz(), vec4(3_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zzxw(), vec4(3_f32, 3_f32, 1_f32, 4_f32));
+ assert_eq!(v.zzyx(), vec4(3_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zzyy(), vec4(3_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zzyz(), vec4(3_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zzyw(), vec4(3_f32, 3_f32, 2_f32, 4_f32));
+ assert_eq!(v.zzzx(), vec4(3_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzzy(), vec4(3_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzzz(), vec4(3_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.zzzw(), vec4(3_f32, 3_f32, 3_f32, 4_f32));
+ assert_eq!(v.zzwx(), vec4(3_f32, 3_f32, 4_f32, 1_f32));
+ assert_eq!(v.zzwy(), vec4(3_f32, 3_f32, 4_f32, 2_f32));
+ assert_eq!(v.zzwz(), vec4(3_f32, 3_f32, 4_f32, 3_f32));
+ assert_eq!(v.zzww(), vec4(3_f32, 3_f32, 4_f32, 4_f32));
+ assert_eq!(v.zwxx(), vec4(3_f32, 4_f32, 1_f32, 1_f32));
+ assert_eq!(v.zwxy(), vec4(3_f32, 4_f32, 1_f32, 2_f32));
+ assert_eq!(v.zwxz(), vec4(3_f32, 4_f32, 1_f32, 3_f32));
+ assert_eq!(v.zwxw(), vec4(3_f32, 4_f32, 1_f32, 4_f32));
+ assert_eq!(v.zwyx(), vec4(3_f32, 4_f32, 2_f32, 1_f32));
+ assert_eq!(v.zwyy(), vec4(3_f32, 4_f32, 2_f32, 2_f32));
+ assert_eq!(v.zwyz(), vec4(3_f32, 4_f32, 2_f32, 3_f32));
+ assert_eq!(v.zwyw(), vec4(3_f32, 4_f32, 2_f32, 4_f32));
+ assert_eq!(v.zwzx(), vec4(3_f32, 4_f32, 3_f32, 1_f32));
+ assert_eq!(v.zwzy(), vec4(3_f32, 4_f32, 3_f32, 2_f32));
+ assert_eq!(v.zwzz(), vec4(3_f32, 4_f32, 3_f32, 3_f32));
+ assert_eq!(v.zwzw(), vec4(3_f32, 4_f32, 3_f32, 4_f32));
+ assert_eq!(v.zwwx(), vec4(3_f32, 4_f32, 4_f32, 1_f32));
+ assert_eq!(v.zwwy(), vec4(3_f32, 4_f32, 4_f32, 2_f32));
+ assert_eq!(v.zwwz(), vec4(3_f32, 4_f32, 4_f32, 3_f32));
+ assert_eq!(v.zwww(), vec4(3_f32, 4_f32, 4_f32, 4_f32));
+ assert_eq!(v.wxxx(), vec4(4_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.wxxy(), vec4(4_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.wxxz(), vec4(4_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.wxxw(), vec4(4_f32, 1_f32, 1_f32, 4_f32));
+ assert_eq!(v.wxyx(), vec4(4_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.wxyy(), vec4(4_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.wxyz(), vec4(4_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.wxyw(), vec4(4_f32, 1_f32, 2_f32, 4_f32));
+ assert_eq!(v.wxzx(), vec4(4_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.wxzy(), vec4(4_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.wxzz(), vec4(4_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.wxzw(), vec4(4_f32, 1_f32, 3_f32, 4_f32));
+ assert_eq!(v.wxwx(), vec4(4_f32, 1_f32, 4_f32, 1_f32));
+ assert_eq!(v.wxwy(), vec4(4_f32, 1_f32, 4_f32, 2_f32));
+ assert_eq!(v.wxwz(), vec4(4_f32, 1_f32, 4_f32, 3_f32));
+ assert_eq!(v.wxww(), vec4(4_f32, 1_f32, 4_f32, 4_f32));
+ assert_eq!(v.wyxx(), vec4(4_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.wyxy(), vec4(4_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.wyxz(), vec4(4_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.wyxw(), vec4(4_f32, 2_f32, 1_f32, 4_f32));
+ assert_eq!(v.wyyx(), vec4(4_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.wyyy(), vec4(4_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.wyyz(), vec4(4_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.wyyw(), vec4(4_f32, 2_f32, 2_f32, 4_f32));
+ assert_eq!(v.wyzx(), vec4(4_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.wyzy(), vec4(4_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.wyzz(), vec4(4_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.wyzw(), vec4(4_f32, 2_f32, 3_f32, 4_f32));
+ assert_eq!(v.wywx(), vec4(4_f32, 2_f32, 4_f32, 1_f32));
+ assert_eq!(v.wywy(), vec4(4_f32, 2_f32, 4_f32, 2_f32));
+ assert_eq!(v.wywz(), vec4(4_f32, 2_f32, 4_f32, 3_f32));
+ assert_eq!(v.wyww(), vec4(4_f32, 2_f32, 4_f32, 4_f32));
+ assert_eq!(v.wzxx(), vec4(4_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.wzxy(), vec4(4_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.wzxz(), vec4(4_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.wzxw(), vec4(4_f32, 3_f32, 1_f32, 4_f32));
+ assert_eq!(v.wzyx(), vec4(4_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.wzyy(), vec4(4_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.wzyz(), vec4(4_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.wzyw(), vec4(4_f32, 3_f32, 2_f32, 4_f32));
+ assert_eq!(v.wzzx(), vec4(4_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.wzzy(), vec4(4_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.wzzz(), vec4(4_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.wzzw(), vec4(4_f32, 3_f32, 3_f32, 4_f32));
+ assert_eq!(v.wzwx(), vec4(4_f32, 3_f32, 4_f32, 1_f32));
+ assert_eq!(v.wzwy(), vec4(4_f32, 3_f32, 4_f32, 2_f32));
+ assert_eq!(v.wzwz(), vec4(4_f32, 3_f32, 4_f32, 3_f32));
+ assert_eq!(v.wzww(), vec4(4_f32, 3_f32, 4_f32, 4_f32));
+ assert_eq!(v.wwxx(), vec4(4_f32, 4_f32, 1_f32, 1_f32));
+ assert_eq!(v.wwxy(), vec4(4_f32, 4_f32, 1_f32, 2_f32));
+ assert_eq!(v.wwxz(), vec4(4_f32, 4_f32, 1_f32, 3_f32));
+ assert_eq!(v.wwxw(), vec4(4_f32, 4_f32, 1_f32, 4_f32));
+ assert_eq!(v.wwyx(), vec4(4_f32, 4_f32, 2_f32, 1_f32));
+ assert_eq!(v.wwyy(), vec4(4_f32, 4_f32, 2_f32, 2_f32));
+ assert_eq!(v.wwyz(), vec4(4_f32, 4_f32, 2_f32, 3_f32));
+ assert_eq!(v.wwyw(), vec4(4_f32, 4_f32, 2_f32, 4_f32));
+ assert_eq!(v.wwzx(), vec4(4_f32, 4_f32, 3_f32, 1_f32));
+ assert_eq!(v.wwzy(), vec4(4_f32, 4_f32, 3_f32, 2_f32));
+ assert_eq!(v.wwzz(), vec4(4_f32, 4_f32, 3_f32, 3_f32));
+ assert_eq!(v.wwzw(), vec4(4_f32, 4_f32, 3_f32, 4_f32));
+ assert_eq!(v.wwwx(), vec4(4_f32, 4_f32, 4_f32, 1_f32));
+ assert_eq!(v.wwwy(), vec4(4_f32, 4_f32, 4_f32, 2_f32));
+ assert_eq!(v.wwwz(), vec4(4_f32, 4_f32, 4_f32, 3_f32));
+ assert_eq!(v.wwww(), vec4(4_f32, 4_f32, 4_f32, 4_f32));
+ assert_eq!(v.xxx(), vec3(1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxy(), vec3(1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxz(), vec3(1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xxw(), vec3(1_f32, 1_f32, 4_f32));
+ assert_eq!(v.xyx(), vec3(1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyy(), vec3(1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xyz(), vec3(1_f32, 2_f32, 3_f32));
+ assert_eq!(v.xyw(), vec3(1_f32, 2_f32, 4_f32));
+ assert_eq!(v.xzx(), vec3(1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzy(), vec3(1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzz(), vec3(1_f32, 3_f32, 3_f32));
+ assert_eq!(v.xzw(), vec3(1_f32, 3_f32, 4_f32));
+ assert_eq!(v.xwx(), vec3(1_f32, 4_f32, 1_f32));
+ assert_eq!(v.xwy(), vec3(1_f32, 4_f32, 2_f32));
+ assert_eq!(v.xwz(), vec3(1_f32, 4_f32, 3_f32));
+ assert_eq!(v.xww(), vec3(1_f32, 4_f32, 4_f32));
+ assert_eq!(v.yxx(), vec3(2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxy(), vec3(2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxz(), vec3(2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yxw(), vec3(2_f32, 1_f32, 4_f32));
+ assert_eq!(v.yyx(), vec3(2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyy(), vec3(2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyz(), vec3(2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yyw(), vec3(2_f32, 2_f32, 4_f32));
+ assert_eq!(v.yzx(), vec3(2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzy(), vec3(2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzz(), vec3(2_f32, 3_f32, 3_f32));
+ assert_eq!(v.yzw(), vec3(2_f32, 3_f32, 4_f32));
+ assert_eq!(v.ywx(), vec3(2_f32, 4_f32, 1_f32));
+ assert_eq!(v.ywy(), vec3(2_f32, 4_f32, 2_f32));
+ assert_eq!(v.ywz(), vec3(2_f32, 4_f32, 3_f32));
+ assert_eq!(v.yww(), vec3(2_f32, 4_f32, 4_f32));
+ assert_eq!(v.zxx(), vec3(3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxy(), vec3(3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxz(), vec3(3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zxw(), vec3(3_f32, 1_f32, 4_f32));
+ assert_eq!(v.zyx(), vec3(3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyy(), vec3(3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyz(), vec3(3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zyw(), vec3(3_f32, 2_f32, 4_f32));
+ assert_eq!(v.zzx(), vec3(3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzy(), vec3(3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzz(), vec3(3_f32, 3_f32, 3_f32));
+ assert_eq!(v.zzw(), vec3(3_f32, 3_f32, 4_f32));
+ assert_eq!(v.zwx(), vec3(3_f32, 4_f32, 1_f32));
+ assert_eq!(v.zwy(), vec3(3_f32, 4_f32, 2_f32));
+ assert_eq!(v.zwz(), vec3(3_f32, 4_f32, 3_f32));
+ assert_eq!(v.zww(), vec3(3_f32, 4_f32, 4_f32));
+ assert_eq!(v.wxx(), vec3(4_f32, 1_f32, 1_f32));
+ assert_eq!(v.wxy(), vec3(4_f32, 1_f32, 2_f32));
+ assert_eq!(v.wxz(), vec3(4_f32, 1_f32, 3_f32));
+ assert_eq!(v.wxw(), vec3(4_f32, 1_f32, 4_f32));
+ assert_eq!(v.wyx(), vec3(4_f32, 2_f32, 1_f32));
+ assert_eq!(v.wyy(), vec3(4_f32, 2_f32, 2_f32));
+ assert_eq!(v.wyz(), vec3(4_f32, 2_f32, 3_f32));
+ assert_eq!(v.wyw(), vec3(4_f32, 2_f32, 4_f32));
+ assert_eq!(v.wzx(), vec3(4_f32, 3_f32, 1_f32));
+ assert_eq!(v.wzy(), vec3(4_f32, 3_f32, 2_f32));
+ assert_eq!(v.wzz(), vec3(4_f32, 3_f32, 3_f32));
+ assert_eq!(v.wzw(), vec3(4_f32, 3_f32, 4_f32));
+ assert_eq!(v.wwx(), vec3(4_f32, 4_f32, 1_f32));
+ assert_eq!(v.wwy(), vec3(4_f32, 4_f32, 2_f32));
+ assert_eq!(v.wwz(), vec3(4_f32, 4_f32, 3_f32));
+ assert_eq!(v.www(), vec3(4_f32, 4_f32, 4_f32));
+ assert_eq!(v.xx(), vec2(1_f32, 1_f32));
+ assert_eq!(v.xy(), vec2(1_f32, 2_f32));
+ assert_eq!(v.xz(), vec2(1_f32, 3_f32));
+ assert_eq!(v.xw(), vec2(1_f32, 4_f32));
+ assert_eq!(v.yx(), vec2(2_f32, 1_f32));
+ assert_eq!(v.yy(), vec2(2_f32, 2_f32));
+ assert_eq!(v.yz(), vec2(2_f32, 3_f32));
+ assert_eq!(v.yw(), vec2(2_f32, 4_f32));
+ assert_eq!(v.zx(), vec2(3_f32, 1_f32));
+ assert_eq!(v.zy(), vec2(3_f32, 2_f32));
+ assert_eq!(v.zz(), vec2(3_f32, 3_f32));
+ assert_eq!(v.zw(), vec2(3_f32, 4_f32));
+ assert_eq!(v.wx(), vec2(4_f32, 1_f32));
+ assert_eq!(v.wy(), vec2(4_f32, 2_f32));
+ assert_eq!(v.wz(), vec2(4_f32, 3_f32));
+ assert_eq!(v.ww(), vec2(4_f32, 4_f32));
+});
+
+glam_test!(test_vec3a_swizzles, {
+ let v = vec3a(1_f32, 2_f32, 3_f32);
+ assert_eq!(v, v.xyz());
+ assert_eq!(v.xxxx(), vec4(1_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxxy(), vec4(1_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxxz(), vec4(1_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xxyx(), vec4(1_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xxyy(), vec4(1_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xxyz(), vec4(1_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.xxzx(), vec4(1_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xxzy(), vec4(1_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xxzz(), vec4(1_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.xyxx(), vec4(1_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.xyxy(), vec4(1_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.xyxz(), vec4(1_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.xyyx(), vec4(1_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyyy(), vec4(1_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.xyyz(), vec4(1_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.xyzx(), vec4(1_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.xyzy(), vec4(1_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.xyzz(), vec4(1_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.xzxx(), vec4(1_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.xzxy(), vec4(1_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.xzxz(), vec4(1_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.xzyx(), vec4(1_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.xzyy(), vec4(1_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.xzyz(), vec4(1_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.xzzx(), vec4(1_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzzy(), vec4(1_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzzz(), vec4(1_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.yxxx(), vec4(2_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxxy(), vec4(2_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxxz(), vec4(2_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.yxyx(), vec4(2_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.yxyy(), vec4(2_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.yxyz(), vec4(2_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.yxzx(), vec4(2_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.yxzy(), vec4(2_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.yxzz(), vec4(2_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.yyxx(), vec4(2_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yyxy(), vec4(2_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yyxz(), vec4(2_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yyyx(), vec4(2_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyyy(), vec4(2_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyyz(), vec4(2_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yyzx(), vec4(2_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yyzy(), vec4(2_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yyzz(), vec4(2_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.yzxx(), vec4(2_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.yzxy(), vec4(2_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.yzxz(), vec4(2_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.yzyx(), vec4(2_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.yzyy(), vec4(2_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.yzyz(), vec4(2_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.yzzx(), vec4(2_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzzy(), vec4(2_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzzz(), vec4(2_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.zxxx(), vec4(3_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxxy(), vec4(3_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxxz(), vec4(3_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.zxyx(), vec4(3_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.zxyy(), vec4(3_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.zxyz(), vec4(3_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.zxzx(), vec4(3_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.zxzy(), vec4(3_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.zxzz(), vec4(3_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.zyxx(), vec4(3_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.zyxy(), vec4(3_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.zyxz(), vec4(3_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.zyyx(), vec4(3_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyyy(), vec4(3_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyyz(), vec4(3_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.zyzx(), vec4(3_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.zyzy(), vec4(3_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.zyzz(), vec4(3_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.zzxx(), vec4(3_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zzxy(), vec4(3_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zzxz(), vec4(3_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zzyx(), vec4(3_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zzyy(), vec4(3_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zzyz(), vec4(3_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zzzx(), vec4(3_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzzy(), vec4(3_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzzz(), vec4(3_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.xxx(), vec3a(1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxy(), vec3a(1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxz(), vec3a(1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xyx(), vec3a(1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyy(), vec3a(1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xzx(), vec3a(1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzy(), vec3a(1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzz(), vec3a(1_f32, 3_f32, 3_f32));
+ assert_eq!(v.yxx(), vec3a(2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxy(), vec3a(2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxz(), vec3a(2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yyx(), vec3a(2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyy(), vec3a(2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyz(), vec3a(2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yzx(), vec3a(2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzy(), vec3a(2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzz(), vec3a(2_f32, 3_f32, 3_f32));
+ assert_eq!(v.zxx(), vec3a(3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxy(), vec3a(3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxz(), vec3a(3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zyx(), vec3a(3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyy(), vec3a(3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyz(), vec3a(3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zzx(), vec3a(3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzy(), vec3a(3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzz(), vec3a(3_f32, 3_f32, 3_f32));
+ assert_eq!(v.xx(), vec2(1_f32, 1_f32));
+ assert_eq!(v.xy(), vec2(1_f32, 2_f32));
+ assert_eq!(v.xz(), vec2(1_f32, 3_f32));
+ assert_eq!(v.yx(), vec2(2_f32, 1_f32));
+ assert_eq!(v.yy(), vec2(2_f32, 2_f32));
+ assert_eq!(v.yz(), vec2(2_f32, 3_f32));
+ assert_eq!(v.zx(), vec2(3_f32, 1_f32));
+ assert_eq!(v.zy(), vec2(3_f32, 2_f32));
+ assert_eq!(v.zz(), vec2(3_f32, 3_f32));
+});
+
+glam_test!(test_vec3_swizzles, {
+ let v = vec3(1_f32, 2_f32, 3_f32);
+ assert_eq!(v, v.xyz());
+ assert_eq!(v.xxxx(), vec4(1_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxxy(), vec4(1_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxxz(), vec4(1_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xxyx(), vec4(1_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xxyy(), vec4(1_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xxyz(), vec4(1_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.xxzx(), vec4(1_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xxzy(), vec4(1_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xxzz(), vec4(1_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.xyxx(), vec4(1_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.xyxy(), vec4(1_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.xyxz(), vec4(1_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.xyyx(), vec4(1_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyyy(), vec4(1_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.xyyz(), vec4(1_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.xyzx(), vec4(1_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.xyzy(), vec4(1_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.xyzz(), vec4(1_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.xzxx(), vec4(1_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.xzxy(), vec4(1_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.xzxz(), vec4(1_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.xzyx(), vec4(1_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.xzyy(), vec4(1_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.xzyz(), vec4(1_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.xzzx(), vec4(1_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzzy(), vec4(1_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzzz(), vec4(1_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.yxxx(), vec4(2_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxxy(), vec4(2_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxxz(), vec4(2_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.yxyx(), vec4(2_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.yxyy(), vec4(2_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.yxyz(), vec4(2_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.yxzx(), vec4(2_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.yxzy(), vec4(2_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.yxzz(), vec4(2_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.yyxx(), vec4(2_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yyxy(), vec4(2_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yyxz(), vec4(2_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yyyx(), vec4(2_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyyy(), vec4(2_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyyz(), vec4(2_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yyzx(), vec4(2_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yyzy(), vec4(2_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yyzz(), vec4(2_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.yzxx(), vec4(2_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.yzxy(), vec4(2_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.yzxz(), vec4(2_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.yzyx(), vec4(2_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.yzyy(), vec4(2_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.yzyz(), vec4(2_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.yzzx(), vec4(2_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzzy(), vec4(2_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzzz(), vec4(2_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.zxxx(), vec4(3_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxxy(), vec4(3_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxxz(), vec4(3_f32, 1_f32, 1_f32, 3_f32));
+ assert_eq!(v.zxyx(), vec4(3_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.zxyy(), vec4(3_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.zxyz(), vec4(3_f32, 1_f32, 2_f32, 3_f32));
+ assert_eq!(v.zxzx(), vec4(3_f32, 1_f32, 3_f32, 1_f32));
+ assert_eq!(v.zxzy(), vec4(3_f32, 1_f32, 3_f32, 2_f32));
+ assert_eq!(v.zxzz(), vec4(3_f32, 1_f32, 3_f32, 3_f32));
+ assert_eq!(v.zyxx(), vec4(3_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.zyxy(), vec4(3_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.zyxz(), vec4(3_f32, 2_f32, 1_f32, 3_f32));
+ assert_eq!(v.zyyx(), vec4(3_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyyy(), vec4(3_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyyz(), vec4(3_f32, 2_f32, 2_f32, 3_f32));
+ assert_eq!(v.zyzx(), vec4(3_f32, 2_f32, 3_f32, 1_f32));
+ assert_eq!(v.zyzy(), vec4(3_f32, 2_f32, 3_f32, 2_f32));
+ assert_eq!(v.zyzz(), vec4(3_f32, 2_f32, 3_f32, 3_f32));
+ assert_eq!(v.zzxx(), vec4(3_f32, 3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zzxy(), vec4(3_f32, 3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zzxz(), vec4(3_f32, 3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zzyx(), vec4(3_f32, 3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zzyy(), vec4(3_f32, 3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zzyz(), vec4(3_f32, 3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zzzx(), vec4(3_f32, 3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzzy(), vec4(3_f32, 3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzzz(), vec4(3_f32, 3_f32, 3_f32, 3_f32));
+ assert_eq!(v.xxx(), vec3(1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxy(), vec3(1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxz(), vec3(1_f32, 1_f32, 3_f32));
+ assert_eq!(v.xyx(), vec3(1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyy(), vec3(1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xzx(), vec3(1_f32, 3_f32, 1_f32));
+ assert_eq!(v.xzy(), vec3(1_f32, 3_f32, 2_f32));
+ assert_eq!(v.xzz(), vec3(1_f32, 3_f32, 3_f32));
+ assert_eq!(v.yxx(), vec3(2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxy(), vec3(2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxz(), vec3(2_f32, 1_f32, 3_f32));
+ assert_eq!(v.yyx(), vec3(2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyy(), vec3(2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyz(), vec3(2_f32, 2_f32, 3_f32));
+ assert_eq!(v.yzx(), vec3(2_f32, 3_f32, 1_f32));
+ assert_eq!(v.yzy(), vec3(2_f32, 3_f32, 2_f32));
+ assert_eq!(v.yzz(), vec3(2_f32, 3_f32, 3_f32));
+ assert_eq!(v.zxx(), vec3(3_f32, 1_f32, 1_f32));
+ assert_eq!(v.zxy(), vec3(3_f32, 1_f32, 2_f32));
+ assert_eq!(v.zxz(), vec3(3_f32, 1_f32, 3_f32));
+ assert_eq!(v.zyx(), vec3(3_f32, 2_f32, 1_f32));
+ assert_eq!(v.zyy(), vec3(3_f32, 2_f32, 2_f32));
+ assert_eq!(v.zyz(), vec3(3_f32, 2_f32, 3_f32));
+ assert_eq!(v.zzx(), vec3(3_f32, 3_f32, 1_f32));
+ assert_eq!(v.zzy(), vec3(3_f32, 3_f32, 2_f32));
+ assert_eq!(v.zzz(), vec3(3_f32, 3_f32, 3_f32));
+ assert_eq!(v.xx(), vec2(1_f32, 1_f32));
+ assert_eq!(v.xy(), vec2(1_f32, 2_f32));
+ assert_eq!(v.xz(), vec2(1_f32, 3_f32));
+ assert_eq!(v.yx(), vec2(2_f32, 1_f32));
+ assert_eq!(v.yy(), vec2(2_f32, 2_f32));
+ assert_eq!(v.yz(), vec2(2_f32, 3_f32));
+ assert_eq!(v.zx(), vec2(3_f32, 1_f32));
+ assert_eq!(v.zy(), vec2(3_f32, 2_f32));
+ assert_eq!(v.zz(), vec2(3_f32, 3_f32));
+});
+
+glam_test!(test_vec2_swizzles, {
+ let v = vec2(1_f32, 2_f32);
+ assert_eq!(v, v.xy());
+ assert_eq!(v.xxxx(), vec4(1_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxxy(), vec4(1_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xxyx(), vec4(1_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xxyy(), vec4(1_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.xyxx(), vec4(1_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.xyxy(), vec4(1_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.xyyx(), vec4(1_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyyy(), vec4(1_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.yxxx(), vec4(2_f32, 1_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxxy(), vec4(2_f32, 1_f32, 1_f32, 2_f32));
+ assert_eq!(v.yxyx(), vec4(2_f32, 1_f32, 2_f32, 1_f32));
+ assert_eq!(v.yxyy(), vec4(2_f32, 1_f32, 2_f32, 2_f32));
+ assert_eq!(v.yyxx(), vec4(2_f32, 2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yyxy(), vec4(2_f32, 2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yyyx(), vec4(2_f32, 2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyyy(), vec4(2_f32, 2_f32, 2_f32, 2_f32));
+ assert_eq!(v.xxx(), vec3(1_f32, 1_f32, 1_f32));
+ assert_eq!(v.xxy(), vec3(1_f32, 1_f32, 2_f32));
+ assert_eq!(v.xyx(), vec3(1_f32, 2_f32, 1_f32));
+ assert_eq!(v.xyy(), vec3(1_f32, 2_f32, 2_f32));
+ assert_eq!(v.yxx(), vec3(2_f32, 1_f32, 1_f32));
+ assert_eq!(v.yxy(), vec3(2_f32, 1_f32, 2_f32));
+ assert_eq!(v.yyx(), vec3(2_f32, 2_f32, 1_f32));
+ assert_eq!(v.yyy(), vec3(2_f32, 2_f32, 2_f32));
+ assert_eq!(v.xx(), vec2(1_f32, 1_f32));
+ assert_eq!(v.yx(), vec2(2_f32, 1_f32));
+ assert_eq!(v.yy(), vec2(2_f32, 2_f32));
+});
diff --git a/tests/swizzles_f64.rs b/tests/swizzles_f64.rs
new file mode 100644
index 0000000..bcc0649
--- /dev/null
+++ b/tests/swizzles_f64.rs
@@ -0,0 +1,501 @@
+// Generated by swizzlegen. Do not edit.
+#[macro_use]
+mod support;
+use glam::*;
+
+glam_test!(test_dvec4_swizzles, {
+ let v = dvec4(1_f64, 2_f64, 3_f64, 4_f64);
+ assert_eq!(v, v.xyzw());
+ assert_eq!(v.xxxx(), dvec4(1_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxxy(), dvec4(1_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xxxz(), dvec4(1_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.xxxw(), dvec4(1_f64, 1_f64, 1_f64, 4_f64));
+ assert_eq!(v.xxyx(), dvec4(1_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xxyy(), dvec4(1_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.xxyz(), dvec4(1_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.xxyw(), dvec4(1_f64, 1_f64, 2_f64, 4_f64));
+ assert_eq!(v.xxzx(), dvec4(1_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.xxzy(), dvec4(1_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.xxzz(), dvec4(1_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.xxzw(), dvec4(1_f64, 1_f64, 3_f64, 4_f64));
+ assert_eq!(v.xxwx(), dvec4(1_f64, 1_f64, 4_f64, 1_f64));
+ assert_eq!(v.xxwy(), dvec4(1_f64, 1_f64, 4_f64, 2_f64));
+ assert_eq!(v.xxwz(), dvec4(1_f64, 1_f64, 4_f64, 3_f64));
+ assert_eq!(v.xxww(), dvec4(1_f64, 1_f64, 4_f64, 4_f64));
+ assert_eq!(v.xyxx(), dvec4(1_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.xyxy(), dvec4(1_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.xyxz(), dvec4(1_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.xyxw(), dvec4(1_f64, 2_f64, 1_f64, 4_f64));
+ assert_eq!(v.xyyx(), dvec4(1_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyyy(), dvec4(1_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.xyyz(), dvec4(1_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.xyyw(), dvec4(1_f64, 2_f64, 2_f64, 4_f64));
+ assert_eq!(v.xyzx(), dvec4(1_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.xyzy(), dvec4(1_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.xyzz(), dvec4(1_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.xywx(), dvec4(1_f64, 2_f64, 4_f64, 1_f64));
+ assert_eq!(v.xywy(), dvec4(1_f64, 2_f64, 4_f64, 2_f64));
+ assert_eq!(v.xywz(), dvec4(1_f64, 2_f64, 4_f64, 3_f64));
+ assert_eq!(v.xyww(), dvec4(1_f64, 2_f64, 4_f64, 4_f64));
+ assert_eq!(v.xzxx(), dvec4(1_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.xzxy(), dvec4(1_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.xzxz(), dvec4(1_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.xzxw(), dvec4(1_f64, 3_f64, 1_f64, 4_f64));
+ assert_eq!(v.xzyx(), dvec4(1_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.xzyy(), dvec4(1_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.xzyz(), dvec4(1_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.xzyw(), dvec4(1_f64, 3_f64, 2_f64, 4_f64));
+ assert_eq!(v.xzzx(), dvec4(1_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.xzzy(), dvec4(1_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.xzzz(), dvec4(1_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.xzzw(), dvec4(1_f64, 3_f64, 3_f64, 4_f64));
+ assert_eq!(v.xzwx(), dvec4(1_f64, 3_f64, 4_f64, 1_f64));
+ assert_eq!(v.xzwy(), dvec4(1_f64, 3_f64, 4_f64, 2_f64));
+ assert_eq!(v.xzwz(), dvec4(1_f64, 3_f64, 4_f64, 3_f64));
+ assert_eq!(v.xzww(), dvec4(1_f64, 3_f64, 4_f64, 4_f64));
+ assert_eq!(v.xwxx(), dvec4(1_f64, 4_f64, 1_f64, 1_f64));
+ assert_eq!(v.xwxy(), dvec4(1_f64, 4_f64, 1_f64, 2_f64));
+ assert_eq!(v.xwxz(), dvec4(1_f64, 4_f64, 1_f64, 3_f64));
+ assert_eq!(v.xwxw(), dvec4(1_f64, 4_f64, 1_f64, 4_f64));
+ assert_eq!(v.xwyx(), dvec4(1_f64, 4_f64, 2_f64, 1_f64));
+ assert_eq!(v.xwyy(), dvec4(1_f64, 4_f64, 2_f64, 2_f64));
+ assert_eq!(v.xwyz(), dvec4(1_f64, 4_f64, 2_f64, 3_f64));
+ assert_eq!(v.xwyw(), dvec4(1_f64, 4_f64, 2_f64, 4_f64));
+ assert_eq!(v.xwzx(), dvec4(1_f64, 4_f64, 3_f64, 1_f64));
+ assert_eq!(v.xwzy(), dvec4(1_f64, 4_f64, 3_f64, 2_f64));
+ assert_eq!(v.xwzz(), dvec4(1_f64, 4_f64, 3_f64, 3_f64));
+ assert_eq!(v.xwzw(), dvec4(1_f64, 4_f64, 3_f64, 4_f64));
+ assert_eq!(v.xwwx(), dvec4(1_f64, 4_f64, 4_f64, 1_f64));
+ assert_eq!(v.xwwy(), dvec4(1_f64, 4_f64, 4_f64, 2_f64));
+ assert_eq!(v.xwwz(), dvec4(1_f64, 4_f64, 4_f64, 3_f64));
+ assert_eq!(v.xwww(), dvec4(1_f64, 4_f64, 4_f64, 4_f64));
+ assert_eq!(v.yxxx(), dvec4(2_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxxy(), dvec4(2_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.yxxz(), dvec4(2_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.yxxw(), dvec4(2_f64, 1_f64, 1_f64, 4_f64));
+ assert_eq!(v.yxyx(), dvec4(2_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.yxyy(), dvec4(2_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.yxyz(), dvec4(2_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.yxyw(), dvec4(2_f64, 1_f64, 2_f64, 4_f64));
+ assert_eq!(v.yxzx(), dvec4(2_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.yxzy(), dvec4(2_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.yxzz(), dvec4(2_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.yxzw(), dvec4(2_f64, 1_f64, 3_f64, 4_f64));
+ assert_eq!(v.yxwx(), dvec4(2_f64, 1_f64, 4_f64, 1_f64));
+ assert_eq!(v.yxwy(), dvec4(2_f64, 1_f64, 4_f64, 2_f64));
+ assert_eq!(v.yxwz(), dvec4(2_f64, 1_f64, 4_f64, 3_f64));
+ assert_eq!(v.yxww(), dvec4(2_f64, 1_f64, 4_f64, 4_f64));
+ assert_eq!(v.yyxx(), dvec4(2_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yyxy(), dvec4(2_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yyxz(), dvec4(2_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.yyxw(), dvec4(2_f64, 2_f64, 1_f64, 4_f64));
+ assert_eq!(v.yyyx(), dvec4(2_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyyy(), dvec4(2_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.yyyz(), dvec4(2_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.yyyw(), dvec4(2_f64, 2_f64, 2_f64, 4_f64));
+ assert_eq!(v.yyzx(), dvec4(2_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.yyzy(), dvec4(2_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.yyzz(), dvec4(2_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.yyzw(), dvec4(2_f64, 2_f64, 3_f64, 4_f64));
+ assert_eq!(v.yywx(), dvec4(2_f64, 2_f64, 4_f64, 1_f64));
+ assert_eq!(v.yywy(), dvec4(2_f64, 2_f64, 4_f64, 2_f64));
+ assert_eq!(v.yywz(), dvec4(2_f64, 2_f64, 4_f64, 3_f64));
+ assert_eq!(v.yyww(), dvec4(2_f64, 2_f64, 4_f64, 4_f64));
+ assert_eq!(v.yzxx(), dvec4(2_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.yzxy(), dvec4(2_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.yzxz(), dvec4(2_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.yzxw(), dvec4(2_f64, 3_f64, 1_f64, 4_f64));
+ assert_eq!(v.yzyx(), dvec4(2_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.yzyy(), dvec4(2_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.yzyz(), dvec4(2_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.yzyw(), dvec4(2_f64, 3_f64, 2_f64, 4_f64));
+ assert_eq!(v.yzzx(), dvec4(2_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.yzzy(), dvec4(2_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.yzzz(), dvec4(2_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.yzzw(), dvec4(2_f64, 3_f64, 3_f64, 4_f64));
+ assert_eq!(v.yzwx(), dvec4(2_f64, 3_f64, 4_f64, 1_f64));
+ assert_eq!(v.yzwy(), dvec4(2_f64, 3_f64, 4_f64, 2_f64));
+ assert_eq!(v.yzwz(), dvec4(2_f64, 3_f64, 4_f64, 3_f64));
+ assert_eq!(v.yzww(), dvec4(2_f64, 3_f64, 4_f64, 4_f64));
+ assert_eq!(v.ywxx(), dvec4(2_f64, 4_f64, 1_f64, 1_f64));
+ assert_eq!(v.ywxy(), dvec4(2_f64, 4_f64, 1_f64, 2_f64));
+ assert_eq!(v.ywxz(), dvec4(2_f64, 4_f64, 1_f64, 3_f64));
+ assert_eq!(v.ywxw(), dvec4(2_f64, 4_f64, 1_f64, 4_f64));
+ assert_eq!(v.ywyx(), dvec4(2_f64, 4_f64, 2_f64, 1_f64));
+ assert_eq!(v.ywyy(), dvec4(2_f64, 4_f64, 2_f64, 2_f64));
+ assert_eq!(v.ywyz(), dvec4(2_f64, 4_f64, 2_f64, 3_f64));
+ assert_eq!(v.ywyw(), dvec4(2_f64, 4_f64, 2_f64, 4_f64));
+ assert_eq!(v.ywzx(), dvec4(2_f64, 4_f64, 3_f64, 1_f64));
+ assert_eq!(v.ywzy(), dvec4(2_f64, 4_f64, 3_f64, 2_f64));
+ assert_eq!(v.ywzz(), dvec4(2_f64, 4_f64, 3_f64, 3_f64));
+ assert_eq!(v.ywzw(), dvec4(2_f64, 4_f64, 3_f64, 4_f64));
+ assert_eq!(v.ywwx(), dvec4(2_f64, 4_f64, 4_f64, 1_f64));
+ assert_eq!(v.ywwy(), dvec4(2_f64, 4_f64, 4_f64, 2_f64));
+ assert_eq!(v.ywwz(), dvec4(2_f64, 4_f64, 4_f64, 3_f64));
+ assert_eq!(v.ywww(), dvec4(2_f64, 4_f64, 4_f64, 4_f64));
+ assert_eq!(v.zxxx(), dvec4(3_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.zxxy(), dvec4(3_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.zxxz(), dvec4(3_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.zxxw(), dvec4(3_f64, 1_f64, 1_f64, 4_f64));
+ assert_eq!(v.zxyx(), dvec4(3_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.zxyy(), dvec4(3_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.zxyz(), dvec4(3_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.zxyw(), dvec4(3_f64, 1_f64, 2_f64, 4_f64));
+ assert_eq!(v.zxzx(), dvec4(3_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.zxzy(), dvec4(3_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.zxzz(), dvec4(3_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.zxzw(), dvec4(3_f64, 1_f64, 3_f64, 4_f64));
+ assert_eq!(v.zxwx(), dvec4(3_f64, 1_f64, 4_f64, 1_f64));
+ assert_eq!(v.zxwy(), dvec4(3_f64, 1_f64, 4_f64, 2_f64));
+ assert_eq!(v.zxwz(), dvec4(3_f64, 1_f64, 4_f64, 3_f64));
+ assert_eq!(v.zxww(), dvec4(3_f64, 1_f64, 4_f64, 4_f64));
+ assert_eq!(v.zyxx(), dvec4(3_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.zyxy(), dvec4(3_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.zyxz(), dvec4(3_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.zyxw(), dvec4(3_f64, 2_f64, 1_f64, 4_f64));
+ assert_eq!(v.zyyx(), dvec4(3_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.zyyy(), dvec4(3_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.zyyz(), dvec4(3_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.zyyw(), dvec4(3_f64, 2_f64, 2_f64, 4_f64));
+ assert_eq!(v.zyzx(), dvec4(3_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.zyzy(), dvec4(3_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.zyzz(), dvec4(3_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.zyzw(), dvec4(3_f64, 2_f64, 3_f64, 4_f64));
+ assert_eq!(v.zywx(), dvec4(3_f64, 2_f64, 4_f64, 1_f64));
+ assert_eq!(v.zywy(), dvec4(3_f64, 2_f64, 4_f64, 2_f64));
+ assert_eq!(v.zywz(), dvec4(3_f64, 2_f64, 4_f64, 3_f64));
+ assert_eq!(v.zyww(), dvec4(3_f64, 2_f64, 4_f64, 4_f64));
+ assert_eq!(v.zzxx(), dvec4(3_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.zzxy(), dvec4(3_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.zzxz(), dvec4(3_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.zzxw(), dvec4(3_f64, 3_f64, 1_f64, 4_f64));
+ assert_eq!(v.zzyx(), dvec4(3_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.zzyy(), dvec4(3_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.zzyz(), dvec4(3_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.zzyw(), dvec4(3_f64, 3_f64, 2_f64, 4_f64));
+ assert_eq!(v.zzzx(), dvec4(3_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.zzzy(), dvec4(3_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.zzzz(), dvec4(3_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.zzzw(), dvec4(3_f64, 3_f64, 3_f64, 4_f64));
+ assert_eq!(v.zzwx(), dvec4(3_f64, 3_f64, 4_f64, 1_f64));
+ assert_eq!(v.zzwy(), dvec4(3_f64, 3_f64, 4_f64, 2_f64));
+ assert_eq!(v.zzwz(), dvec4(3_f64, 3_f64, 4_f64, 3_f64));
+ assert_eq!(v.zzww(), dvec4(3_f64, 3_f64, 4_f64, 4_f64));
+ assert_eq!(v.zwxx(), dvec4(3_f64, 4_f64, 1_f64, 1_f64));
+ assert_eq!(v.zwxy(), dvec4(3_f64, 4_f64, 1_f64, 2_f64));
+ assert_eq!(v.zwxz(), dvec4(3_f64, 4_f64, 1_f64, 3_f64));
+ assert_eq!(v.zwxw(), dvec4(3_f64, 4_f64, 1_f64, 4_f64));
+ assert_eq!(v.zwyx(), dvec4(3_f64, 4_f64, 2_f64, 1_f64));
+ assert_eq!(v.zwyy(), dvec4(3_f64, 4_f64, 2_f64, 2_f64));
+ assert_eq!(v.zwyz(), dvec4(3_f64, 4_f64, 2_f64, 3_f64));
+ assert_eq!(v.zwyw(), dvec4(3_f64, 4_f64, 2_f64, 4_f64));
+ assert_eq!(v.zwzx(), dvec4(3_f64, 4_f64, 3_f64, 1_f64));
+ assert_eq!(v.zwzy(), dvec4(3_f64, 4_f64, 3_f64, 2_f64));
+ assert_eq!(v.zwzz(), dvec4(3_f64, 4_f64, 3_f64, 3_f64));
+ assert_eq!(v.zwzw(), dvec4(3_f64, 4_f64, 3_f64, 4_f64));
+ assert_eq!(v.zwwx(), dvec4(3_f64, 4_f64, 4_f64, 1_f64));
+ assert_eq!(v.zwwy(), dvec4(3_f64, 4_f64, 4_f64, 2_f64));
+ assert_eq!(v.zwwz(), dvec4(3_f64, 4_f64, 4_f64, 3_f64));
+ assert_eq!(v.zwww(), dvec4(3_f64, 4_f64, 4_f64, 4_f64));
+ assert_eq!(v.wxxx(), dvec4(4_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.wxxy(), dvec4(4_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.wxxz(), dvec4(4_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.wxxw(), dvec4(4_f64, 1_f64, 1_f64, 4_f64));
+ assert_eq!(v.wxyx(), dvec4(4_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.wxyy(), dvec4(4_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.wxyz(), dvec4(4_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.wxyw(), dvec4(4_f64, 1_f64, 2_f64, 4_f64));
+ assert_eq!(v.wxzx(), dvec4(4_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.wxzy(), dvec4(4_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.wxzz(), dvec4(4_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.wxzw(), dvec4(4_f64, 1_f64, 3_f64, 4_f64));
+ assert_eq!(v.wxwx(), dvec4(4_f64, 1_f64, 4_f64, 1_f64));
+ assert_eq!(v.wxwy(), dvec4(4_f64, 1_f64, 4_f64, 2_f64));
+ assert_eq!(v.wxwz(), dvec4(4_f64, 1_f64, 4_f64, 3_f64));
+ assert_eq!(v.wxww(), dvec4(4_f64, 1_f64, 4_f64, 4_f64));
+ assert_eq!(v.wyxx(), dvec4(4_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.wyxy(), dvec4(4_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.wyxz(), dvec4(4_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.wyxw(), dvec4(4_f64, 2_f64, 1_f64, 4_f64));
+ assert_eq!(v.wyyx(), dvec4(4_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.wyyy(), dvec4(4_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.wyyz(), dvec4(4_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.wyyw(), dvec4(4_f64, 2_f64, 2_f64, 4_f64));
+ assert_eq!(v.wyzx(), dvec4(4_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.wyzy(), dvec4(4_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.wyzz(), dvec4(4_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.wyzw(), dvec4(4_f64, 2_f64, 3_f64, 4_f64));
+ assert_eq!(v.wywx(), dvec4(4_f64, 2_f64, 4_f64, 1_f64));
+ assert_eq!(v.wywy(), dvec4(4_f64, 2_f64, 4_f64, 2_f64));
+ assert_eq!(v.wywz(), dvec4(4_f64, 2_f64, 4_f64, 3_f64));
+ assert_eq!(v.wyww(), dvec4(4_f64, 2_f64, 4_f64, 4_f64));
+ assert_eq!(v.wzxx(), dvec4(4_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.wzxy(), dvec4(4_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.wzxz(), dvec4(4_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.wzxw(), dvec4(4_f64, 3_f64, 1_f64, 4_f64));
+ assert_eq!(v.wzyx(), dvec4(4_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.wzyy(), dvec4(4_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.wzyz(), dvec4(4_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.wzyw(), dvec4(4_f64, 3_f64, 2_f64, 4_f64));
+ assert_eq!(v.wzzx(), dvec4(4_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.wzzy(), dvec4(4_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.wzzz(), dvec4(4_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.wzzw(), dvec4(4_f64, 3_f64, 3_f64, 4_f64));
+ assert_eq!(v.wzwx(), dvec4(4_f64, 3_f64, 4_f64, 1_f64));
+ assert_eq!(v.wzwy(), dvec4(4_f64, 3_f64, 4_f64, 2_f64));
+ assert_eq!(v.wzwz(), dvec4(4_f64, 3_f64, 4_f64, 3_f64));
+ assert_eq!(v.wzww(), dvec4(4_f64, 3_f64, 4_f64, 4_f64));
+ assert_eq!(v.wwxx(), dvec4(4_f64, 4_f64, 1_f64, 1_f64));
+ assert_eq!(v.wwxy(), dvec4(4_f64, 4_f64, 1_f64, 2_f64));
+ assert_eq!(v.wwxz(), dvec4(4_f64, 4_f64, 1_f64, 3_f64));
+ assert_eq!(v.wwxw(), dvec4(4_f64, 4_f64, 1_f64, 4_f64));
+ assert_eq!(v.wwyx(), dvec4(4_f64, 4_f64, 2_f64, 1_f64));
+ assert_eq!(v.wwyy(), dvec4(4_f64, 4_f64, 2_f64, 2_f64));
+ assert_eq!(v.wwyz(), dvec4(4_f64, 4_f64, 2_f64, 3_f64));
+ assert_eq!(v.wwyw(), dvec4(4_f64, 4_f64, 2_f64, 4_f64));
+ assert_eq!(v.wwzx(), dvec4(4_f64, 4_f64, 3_f64, 1_f64));
+ assert_eq!(v.wwzy(), dvec4(4_f64, 4_f64, 3_f64, 2_f64));
+ assert_eq!(v.wwzz(), dvec4(4_f64, 4_f64, 3_f64, 3_f64));
+ assert_eq!(v.wwzw(), dvec4(4_f64, 4_f64, 3_f64, 4_f64));
+ assert_eq!(v.wwwx(), dvec4(4_f64, 4_f64, 4_f64, 1_f64));
+ assert_eq!(v.wwwy(), dvec4(4_f64, 4_f64, 4_f64, 2_f64));
+ assert_eq!(v.wwwz(), dvec4(4_f64, 4_f64, 4_f64, 3_f64));
+ assert_eq!(v.wwww(), dvec4(4_f64, 4_f64, 4_f64, 4_f64));
+});
+
+glam_test!(test_dvec4_swizzles_2, {
+ let v = dvec4(1_f64, 2_f64, 3_f64, 4_f64);
+ assert_eq!(v.xxx(), dvec3(1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxy(), dvec3(1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xxz(), dvec3(1_f64, 1_f64, 3_f64));
+ assert_eq!(v.xxw(), dvec3(1_f64, 1_f64, 4_f64));
+ assert_eq!(v.xyx(), dvec3(1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyy(), dvec3(1_f64, 2_f64, 2_f64));
+ assert_eq!(v.xyz(), dvec3(1_f64, 2_f64, 3_f64));
+ assert_eq!(v.xyw(), dvec3(1_f64, 2_f64, 4_f64));
+ assert_eq!(v.xzx(), dvec3(1_f64, 3_f64, 1_f64));
+ assert_eq!(v.xzy(), dvec3(1_f64, 3_f64, 2_f64));
+ assert_eq!(v.xzz(), dvec3(1_f64, 3_f64, 3_f64));
+ assert_eq!(v.xzw(), dvec3(1_f64, 3_f64, 4_f64));
+ assert_eq!(v.xwx(), dvec3(1_f64, 4_f64, 1_f64));
+ assert_eq!(v.xwy(), dvec3(1_f64, 4_f64, 2_f64));
+ assert_eq!(v.xwz(), dvec3(1_f64, 4_f64, 3_f64));
+ assert_eq!(v.xww(), dvec3(1_f64, 4_f64, 4_f64));
+ assert_eq!(v.yxx(), dvec3(2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxy(), dvec3(2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yxz(), dvec3(2_f64, 1_f64, 3_f64));
+ assert_eq!(v.yxw(), dvec3(2_f64, 1_f64, 4_f64));
+ assert_eq!(v.yyx(), dvec3(2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyy(), dvec3(2_f64, 2_f64, 2_f64));
+ assert_eq!(v.yyz(), dvec3(2_f64, 2_f64, 3_f64));
+ assert_eq!(v.yyw(), dvec3(2_f64, 2_f64, 4_f64));
+ assert_eq!(v.yzx(), dvec3(2_f64, 3_f64, 1_f64));
+ assert_eq!(v.yzy(), dvec3(2_f64, 3_f64, 2_f64));
+ assert_eq!(v.yzz(), dvec3(2_f64, 3_f64, 3_f64));
+ assert_eq!(v.yzw(), dvec3(2_f64, 3_f64, 4_f64));
+ assert_eq!(v.ywx(), dvec3(2_f64, 4_f64, 1_f64));
+ assert_eq!(v.ywy(), dvec3(2_f64, 4_f64, 2_f64));
+ assert_eq!(v.ywz(), dvec3(2_f64, 4_f64, 3_f64));
+ assert_eq!(v.yww(), dvec3(2_f64, 4_f64, 4_f64));
+ assert_eq!(v.zxx(), dvec3(3_f64, 1_f64, 1_f64));
+ assert_eq!(v.zxy(), dvec3(3_f64, 1_f64, 2_f64));
+ assert_eq!(v.zxz(), dvec3(3_f64, 1_f64, 3_f64));
+ assert_eq!(v.zxw(), dvec3(3_f64, 1_f64, 4_f64));
+ assert_eq!(v.zyx(), dvec3(3_f64, 2_f64, 1_f64));
+ assert_eq!(v.zyy(), dvec3(3_f64, 2_f64, 2_f64));
+ assert_eq!(v.zyz(), dvec3(3_f64, 2_f64, 3_f64));
+ assert_eq!(v.zyw(), dvec3(3_f64, 2_f64, 4_f64));
+ assert_eq!(v.zzx(), dvec3(3_f64, 3_f64, 1_f64));
+ assert_eq!(v.zzy(), dvec3(3_f64, 3_f64, 2_f64));
+ assert_eq!(v.zzz(), dvec3(3_f64, 3_f64, 3_f64));
+ assert_eq!(v.zzw(), dvec3(3_f64, 3_f64, 4_f64));
+ assert_eq!(v.zwx(), dvec3(3_f64, 4_f64, 1_f64));
+ assert_eq!(v.zwy(), dvec3(3_f64, 4_f64, 2_f64));
+ assert_eq!(v.zwz(), dvec3(3_f64, 4_f64, 3_f64));
+ assert_eq!(v.zww(), dvec3(3_f64, 4_f64, 4_f64));
+ assert_eq!(v.wxx(), dvec3(4_f64, 1_f64, 1_f64));
+ assert_eq!(v.wxy(), dvec3(4_f64, 1_f64, 2_f64));
+ assert_eq!(v.wxz(), dvec3(4_f64, 1_f64, 3_f64));
+ assert_eq!(v.wxw(), dvec3(4_f64, 1_f64, 4_f64));
+ assert_eq!(v.wyx(), dvec3(4_f64, 2_f64, 1_f64));
+ assert_eq!(v.wyy(), dvec3(4_f64, 2_f64, 2_f64));
+ assert_eq!(v.wyz(), dvec3(4_f64, 2_f64, 3_f64));
+ assert_eq!(v.wyw(), dvec3(4_f64, 2_f64, 4_f64));
+ assert_eq!(v.wzx(), dvec3(4_f64, 3_f64, 1_f64));
+ assert_eq!(v.wzy(), dvec3(4_f64, 3_f64, 2_f64));
+ assert_eq!(v.wzz(), dvec3(4_f64, 3_f64, 3_f64));
+ assert_eq!(v.wzw(), dvec3(4_f64, 3_f64, 4_f64));
+ assert_eq!(v.wwx(), dvec3(4_f64, 4_f64, 1_f64));
+ assert_eq!(v.wwy(), dvec3(4_f64, 4_f64, 2_f64));
+ assert_eq!(v.wwz(), dvec3(4_f64, 4_f64, 3_f64));
+ assert_eq!(v.www(), dvec3(4_f64, 4_f64, 4_f64));
+ assert_eq!(v.xx(), dvec2(1_f64, 1_f64));
+ assert_eq!(v.xy(), dvec2(1_f64, 2_f64));
+ assert_eq!(v.xz(), dvec2(1_f64, 3_f64));
+ assert_eq!(v.xw(), dvec2(1_f64, 4_f64));
+ assert_eq!(v.yx(), dvec2(2_f64, 1_f64));
+ assert_eq!(v.yy(), dvec2(2_f64, 2_f64));
+ assert_eq!(v.yz(), dvec2(2_f64, 3_f64));
+ assert_eq!(v.yw(), dvec2(2_f64, 4_f64));
+ assert_eq!(v.zx(), dvec2(3_f64, 1_f64));
+ assert_eq!(v.zy(), dvec2(3_f64, 2_f64));
+ assert_eq!(v.zz(), dvec2(3_f64, 3_f64));
+ assert_eq!(v.zw(), dvec2(3_f64, 4_f64));
+ assert_eq!(v.wx(), dvec2(4_f64, 1_f64));
+ assert_eq!(v.wy(), dvec2(4_f64, 2_f64));
+ assert_eq!(v.wz(), dvec2(4_f64, 3_f64));
+ assert_eq!(v.ww(), dvec2(4_f64, 4_f64));
+});
+
+glam_test!(test_dvec3_swizzles, {
+ let v = dvec3(1_f64, 2_f64, 3_f64);
+ assert_eq!(v, v.xyz());
+ assert_eq!(v.xxxx(), dvec4(1_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxxy(), dvec4(1_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xxxz(), dvec4(1_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.xxyx(), dvec4(1_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xxyy(), dvec4(1_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.xxyz(), dvec4(1_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.xxzx(), dvec4(1_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.xxzy(), dvec4(1_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.xxzz(), dvec4(1_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.xyxx(), dvec4(1_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.xyxy(), dvec4(1_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.xyxz(), dvec4(1_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.xyyx(), dvec4(1_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyyy(), dvec4(1_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.xyyz(), dvec4(1_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.xyzx(), dvec4(1_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.xyzy(), dvec4(1_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.xyzz(), dvec4(1_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.xzxx(), dvec4(1_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.xzxy(), dvec4(1_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.xzxz(), dvec4(1_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.xzyx(), dvec4(1_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.xzyy(), dvec4(1_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.xzyz(), dvec4(1_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.xzzx(), dvec4(1_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.xzzy(), dvec4(1_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.xzzz(), dvec4(1_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.yxxx(), dvec4(2_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxxy(), dvec4(2_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.yxxz(), dvec4(2_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.yxyx(), dvec4(2_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.yxyy(), dvec4(2_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.yxyz(), dvec4(2_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.yxzx(), dvec4(2_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.yxzy(), dvec4(2_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.yxzz(), dvec4(2_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.yyxx(), dvec4(2_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yyxy(), dvec4(2_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yyxz(), dvec4(2_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.yyyx(), dvec4(2_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyyy(), dvec4(2_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.yyyz(), dvec4(2_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.yyzx(), dvec4(2_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.yyzy(), dvec4(2_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.yyzz(), dvec4(2_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.yzxx(), dvec4(2_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.yzxy(), dvec4(2_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.yzxz(), dvec4(2_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.yzyx(), dvec4(2_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.yzyy(), dvec4(2_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.yzyz(), dvec4(2_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.yzzx(), dvec4(2_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.yzzy(), dvec4(2_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.yzzz(), dvec4(2_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.zxxx(), dvec4(3_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.zxxy(), dvec4(3_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.zxxz(), dvec4(3_f64, 1_f64, 1_f64, 3_f64));
+ assert_eq!(v.zxyx(), dvec4(3_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.zxyy(), dvec4(3_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.zxyz(), dvec4(3_f64, 1_f64, 2_f64, 3_f64));
+ assert_eq!(v.zxzx(), dvec4(3_f64, 1_f64, 3_f64, 1_f64));
+ assert_eq!(v.zxzy(), dvec4(3_f64, 1_f64, 3_f64, 2_f64));
+ assert_eq!(v.zxzz(), dvec4(3_f64, 1_f64, 3_f64, 3_f64));
+ assert_eq!(v.zyxx(), dvec4(3_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.zyxy(), dvec4(3_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.zyxz(), dvec4(3_f64, 2_f64, 1_f64, 3_f64));
+ assert_eq!(v.zyyx(), dvec4(3_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.zyyy(), dvec4(3_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.zyyz(), dvec4(3_f64, 2_f64, 2_f64, 3_f64));
+ assert_eq!(v.zyzx(), dvec4(3_f64, 2_f64, 3_f64, 1_f64));
+ assert_eq!(v.zyzy(), dvec4(3_f64, 2_f64, 3_f64, 2_f64));
+ assert_eq!(v.zyzz(), dvec4(3_f64, 2_f64, 3_f64, 3_f64));
+ assert_eq!(v.zzxx(), dvec4(3_f64, 3_f64, 1_f64, 1_f64));
+ assert_eq!(v.zzxy(), dvec4(3_f64, 3_f64, 1_f64, 2_f64));
+ assert_eq!(v.zzxz(), dvec4(3_f64, 3_f64, 1_f64, 3_f64));
+ assert_eq!(v.zzyx(), dvec4(3_f64, 3_f64, 2_f64, 1_f64));
+ assert_eq!(v.zzyy(), dvec4(3_f64, 3_f64, 2_f64, 2_f64));
+ assert_eq!(v.zzyz(), dvec4(3_f64, 3_f64, 2_f64, 3_f64));
+ assert_eq!(v.zzzx(), dvec4(3_f64, 3_f64, 3_f64, 1_f64));
+ assert_eq!(v.zzzy(), dvec4(3_f64, 3_f64, 3_f64, 2_f64));
+ assert_eq!(v.zzzz(), dvec4(3_f64, 3_f64, 3_f64, 3_f64));
+ assert_eq!(v.xxx(), dvec3(1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxy(), dvec3(1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xxz(), dvec3(1_f64, 1_f64, 3_f64));
+ assert_eq!(v.xyx(), dvec3(1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyy(), dvec3(1_f64, 2_f64, 2_f64));
+ assert_eq!(v.xzx(), dvec3(1_f64, 3_f64, 1_f64));
+ assert_eq!(v.xzy(), dvec3(1_f64, 3_f64, 2_f64));
+ assert_eq!(v.xzz(), dvec3(1_f64, 3_f64, 3_f64));
+ assert_eq!(v.yxx(), dvec3(2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxy(), dvec3(2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yxz(), dvec3(2_f64, 1_f64, 3_f64));
+ assert_eq!(v.yyx(), dvec3(2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyy(), dvec3(2_f64, 2_f64, 2_f64));
+ assert_eq!(v.yyz(), dvec3(2_f64, 2_f64, 3_f64));
+ assert_eq!(v.yzx(), dvec3(2_f64, 3_f64, 1_f64));
+ assert_eq!(v.yzy(), dvec3(2_f64, 3_f64, 2_f64));
+ assert_eq!(v.yzz(), dvec3(2_f64, 3_f64, 3_f64));
+ assert_eq!(v.zxx(), dvec3(3_f64, 1_f64, 1_f64));
+ assert_eq!(v.zxy(), dvec3(3_f64, 1_f64, 2_f64));
+ assert_eq!(v.zxz(), dvec3(3_f64, 1_f64, 3_f64));
+ assert_eq!(v.zyx(), dvec3(3_f64, 2_f64, 1_f64));
+ assert_eq!(v.zyy(), dvec3(3_f64, 2_f64, 2_f64));
+ assert_eq!(v.zyz(), dvec3(3_f64, 2_f64, 3_f64));
+ assert_eq!(v.zzx(), dvec3(3_f64, 3_f64, 1_f64));
+ assert_eq!(v.zzy(), dvec3(3_f64, 3_f64, 2_f64));
+ assert_eq!(v.zzz(), dvec3(3_f64, 3_f64, 3_f64));
+ assert_eq!(v.xx(), dvec2(1_f64, 1_f64));
+ assert_eq!(v.xy(), dvec2(1_f64, 2_f64));
+ assert_eq!(v.xz(), dvec2(1_f64, 3_f64));
+ assert_eq!(v.yx(), dvec2(2_f64, 1_f64));
+ assert_eq!(v.yy(), dvec2(2_f64, 2_f64));
+ assert_eq!(v.yz(), dvec2(2_f64, 3_f64));
+ assert_eq!(v.zx(), dvec2(3_f64, 1_f64));
+ assert_eq!(v.zy(), dvec2(3_f64, 2_f64));
+ assert_eq!(v.zz(), dvec2(3_f64, 3_f64));
+});
+
+glam_test!(test_dvec2_swizzles, {
+ let v = dvec2(1_f64, 2_f64);
+ assert_eq!(v, v.xy());
+ assert_eq!(v.xxxx(), dvec4(1_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxxy(), dvec4(1_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xxyx(), dvec4(1_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xxyy(), dvec4(1_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.xyxx(), dvec4(1_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.xyxy(), dvec4(1_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.xyyx(), dvec4(1_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyyy(), dvec4(1_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.yxxx(), dvec4(2_f64, 1_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxxy(), dvec4(2_f64, 1_f64, 1_f64, 2_f64));
+ assert_eq!(v.yxyx(), dvec4(2_f64, 1_f64, 2_f64, 1_f64));
+ assert_eq!(v.yxyy(), dvec4(2_f64, 1_f64, 2_f64, 2_f64));
+ assert_eq!(v.yyxx(), dvec4(2_f64, 2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yyxy(), dvec4(2_f64, 2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yyyx(), dvec4(2_f64, 2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyyy(), dvec4(2_f64, 2_f64, 2_f64, 2_f64));
+ assert_eq!(v.xxx(), dvec3(1_f64, 1_f64, 1_f64));
+ assert_eq!(v.xxy(), dvec3(1_f64, 1_f64, 2_f64));
+ assert_eq!(v.xyx(), dvec3(1_f64, 2_f64, 1_f64));
+ assert_eq!(v.xyy(), dvec3(1_f64, 2_f64, 2_f64));
+ assert_eq!(v.yxx(), dvec3(2_f64, 1_f64, 1_f64));
+ assert_eq!(v.yxy(), dvec3(2_f64, 1_f64, 2_f64));
+ assert_eq!(v.yyx(), dvec3(2_f64, 2_f64, 1_f64));
+ assert_eq!(v.yyy(), dvec3(2_f64, 2_f64, 2_f64));
+ assert_eq!(v.xx(), dvec2(1_f64, 1_f64));
+ assert_eq!(v.yx(), dvec2(2_f64, 1_f64));
+ assert_eq!(v.yy(), dvec2(2_f64, 2_f64));
+});
diff --git a/tests/swizzles_i32.rs b/tests/swizzles_i32.rs
new file mode 100644
index 0000000..3635315
--- /dev/null
+++ b/tests/swizzles_i32.rs
@@ -0,0 +1,497 @@
+// Generated by swizzlegen. Do not edit.
+#[macro_use]
+mod support;
+use glam::*;
+
+glam_test!(test_ivec4_swizzles, {
+ let v = ivec4(1_i32, 2_i32, 3_i32, 4_i32);
+ assert_eq!(v, v.xyzw());
+ assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xxxz(), ivec4(1_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.xxxw(), ivec4(1_i32, 1_i32, 1_i32, 4_i32));
+ assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.xxyz(), ivec4(1_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.xxyw(), ivec4(1_i32, 1_i32, 2_i32, 4_i32));
+ assert_eq!(v.xxzx(), ivec4(1_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.xxzy(), ivec4(1_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.xxzz(), ivec4(1_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.xxzw(), ivec4(1_i32, 1_i32, 3_i32, 4_i32));
+ assert_eq!(v.xxwx(), ivec4(1_i32, 1_i32, 4_i32, 1_i32));
+ assert_eq!(v.xxwy(), ivec4(1_i32, 1_i32, 4_i32, 2_i32));
+ assert_eq!(v.xxwz(), ivec4(1_i32, 1_i32, 4_i32, 3_i32));
+ assert_eq!(v.xxww(), ivec4(1_i32, 1_i32, 4_i32, 4_i32));
+ assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.xyxz(), ivec4(1_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.xyxw(), ivec4(1_i32, 2_i32, 1_i32, 4_i32));
+ assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.xyyz(), ivec4(1_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.xyyw(), ivec4(1_i32, 2_i32, 2_i32, 4_i32));
+ assert_eq!(v.xyzx(), ivec4(1_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.xyzy(), ivec4(1_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.xyzz(), ivec4(1_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.xywx(), ivec4(1_i32, 2_i32, 4_i32, 1_i32));
+ assert_eq!(v.xywy(), ivec4(1_i32, 2_i32, 4_i32, 2_i32));
+ assert_eq!(v.xywz(), ivec4(1_i32, 2_i32, 4_i32, 3_i32));
+ assert_eq!(v.xyww(), ivec4(1_i32, 2_i32, 4_i32, 4_i32));
+ assert_eq!(v.xzxx(), ivec4(1_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.xzxy(), ivec4(1_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.xzxz(), ivec4(1_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.xzxw(), ivec4(1_i32, 3_i32, 1_i32, 4_i32));
+ assert_eq!(v.xzyx(), ivec4(1_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.xzyy(), ivec4(1_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.xzyz(), ivec4(1_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.xzyw(), ivec4(1_i32, 3_i32, 2_i32, 4_i32));
+ assert_eq!(v.xzzx(), ivec4(1_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.xzzy(), ivec4(1_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.xzzz(), ivec4(1_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.xzzw(), ivec4(1_i32, 3_i32, 3_i32, 4_i32));
+ assert_eq!(v.xzwx(), ivec4(1_i32, 3_i32, 4_i32, 1_i32));
+ assert_eq!(v.xzwy(), ivec4(1_i32, 3_i32, 4_i32, 2_i32));
+ assert_eq!(v.xzwz(), ivec4(1_i32, 3_i32, 4_i32, 3_i32));
+ assert_eq!(v.xzww(), ivec4(1_i32, 3_i32, 4_i32, 4_i32));
+ assert_eq!(v.xwxx(), ivec4(1_i32, 4_i32, 1_i32, 1_i32));
+ assert_eq!(v.xwxy(), ivec4(1_i32, 4_i32, 1_i32, 2_i32));
+ assert_eq!(v.xwxz(), ivec4(1_i32, 4_i32, 1_i32, 3_i32));
+ assert_eq!(v.xwxw(), ivec4(1_i32, 4_i32, 1_i32, 4_i32));
+ assert_eq!(v.xwyx(), ivec4(1_i32, 4_i32, 2_i32, 1_i32));
+ assert_eq!(v.xwyy(), ivec4(1_i32, 4_i32, 2_i32, 2_i32));
+ assert_eq!(v.xwyz(), ivec4(1_i32, 4_i32, 2_i32, 3_i32));
+ assert_eq!(v.xwyw(), ivec4(1_i32, 4_i32, 2_i32, 4_i32));
+ assert_eq!(v.xwzx(), ivec4(1_i32, 4_i32, 3_i32, 1_i32));
+ assert_eq!(v.xwzy(), ivec4(1_i32, 4_i32, 3_i32, 2_i32));
+ assert_eq!(v.xwzz(), ivec4(1_i32, 4_i32, 3_i32, 3_i32));
+ assert_eq!(v.xwzw(), ivec4(1_i32, 4_i32, 3_i32, 4_i32));
+ assert_eq!(v.xwwx(), ivec4(1_i32, 4_i32, 4_i32, 1_i32));
+ assert_eq!(v.xwwy(), ivec4(1_i32, 4_i32, 4_i32, 2_i32));
+ assert_eq!(v.xwwz(), ivec4(1_i32, 4_i32, 4_i32, 3_i32));
+ assert_eq!(v.xwww(), ivec4(1_i32, 4_i32, 4_i32, 4_i32));
+ assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.yxxz(), ivec4(2_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.yxxw(), ivec4(2_i32, 1_i32, 1_i32, 4_i32));
+ assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.yxyz(), ivec4(2_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.yxyw(), ivec4(2_i32, 1_i32, 2_i32, 4_i32));
+ assert_eq!(v.yxzx(), ivec4(2_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.yxzy(), ivec4(2_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.yxzz(), ivec4(2_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.yxzw(), ivec4(2_i32, 1_i32, 3_i32, 4_i32));
+ assert_eq!(v.yxwx(), ivec4(2_i32, 1_i32, 4_i32, 1_i32));
+ assert_eq!(v.yxwy(), ivec4(2_i32, 1_i32, 4_i32, 2_i32));
+ assert_eq!(v.yxwz(), ivec4(2_i32, 1_i32, 4_i32, 3_i32));
+ assert_eq!(v.yxww(), ivec4(2_i32, 1_i32, 4_i32, 4_i32));
+ assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yyxz(), ivec4(2_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.yyxw(), ivec4(2_i32, 2_i32, 1_i32, 4_i32));
+ assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.yyyz(), ivec4(2_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.yyyw(), ivec4(2_i32, 2_i32, 2_i32, 4_i32));
+ assert_eq!(v.yyzx(), ivec4(2_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.yyzy(), ivec4(2_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.yyzz(), ivec4(2_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.yyzw(), ivec4(2_i32, 2_i32, 3_i32, 4_i32));
+ assert_eq!(v.yywx(), ivec4(2_i32, 2_i32, 4_i32, 1_i32));
+ assert_eq!(v.yywy(), ivec4(2_i32, 2_i32, 4_i32, 2_i32));
+ assert_eq!(v.yywz(), ivec4(2_i32, 2_i32, 4_i32, 3_i32));
+ assert_eq!(v.yyww(), ivec4(2_i32, 2_i32, 4_i32, 4_i32));
+ assert_eq!(v.yzxx(), ivec4(2_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.yzxy(), ivec4(2_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.yzxz(), ivec4(2_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.yzxw(), ivec4(2_i32, 3_i32, 1_i32, 4_i32));
+ assert_eq!(v.yzyx(), ivec4(2_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.yzyy(), ivec4(2_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.yzyz(), ivec4(2_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.yzyw(), ivec4(2_i32, 3_i32, 2_i32, 4_i32));
+ assert_eq!(v.yzzx(), ivec4(2_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.yzzy(), ivec4(2_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.yzzz(), ivec4(2_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.yzzw(), ivec4(2_i32, 3_i32, 3_i32, 4_i32));
+ assert_eq!(v.yzwx(), ivec4(2_i32, 3_i32, 4_i32, 1_i32));
+ assert_eq!(v.yzwy(), ivec4(2_i32, 3_i32, 4_i32, 2_i32));
+ assert_eq!(v.yzwz(), ivec4(2_i32, 3_i32, 4_i32, 3_i32));
+ assert_eq!(v.yzww(), ivec4(2_i32, 3_i32, 4_i32, 4_i32));
+ assert_eq!(v.ywxx(), ivec4(2_i32, 4_i32, 1_i32, 1_i32));
+ assert_eq!(v.ywxy(), ivec4(2_i32, 4_i32, 1_i32, 2_i32));
+ assert_eq!(v.ywxz(), ivec4(2_i32, 4_i32, 1_i32, 3_i32));
+ assert_eq!(v.ywxw(), ivec4(2_i32, 4_i32, 1_i32, 4_i32));
+ assert_eq!(v.ywyx(), ivec4(2_i32, 4_i32, 2_i32, 1_i32));
+ assert_eq!(v.ywyy(), ivec4(2_i32, 4_i32, 2_i32, 2_i32));
+ assert_eq!(v.ywyz(), ivec4(2_i32, 4_i32, 2_i32, 3_i32));
+ assert_eq!(v.ywyw(), ivec4(2_i32, 4_i32, 2_i32, 4_i32));
+ assert_eq!(v.ywzx(), ivec4(2_i32, 4_i32, 3_i32, 1_i32));
+ assert_eq!(v.ywzy(), ivec4(2_i32, 4_i32, 3_i32, 2_i32));
+ assert_eq!(v.ywzz(), ivec4(2_i32, 4_i32, 3_i32, 3_i32));
+ assert_eq!(v.ywzw(), ivec4(2_i32, 4_i32, 3_i32, 4_i32));
+ assert_eq!(v.ywwx(), ivec4(2_i32, 4_i32, 4_i32, 1_i32));
+ assert_eq!(v.ywwy(), ivec4(2_i32, 4_i32, 4_i32, 2_i32));
+ assert_eq!(v.ywwz(), ivec4(2_i32, 4_i32, 4_i32, 3_i32));
+ assert_eq!(v.ywww(), ivec4(2_i32, 4_i32, 4_i32, 4_i32));
+ assert_eq!(v.zxxx(), ivec4(3_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.zxxy(), ivec4(3_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.zxxz(), ivec4(3_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.zxxw(), ivec4(3_i32, 1_i32, 1_i32, 4_i32));
+ assert_eq!(v.zxyx(), ivec4(3_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.zxyy(), ivec4(3_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.zxyz(), ivec4(3_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.zxyw(), ivec4(3_i32, 1_i32, 2_i32, 4_i32));
+ assert_eq!(v.zxzx(), ivec4(3_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.zxzy(), ivec4(3_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.zxzz(), ivec4(3_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.zxzw(), ivec4(3_i32, 1_i32, 3_i32, 4_i32));
+ assert_eq!(v.zxwx(), ivec4(3_i32, 1_i32, 4_i32, 1_i32));
+ assert_eq!(v.zxwy(), ivec4(3_i32, 1_i32, 4_i32, 2_i32));
+ assert_eq!(v.zxwz(), ivec4(3_i32, 1_i32, 4_i32, 3_i32));
+ assert_eq!(v.zxww(), ivec4(3_i32, 1_i32, 4_i32, 4_i32));
+ assert_eq!(v.zyxx(), ivec4(3_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.zyxy(), ivec4(3_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.zyxz(), ivec4(3_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.zyxw(), ivec4(3_i32, 2_i32, 1_i32, 4_i32));
+ assert_eq!(v.zyyx(), ivec4(3_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.zyyy(), ivec4(3_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.zyyz(), ivec4(3_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.zyyw(), ivec4(3_i32, 2_i32, 2_i32, 4_i32));
+ assert_eq!(v.zyzx(), ivec4(3_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.zyzy(), ivec4(3_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.zyzz(), ivec4(3_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.zyzw(), ivec4(3_i32, 2_i32, 3_i32, 4_i32));
+ assert_eq!(v.zywx(), ivec4(3_i32, 2_i32, 4_i32, 1_i32));
+ assert_eq!(v.zywy(), ivec4(3_i32, 2_i32, 4_i32, 2_i32));
+ assert_eq!(v.zywz(), ivec4(3_i32, 2_i32, 4_i32, 3_i32));
+ assert_eq!(v.zyww(), ivec4(3_i32, 2_i32, 4_i32, 4_i32));
+ assert_eq!(v.zzxx(), ivec4(3_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.zzxy(), ivec4(3_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.zzxz(), ivec4(3_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.zzxw(), ivec4(3_i32, 3_i32, 1_i32, 4_i32));
+ assert_eq!(v.zzyx(), ivec4(3_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.zzyy(), ivec4(3_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.zzyz(), ivec4(3_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.zzyw(), ivec4(3_i32, 3_i32, 2_i32, 4_i32));
+ assert_eq!(v.zzzx(), ivec4(3_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.zzzy(), ivec4(3_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.zzzz(), ivec4(3_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.zzzw(), ivec4(3_i32, 3_i32, 3_i32, 4_i32));
+ assert_eq!(v.zzwx(), ivec4(3_i32, 3_i32, 4_i32, 1_i32));
+ assert_eq!(v.zzwy(), ivec4(3_i32, 3_i32, 4_i32, 2_i32));
+ assert_eq!(v.zzwz(), ivec4(3_i32, 3_i32, 4_i32, 3_i32));
+ assert_eq!(v.zzww(), ivec4(3_i32, 3_i32, 4_i32, 4_i32));
+ assert_eq!(v.zwxx(), ivec4(3_i32, 4_i32, 1_i32, 1_i32));
+ assert_eq!(v.zwxy(), ivec4(3_i32, 4_i32, 1_i32, 2_i32));
+ assert_eq!(v.zwxz(), ivec4(3_i32, 4_i32, 1_i32, 3_i32));
+ assert_eq!(v.zwxw(), ivec4(3_i32, 4_i32, 1_i32, 4_i32));
+ assert_eq!(v.zwyx(), ivec4(3_i32, 4_i32, 2_i32, 1_i32));
+ assert_eq!(v.zwyy(), ivec4(3_i32, 4_i32, 2_i32, 2_i32));
+ assert_eq!(v.zwyz(), ivec4(3_i32, 4_i32, 2_i32, 3_i32));
+ assert_eq!(v.zwyw(), ivec4(3_i32, 4_i32, 2_i32, 4_i32));
+ assert_eq!(v.zwzx(), ivec4(3_i32, 4_i32, 3_i32, 1_i32));
+ assert_eq!(v.zwzy(), ivec4(3_i32, 4_i32, 3_i32, 2_i32));
+ assert_eq!(v.zwzz(), ivec4(3_i32, 4_i32, 3_i32, 3_i32));
+ assert_eq!(v.zwzw(), ivec4(3_i32, 4_i32, 3_i32, 4_i32));
+ assert_eq!(v.zwwx(), ivec4(3_i32, 4_i32, 4_i32, 1_i32));
+ assert_eq!(v.zwwy(), ivec4(3_i32, 4_i32, 4_i32, 2_i32));
+ assert_eq!(v.zwwz(), ivec4(3_i32, 4_i32, 4_i32, 3_i32));
+ assert_eq!(v.zwww(), ivec4(3_i32, 4_i32, 4_i32, 4_i32));
+ assert_eq!(v.wxxx(), ivec4(4_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.wxxy(), ivec4(4_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.wxxz(), ivec4(4_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.wxxw(), ivec4(4_i32, 1_i32, 1_i32, 4_i32));
+ assert_eq!(v.wxyx(), ivec4(4_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.wxyy(), ivec4(4_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.wxyz(), ivec4(4_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.wxyw(), ivec4(4_i32, 1_i32, 2_i32, 4_i32));
+ assert_eq!(v.wxzx(), ivec4(4_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.wxzy(), ivec4(4_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.wxzz(), ivec4(4_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.wxzw(), ivec4(4_i32, 1_i32, 3_i32, 4_i32));
+ assert_eq!(v.wxwx(), ivec4(4_i32, 1_i32, 4_i32, 1_i32));
+ assert_eq!(v.wxwy(), ivec4(4_i32, 1_i32, 4_i32, 2_i32));
+ assert_eq!(v.wxwz(), ivec4(4_i32, 1_i32, 4_i32, 3_i32));
+ assert_eq!(v.wxww(), ivec4(4_i32, 1_i32, 4_i32, 4_i32));
+ assert_eq!(v.wyxx(), ivec4(4_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.wyxy(), ivec4(4_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.wyxz(), ivec4(4_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.wyxw(), ivec4(4_i32, 2_i32, 1_i32, 4_i32));
+ assert_eq!(v.wyyx(), ivec4(4_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.wyyy(), ivec4(4_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.wyyz(), ivec4(4_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.wyyw(), ivec4(4_i32, 2_i32, 2_i32, 4_i32));
+ assert_eq!(v.wyzx(), ivec4(4_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.wyzy(), ivec4(4_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.wyzz(), ivec4(4_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.wyzw(), ivec4(4_i32, 2_i32, 3_i32, 4_i32));
+ assert_eq!(v.wywx(), ivec4(4_i32, 2_i32, 4_i32, 1_i32));
+ assert_eq!(v.wywy(), ivec4(4_i32, 2_i32, 4_i32, 2_i32));
+ assert_eq!(v.wywz(), ivec4(4_i32, 2_i32, 4_i32, 3_i32));
+ assert_eq!(v.wyww(), ivec4(4_i32, 2_i32, 4_i32, 4_i32));
+ assert_eq!(v.wzxx(), ivec4(4_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.wzxy(), ivec4(4_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.wzxz(), ivec4(4_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.wzxw(), ivec4(4_i32, 3_i32, 1_i32, 4_i32));
+ assert_eq!(v.wzyx(), ivec4(4_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.wzyy(), ivec4(4_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.wzyz(), ivec4(4_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.wzyw(), ivec4(4_i32, 3_i32, 2_i32, 4_i32));
+ assert_eq!(v.wzzx(), ivec4(4_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.wzzy(), ivec4(4_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.wzzz(), ivec4(4_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.wzzw(), ivec4(4_i32, 3_i32, 3_i32, 4_i32));
+ assert_eq!(v.wzwx(), ivec4(4_i32, 3_i32, 4_i32, 1_i32));
+ assert_eq!(v.wzwy(), ivec4(4_i32, 3_i32, 4_i32, 2_i32));
+ assert_eq!(v.wzwz(), ivec4(4_i32, 3_i32, 4_i32, 3_i32));
+ assert_eq!(v.wzww(), ivec4(4_i32, 3_i32, 4_i32, 4_i32));
+ assert_eq!(v.wwxx(), ivec4(4_i32, 4_i32, 1_i32, 1_i32));
+ assert_eq!(v.wwxy(), ivec4(4_i32, 4_i32, 1_i32, 2_i32));
+ assert_eq!(v.wwxz(), ivec4(4_i32, 4_i32, 1_i32, 3_i32));
+ assert_eq!(v.wwxw(), ivec4(4_i32, 4_i32, 1_i32, 4_i32));
+ assert_eq!(v.wwyx(), ivec4(4_i32, 4_i32, 2_i32, 1_i32));
+ assert_eq!(v.wwyy(), ivec4(4_i32, 4_i32, 2_i32, 2_i32));
+ assert_eq!(v.wwyz(), ivec4(4_i32, 4_i32, 2_i32, 3_i32));
+ assert_eq!(v.wwyw(), ivec4(4_i32, 4_i32, 2_i32, 4_i32));
+ assert_eq!(v.wwzx(), ivec4(4_i32, 4_i32, 3_i32, 1_i32));
+ assert_eq!(v.wwzy(), ivec4(4_i32, 4_i32, 3_i32, 2_i32));
+ assert_eq!(v.wwzz(), ivec4(4_i32, 4_i32, 3_i32, 3_i32));
+ assert_eq!(v.wwzw(), ivec4(4_i32, 4_i32, 3_i32, 4_i32));
+ assert_eq!(v.wwwx(), ivec4(4_i32, 4_i32, 4_i32, 1_i32));
+ assert_eq!(v.wwwy(), ivec4(4_i32, 4_i32, 4_i32, 2_i32));
+ assert_eq!(v.wwwz(), ivec4(4_i32, 4_i32, 4_i32, 3_i32));
+ assert_eq!(v.wwww(), ivec4(4_i32, 4_i32, 4_i32, 4_i32));
+ assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xxz(), ivec3(1_i32, 1_i32, 3_i32));
+ assert_eq!(v.xxw(), ivec3(1_i32, 1_i32, 4_i32));
+ assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
+ assert_eq!(v.xyz(), ivec3(1_i32, 2_i32, 3_i32));
+ assert_eq!(v.xyw(), ivec3(1_i32, 2_i32, 4_i32));
+ assert_eq!(v.xzx(), ivec3(1_i32, 3_i32, 1_i32));
+ assert_eq!(v.xzy(), ivec3(1_i32, 3_i32, 2_i32));
+ assert_eq!(v.xzz(), ivec3(1_i32, 3_i32, 3_i32));
+ assert_eq!(v.xzw(), ivec3(1_i32, 3_i32, 4_i32));
+ assert_eq!(v.xwx(), ivec3(1_i32, 4_i32, 1_i32));
+ assert_eq!(v.xwy(), ivec3(1_i32, 4_i32, 2_i32));
+ assert_eq!(v.xwz(), ivec3(1_i32, 4_i32, 3_i32));
+ assert_eq!(v.xww(), ivec3(1_i32, 4_i32, 4_i32));
+ assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yxz(), ivec3(2_i32, 1_i32, 3_i32));
+ assert_eq!(v.yxw(), ivec3(2_i32, 1_i32, 4_i32));
+ assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
+ assert_eq!(v.yyz(), ivec3(2_i32, 2_i32, 3_i32));
+ assert_eq!(v.yyw(), ivec3(2_i32, 2_i32, 4_i32));
+ assert_eq!(v.yzx(), ivec3(2_i32, 3_i32, 1_i32));
+ assert_eq!(v.yzy(), ivec3(2_i32, 3_i32, 2_i32));
+ assert_eq!(v.yzz(), ivec3(2_i32, 3_i32, 3_i32));
+ assert_eq!(v.yzw(), ivec3(2_i32, 3_i32, 4_i32));
+ assert_eq!(v.ywx(), ivec3(2_i32, 4_i32, 1_i32));
+ assert_eq!(v.ywy(), ivec3(2_i32, 4_i32, 2_i32));
+ assert_eq!(v.ywz(), ivec3(2_i32, 4_i32, 3_i32));
+ assert_eq!(v.yww(), ivec3(2_i32, 4_i32, 4_i32));
+ assert_eq!(v.zxx(), ivec3(3_i32, 1_i32, 1_i32));
+ assert_eq!(v.zxy(), ivec3(3_i32, 1_i32, 2_i32));
+ assert_eq!(v.zxz(), ivec3(3_i32, 1_i32, 3_i32));
+ assert_eq!(v.zxw(), ivec3(3_i32, 1_i32, 4_i32));
+ assert_eq!(v.zyx(), ivec3(3_i32, 2_i32, 1_i32));
+ assert_eq!(v.zyy(), ivec3(3_i32, 2_i32, 2_i32));
+ assert_eq!(v.zyz(), ivec3(3_i32, 2_i32, 3_i32));
+ assert_eq!(v.zyw(), ivec3(3_i32, 2_i32, 4_i32));
+ assert_eq!(v.zzx(), ivec3(3_i32, 3_i32, 1_i32));
+ assert_eq!(v.zzy(), ivec3(3_i32, 3_i32, 2_i32));
+ assert_eq!(v.zzz(), ivec3(3_i32, 3_i32, 3_i32));
+ assert_eq!(v.zzw(), ivec3(3_i32, 3_i32, 4_i32));
+ assert_eq!(v.zwx(), ivec3(3_i32, 4_i32, 1_i32));
+ assert_eq!(v.zwy(), ivec3(3_i32, 4_i32, 2_i32));
+ assert_eq!(v.zwz(), ivec3(3_i32, 4_i32, 3_i32));
+ assert_eq!(v.zww(), ivec3(3_i32, 4_i32, 4_i32));
+ assert_eq!(v.wxx(), ivec3(4_i32, 1_i32, 1_i32));
+ assert_eq!(v.wxy(), ivec3(4_i32, 1_i32, 2_i32));
+ assert_eq!(v.wxz(), ivec3(4_i32, 1_i32, 3_i32));
+ assert_eq!(v.wxw(), ivec3(4_i32, 1_i32, 4_i32));
+ assert_eq!(v.wyx(), ivec3(4_i32, 2_i32, 1_i32));
+ assert_eq!(v.wyy(), ivec3(4_i32, 2_i32, 2_i32));
+ assert_eq!(v.wyz(), ivec3(4_i32, 2_i32, 3_i32));
+ assert_eq!(v.wyw(), ivec3(4_i32, 2_i32, 4_i32));
+ assert_eq!(v.wzx(), ivec3(4_i32, 3_i32, 1_i32));
+ assert_eq!(v.wzy(), ivec3(4_i32, 3_i32, 2_i32));
+ assert_eq!(v.wzz(), ivec3(4_i32, 3_i32, 3_i32));
+ assert_eq!(v.wzw(), ivec3(4_i32, 3_i32, 4_i32));
+ assert_eq!(v.wwx(), ivec3(4_i32, 4_i32, 1_i32));
+ assert_eq!(v.wwy(), ivec3(4_i32, 4_i32, 2_i32));
+ assert_eq!(v.wwz(), ivec3(4_i32, 4_i32, 3_i32));
+ assert_eq!(v.www(), ivec3(4_i32, 4_i32, 4_i32));
+ assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
+ assert_eq!(v.xy(), ivec2(1_i32, 2_i32));
+ assert_eq!(v.xz(), ivec2(1_i32, 3_i32));
+ assert_eq!(v.xw(), ivec2(1_i32, 4_i32));
+ assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
+ assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
+ assert_eq!(v.yz(), ivec2(2_i32, 3_i32));
+ assert_eq!(v.yw(), ivec2(2_i32, 4_i32));
+ assert_eq!(v.zx(), ivec2(3_i32, 1_i32));
+ assert_eq!(v.zy(), ivec2(3_i32, 2_i32));
+ assert_eq!(v.zz(), ivec2(3_i32, 3_i32));
+ assert_eq!(v.zw(), ivec2(3_i32, 4_i32));
+ assert_eq!(v.wx(), ivec2(4_i32, 1_i32));
+ assert_eq!(v.wy(), ivec2(4_i32, 2_i32));
+ assert_eq!(v.wz(), ivec2(4_i32, 3_i32));
+ assert_eq!(v.ww(), ivec2(4_i32, 4_i32));
+});
+
+glam_test!(test_ivec3_swizzles, {
+ let v = ivec3(1_i32, 2_i32, 3_i32);
+ assert_eq!(v, v.xyz());
+ assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xxxz(), ivec4(1_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.xxyz(), ivec4(1_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.xxzx(), ivec4(1_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.xxzy(), ivec4(1_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.xxzz(), ivec4(1_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.xyxz(), ivec4(1_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.xyyz(), ivec4(1_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.xyzx(), ivec4(1_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.xyzy(), ivec4(1_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.xyzz(), ivec4(1_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.xzxx(), ivec4(1_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.xzxy(), ivec4(1_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.xzxz(), ivec4(1_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.xzyx(), ivec4(1_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.xzyy(), ivec4(1_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.xzyz(), ivec4(1_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.xzzx(), ivec4(1_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.xzzy(), ivec4(1_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.xzzz(), ivec4(1_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.yxxz(), ivec4(2_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.yxyz(), ivec4(2_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.yxzx(), ivec4(2_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.yxzy(), ivec4(2_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.yxzz(), ivec4(2_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yyxz(), ivec4(2_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.yyyz(), ivec4(2_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.yyzx(), ivec4(2_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.yyzy(), ivec4(2_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.yyzz(), ivec4(2_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.yzxx(), ivec4(2_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.yzxy(), ivec4(2_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.yzxz(), ivec4(2_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.yzyx(), ivec4(2_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.yzyy(), ivec4(2_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.yzyz(), ivec4(2_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.yzzx(), ivec4(2_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.yzzy(), ivec4(2_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.yzzz(), ivec4(2_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.zxxx(), ivec4(3_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.zxxy(), ivec4(3_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.zxxz(), ivec4(3_i32, 1_i32, 1_i32, 3_i32));
+ assert_eq!(v.zxyx(), ivec4(3_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.zxyy(), ivec4(3_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.zxyz(), ivec4(3_i32, 1_i32, 2_i32, 3_i32));
+ assert_eq!(v.zxzx(), ivec4(3_i32, 1_i32, 3_i32, 1_i32));
+ assert_eq!(v.zxzy(), ivec4(3_i32, 1_i32, 3_i32, 2_i32));
+ assert_eq!(v.zxzz(), ivec4(3_i32, 1_i32, 3_i32, 3_i32));
+ assert_eq!(v.zyxx(), ivec4(3_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.zyxy(), ivec4(3_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.zyxz(), ivec4(3_i32, 2_i32, 1_i32, 3_i32));
+ assert_eq!(v.zyyx(), ivec4(3_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.zyyy(), ivec4(3_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.zyyz(), ivec4(3_i32, 2_i32, 2_i32, 3_i32));
+ assert_eq!(v.zyzx(), ivec4(3_i32, 2_i32, 3_i32, 1_i32));
+ assert_eq!(v.zyzy(), ivec4(3_i32, 2_i32, 3_i32, 2_i32));
+ assert_eq!(v.zyzz(), ivec4(3_i32, 2_i32, 3_i32, 3_i32));
+ assert_eq!(v.zzxx(), ivec4(3_i32, 3_i32, 1_i32, 1_i32));
+ assert_eq!(v.zzxy(), ivec4(3_i32, 3_i32, 1_i32, 2_i32));
+ assert_eq!(v.zzxz(), ivec4(3_i32, 3_i32, 1_i32, 3_i32));
+ assert_eq!(v.zzyx(), ivec4(3_i32, 3_i32, 2_i32, 1_i32));
+ assert_eq!(v.zzyy(), ivec4(3_i32, 3_i32, 2_i32, 2_i32));
+ assert_eq!(v.zzyz(), ivec4(3_i32, 3_i32, 2_i32, 3_i32));
+ assert_eq!(v.zzzx(), ivec4(3_i32, 3_i32, 3_i32, 1_i32));
+ assert_eq!(v.zzzy(), ivec4(3_i32, 3_i32, 3_i32, 2_i32));
+ assert_eq!(v.zzzz(), ivec4(3_i32, 3_i32, 3_i32, 3_i32));
+ assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xxz(), ivec3(1_i32, 1_i32, 3_i32));
+ assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
+ assert_eq!(v.xzx(), ivec3(1_i32, 3_i32, 1_i32));
+ assert_eq!(v.xzy(), ivec3(1_i32, 3_i32, 2_i32));
+ assert_eq!(v.xzz(), ivec3(1_i32, 3_i32, 3_i32));
+ assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yxz(), ivec3(2_i32, 1_i32, 3_i32));
+ assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
+ assert_eq!(v.yyz(), ivec3(2_i32, 2_i32, 3_i32));
+ assert_eq!(v.yzx(), ivec3(2_i32, 3_i32, 1_i32));
+ assert_eq!(v.yzy(), ivec3(2_i32, 3_i32, 2_i32));
+ assert_eq!(v.yzz(), ivec3(2_i32, 3_i32, 3_i32));
+ assert_eq!(v.zxx(), ivec3(3_i32, 1_i32, 1_i32));
+ assert_eq!(v.zxy(), ivec3(3_i32, 1_i32, 2_i32));
+ assert_eq!(v.zxz(), ivec3(3_i32, 1_i32, 3_i32));
+ assert_eq!(v.zyx(), ivec3(3_i32, 2_i32, 1_i32));
+ assert_eq!(v.zyy(), ivec3(3_i32, 2_i32, 2_i32));
+ assert_eq!(v.zyz(), ivec3(3_i32, 2_i32, 3_i32));
+ assert_eq!(v.zzx(), ivec3(3_i32, 3_i32, 1_i32));
+ assert_eq!(v.zzy(), ivec3(3_i32, 3_i32, 2_i32));
+ assert_eq!(v.zzz(), ivec3(3_i32, 3_i32, 3_i32));
+ assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
+ assert_eq!(v.xy(), ivec2(1_i32, 2_i32));
+ assert_eq!(v.xz(), ivec2(1_i32, 3_i32));
+ assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
+ assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
+ assert_eq!(v.yz(), ivec2(2_i32, 3_i32));
+ assert_eq!(v.zx(), ivec2(3_i32, 1_i32));
+ assert_eq!(v.zy(), ivec2(3_i32, 2_i32));
+ assert_eq!(v.zz(), ivec2(3_i32, 3_i32));
+});
+
+glam_test!(test_ivec2_swizzles, {
+ let v = ivec2(1_i32, 2_i32);
+ assert_eq!(v, v.xy());
+ assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
+ assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
+ assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
+ assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
+ assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
+ assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
+ assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
+ assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
+ assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
+ assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
+ assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
+ assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
+ assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
+ assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
+ assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
+});
diff --git a/tests/swizzles_u32.rs b/tests/swizzles_u32.rs
new file mode 100644
index 0000000..8649be1
--- /dev/null
+++ b/tests/swizzles_u32.rs
@@ -0,0 +1,497 @@
+// Generated by swizzlegen. Do not edit.
+#[macro_use]
+mod support;
+use glam::*;
+
+glam_test!(test_uvec4_swizzles, {
+ let v = uvec4(1_u32, 2_u32, 3_u32, 4_u32);
+ assert_eq!(v, v.xyzw());
+ assert_eq!(v.xxxx(), uvec4(1_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxxy(), uvec4(1_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xxxz(), uvec4(1_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.xxxw(), uvec4(1_u32, 1_u32, 1_u32, 4_u32));
+ assert_eq!(v.xxyx(), uvec4(1_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xxyy(), uvec4(1_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.xxyz(), uvec4(1_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.xxyw(), uvec4(1_u32, 1_u32, 2_u32, 4_u32));
+ assert_eq!(v.xxzx(), uvec4(1_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.xxzy(), uvec4(1_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.xxzz(), uvec4(1_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.xxzw(), uvec4(1_u32, 1_u32, 3_u32, 4_u32));
+ assert_eq!(v.xxwx(), uvec4(1_u32, 1_u32, 4_u32, 1_u32));
+ assert_eq!(v.xxwy(), uvec4(1_u32, 1_u32, 4_u32, 2_u32));
+ assert_eq!(v.xxwz(), uvec4(1_u32, 1_u32, 4_u32, 3_u32));
+ assert_eq!(v.xxww(), uvec4(1_u32, 1_u32, 4_u32, 4_u32));
+ assert_eq!(v.xyxx(), uvec4(1_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.xyxy(), uvec4(1_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.xyxz(), uvec4(1_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.xyxw(), uvec4(1_u32, 2_u32, 1_u32, 4_u32));
+ assert_eq!(v.xyyx(), uvec4(1_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyyy(), uvec4(1_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.xyyz(), uvec4(1_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.xyyw(), uvec4(1_u32, 2_u32, 2_u32, 4_u32));
+ assert_eq!(v.xyzx(), uvec4(1_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.xyzy(), uvec4(1_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.xyzz(), uvec4(1_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.xywx(), uvec4(1_u32, 2_u32, 4_u32, 1_u32));
+ assert_eq!(v.xywy(), uvec4(1_u32, 2_u32, 4_u32, 2_u32));
+ assert_eq!(v.xywz(), uvec4(1_u32, 2_u32, 4_u32, 3_u32));
+ assert_eq!(v.xyww(), uvec4(1_u32, 2_u32, 4_u32, 4_u32));
+ assert_eq!(v.xzxx(), uvec4(1_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.xzxy(), uvec4(1_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.xzxz(), uvec4(1_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.xzxw(), uvec4(1_u32, 3_u32, 1_u32, 4_u32));
+ assert_eq!(v.xzyx(), uvec4(1_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.xzyy(), uvec4(1_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.xzyz(), uvec4(1_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.xzyw(), uvec4(1_u32, 3_u32, 2_u32, 4_u32));
+ assert_eq!(v.xzzx(), uvec4(1_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.xzzy(), uvec4(1_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.xzzz(), uvec4(1_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.xzzw(), uvec4(1_u32, 3_u32, 3_u32, 4_u32));
+ assert_eq!(v.xzwx(), uvec4(1_u32, 3_u32, 4_u32, 1_u32));
+ assert_eq!(v.xzwy(), uvec4(1_u32, 3_u32, 4_u32, 2_u32));
+ assert_eq!(v.xzwz(), uvec4(1_u32, 3_u32, 4_u32, 3_u32));
+ assert_eq!(v.xzww(), uvec4(1_u32, 3_u32, 4_u32, 4_u32));
+ assert_eq!(v.xwxx(), uvec4(1_u32, 4_u32, 1_u32, 1_u32));
+ assert_eq!(v.xwxy(), uvec4(1_u32, 4_u32, 1_u32, 2_u32));
+ assert_eq!(v.xwxz(), uvec4(1_u32, 4_u32, 1_u32, 3_u32));
+ assert_eq!(v.xwxw(), uvec4(1_u32, 4_u32, 1_u32, 4_u32));
+ assert_eq!(v.xwyx(), uvec4(1_u32, 4_u32, 2_u32, 1_u32));
+ assert_eq!(v.xwyy(), uvec4(1_u32, 4_u32, 2_u32, 2_u32));
+ assert_eq!(v.xwyz(), uvec4(1_u32, 4_u32, 2_u32, 3_u32));
+ assert_eq!(v.xwyw(), uvec4(1_u32, 4_u32, 2_u32, 4_u32));
+ assert_eq!(v.xwzx(), uvec4(1_u32, 4_u32, 3_u32, 1_u32));
+ assert_eq!(v.xwzy(), uvec4(1_u32, 4_u32, 3_u32, 2_u32));
+ assert_eq!(v.xwzz(), uvec4(1_u32, 4_u32, 3_u32, 3_u32));
+ assert_eq!(v.xwzw(), uvec4(1_u32, 4_u32, 3_u32, 4_u32));
+ assert_eq!(v.xwwx(), uvec4(1_u32, 4_u32, 4_u32, 1_u32));
+ assert_eq!(v.xwwy(), uvec4(1_u32, 4_u32, 4_u32, 2_u32));
+ assert_eq!(v.xwwz(), uvec4(1_u32, 4_u32, 4_u32, 3_u32));
+ assert_eq!(v.xwww(), uvec4(1_u32, 4_u32, 4_u32, 4_u32));
+ assert_eq!(v.yxxx(), uvec4(2_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxxy(), uvec4(2_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.yxxz(), uvec4(2_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.yxxw(), uvec4(2_u32, 1_u32, 1_u32, 4_u32));
+ assert_eq!(v.yxyx(), uvec4(2_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.yxyy(), uvec4(2_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.yxyz(), uvec4(2_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.yxyw(), uvec4(2_u32, 1_u32, 2_u32, 4_u32));
+ assert_eq!(v.yxzx(), uvec4(2_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.yxzy(), uvec4(2_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.yxzz(), uvec4(2_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.yxzw(), uvec4(2_u32, 1_u32, 3_u32, 4_u32));
+ assert_eq!(v.yxwx(), uvec4(2_u32, 1_u32, 4_u32, 1_u32));
+ assert_eq!(v.yxwy(), uvec4(2_u32, 1_u32, 4_u32, 2_u32));
+ assert_eq!(v.yxwz(), uvec4(2_u32, 1_u32, 4_u32, 3_u32));
+ assert_eq!(v.yxww(), uvec4(2_u32, 1_u32, 4_u32, 4_u32));
+ assert_eq!(v.yyxx(), uvec4(2_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yyxy(), uvec4(2_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yyxz(), uvec4(2_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.yyxw(), uvec4(2_u32, 2_u32, 1_u32, 4_u32));
+ assert_eq!(v.yyyx(), uvec4(2_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyyy(), uvec4(2_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.yyyz(), uvec4(2_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.yyyw(), uvec4(2_u32, 2_u32, 2_u32, 4_u32));
+ assert_eq!(v.yyzx(), uvec4(2_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.yyzy(), uvec4(2_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.yyzz(), uvec4(2_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.yyzw(), uvec4(2_u32, 2_u32, 3_u32, 4_u32));
+ assert_eq!(v.yywx(), uvec4(2_u32, 2_u32, 4_u32, 1_u32));
+ assert_eq!(v.yywy(), uvec4(2_u32, 2_u32, 4_u32, 2_u32));
+ assert_eq!(v.yywz(), uvec4(2_u32, 2_u32, 4_u32, 3_u32));
+ assert_eq!(v.yyww(), uvec4(2_u32, 2_u32, 4_u32, 4_u32));
+ assert_eq!(v.yzxx(), uvec4(2_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.yzxy(), uvec4(2_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.yzxz(), uvec4(2_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.yzxw(), uvec4(2_u32, 3_u32, 1_u32, 4_u32));
+ assert_eq!(v.yzyx(), uvec4(2_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.yzyy(), uvec4(2_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.yzyz(), uvec4(2_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.yzyw(), uvec4(2_u32, 3_u32, 2_u32, 4_u32));
+ assert_eq!(v.yzzx(), uvec4(2_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.yzzy(), uvec4(2_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.yzzz(), uvec4(2_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.yzzw(), uvec4(2_u32, 3_u32, 3_u32, 4_u32));
+ assert_eq!(v.yzwx(), uvec4(2_u32, 3_u32, 4_u32, 1_u32));
+ assert_eq!(v.yzwy(), uvec4(2_u32, 3_u32, 4_u32, 2_u32));
+ assert_eq!(v.yzwz(), uvec4(2_u32, 3_u32, 4_u32, 3_u32));
+ assert_eq!(v.yzww(), uvec4(2_u32, 3_u32, 4_u32, 4_u32));
+ assert_eq!(v.ywxx(), uvec4(2_u32, 4_u32, 1_u32, 1_u32));
+ assert_eq!(v.ywxy(), uvec4(2_u32, 4_u32, 1_u32, 2_u32));
+ assert_eq!(v.ywxz(), uvec4(2_u32, 4_u32, 1_u32, 3_u32));
+ assert_eq!(v.ywxw(), uvec4(2_u32, 4_u32, 1_u32, 4_u32));
+ assert_eq!(v.ywyx(), uvec4(2_u32, 4_u32, 2_u32, 1_u32));
+ assert_eq!(v.ywyy(), uvec4(2_u32, 4_u32, 2_u32, 2_u32));
+ assert_eq!(v.ywyz(), uvec4(2_u32, 4_u32, 2_u32, 3_u32));
+ assert_eq!(v.ywyw(), uvec4(2_u32, 4_u32, 2_u32, 4_u32));
+ assert_eq!(v.ywzx(), uvec4(2_u32, 4_u32, 3_u32, 1_u32));
+ assert_eq!(v.ywzy(), uvec4(2_u32, 4_u32, 3_u32, 2_u32));
+ assert_eq!(v.ywzz(), uvec4(2_u32, 4_u32, 3_u32, 3_u32));
+ assert_eq!(v.ywzw(), uvec4(2_u32, 4_u32, 3_u32, 4_u32));
+ assert_eq!(v.ywwx(), uvec4(2_u32, 4_u32, 4_u32, 1_u32));
+ assert_eq!(v.ywwy(), uvec4(2_u32, 4_u32, 4_u32, 2_u32));
+ assert_eq!(v.ywwz(), uvec4(2_u32, 4_u32, 4_u32, 3_u32));
+ assert_eq!(v.ywww(), uvec4(2_u32, 4_u32, 4_u32, 4_u32));
+ assert_eq!(v.zxxx(), uvec4(3_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.zxxy(), uvec4(3_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.zxxz(), uvec4(3_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.zxxw(), uvec4(3_u32, 1_u32, 1_u32, 4_u32));
+ assert_eq!(v.zxyx(), uvec4(3_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.zxyy(), uvec4(3_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.zxyz(), uvec4(3_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.zxyw(), uvec4(3_u32, 1_u32, 2_u32, 4_u32));
+ assert_eq!(v.zxzx(), uvec4(3_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.zxzy(), uvec4(3_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.zxzz(), uvec4(3_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.zxzw(), uvec4(3_u32, 1_u32, 3_u32, 4_u32));
+ assert_eq!(v.zxwx(), uvec4(3_u32, 1_u32, 4_u32, 1_u32));
+ assert_eq!(v.zxwy(), uvec4(3_u32, 1_u32, 4_u32, 2_u32));
+ assert_eq!(v.zxwz(), uvec4(3_u32, 1_u32, 4_u32, 3_u32));
+ assert_eq!(v.zxww(), uvec4(3_u32, 1_u32, 4_u32, 4_u32));
+ assert_eq!(v.zyxx(), uvec4(3_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.zyxy(), uvec4(3_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.zyxz(), uvec4(3_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.zyxw(), uvec4(3_u32, 2_u32, 1_u32, 4_u32));
+ assert_eq!(v.zyyx(), uvec4(3_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.zyyy(), uvec4(3_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.zyyz(), uvec4(3_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.zyyw(), uvec4(3_u32, 2_u32, 2_u32, 4_u32));
+ assert_eq!(v.zyzx(), uvec4(3_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.zyzy(), uvec4(3_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.zyzz(), uvec4(3_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.zyzw(), uvec4(3_u32, 2_u32, 3_u32, 4_u32));
+ assert_eq!(v.zywx(), uvec4(3_u32, 2_u32, 4_u32, 1_u32));
+ assert_eq!(v.zywy(), uvec4(3_u32, 2_u32, 4_u32, 2_u32));
+ assert_eq!(v.zywz(), uvec4(3_u32, 2_u32, 4_u32, 3_u32));
+ assert_eq!(v.zyww(), uvec4(3_u32, 2_u32, 4_u32, 4_u32));
+ assert_eq!(v.zzxx(), uvec4(3_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.zzxy(), uvec4(3_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.zzxz(), uvec4(3_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.zzxw(), uvec4(3_u32, 3_u32, 1_u32, 4_u32));
+ assert_eq!(v.zzyx(), uvec4(3_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.zzyy(), uvec4(3_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.zzyz(), uvec4(3_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.zzyw(), uvec4(3_u32, 3_u32, 2_u32, 4_u32));
+ assert_eq!(v.zzzx(), uvec4(3_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.zzzy(), uvec4(3_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.zzzz(), uvec4(3_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.zzzw(), uvec4(3_u32, 3_u32, 3_u32, 4_u32));
+ assert_eq!(v.zzwx(), uvec4(3_u32, 3_u32, 4_u32, 1_u32));
+ assert_eq!(v.zzwy(), uvec4(3_u32, 3_u32, 4_u32, 2_u32));
+ assert_eq!(v.zzwz(), uvec4(3_u32, 3_u32, 4_u32, 3_u32));
+ assert_eq!(v.zzww(), uvec4(3_u32, 3_u32, 4_u32, 4_u32));
+ assert_eq!(v.zwxx(), uvec4(3_u32, 4_u32, 1_u32, 1_u32));
+ assert_eq!(v.zwxy(), uvec4(3_u32, 4_u32, 1_u32, 2_u32));
+ assert_eq!(v.zwxz(), uvec4(3_u32, 4_u32, 1_u32, 3_u32));
+ assert_eq!(v.zwxw(), uvec4(3_u32, 4_u32, 1_u32, 4_u32));
+ assert_eq!(v.zwyx(), uvec4(3_u32, 4_u32, 2_u32, 1_u32));
+ assert_eq!(v.zwyy(), uvec4(3_u32, 4_u32, 2_u32, 2_u32));
+ assert_eq!(v.zwyz(), uvec4(3_u32, 4_u32, 2_u32, 3_u32));
+ assert_eq!(v.zwyw(), uvec4(3_u32, 4_u32, 2_u32, 4_u32));
+ assert_eq!(v.zwzx(), uvec4(3_u32, 4_u32, 3_u32, 1_u32));
+ assert_eq!(v.zwzy(), uvec4(3_u32, 4_u32, 3_u32, 2_u32));
+ assert_eq!(v.zwzz(), uvec4(3_u32, 4_u32, 3_u32, 3_u32));
+ assert_eq!(v.zwzw(), uvec4(3_u32, 4_u32, 3_u32, 4_u32));
+ assert_eq!(v.zwwx(), uvec4(3_u32, 4_u32, 4_u32, 1_u32));
+ assert_eq!(v.zwwy(), uvec4(3_u32, 4_u32, 4_u32, 2_u32));
+ assert_eq!(v.zwwz(), uvec4(3_u32, 4_u32, 4_u32, 3_u32));
+ assert_eq!(v.zwww(), uvec4(3_u32, 4_u32, 4_u32, 4_u32));
+ assert_eq!(v.wxxx(), uvec4(4_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.wxxy(), uvec4(4_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.wxxz(), uvec4(4_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.wxxw(), uvec4(4_u32, 1_u32, 1_u32, 4_u32));
+ assert_eq!(v.wxyx(), uvec4(4_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.wxyy(), uvec4(4_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.wxyz(), uvec4(4_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.wxyw(), uvec4(4_u32, 1_u32, 2_u32, 4_u32));
+ assert_eq!(v.wxzx(), uvec4(4_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.wxzy(), uvec4(4_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.wxzz(), uvec4(4_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.wxzw(), uvec4(4_u32, 1_u32, 3_u32, 4_u32));
+ assert_eq!(v.wxwx(), uvec4(4_u32, 1_u32, 4_u32, 1_u32));
+ assert_eq!(v.wxwy(), uvec4(4_u32, 1_u32, 4_u32, 2_u32));
+ assert_eq!(v.wxwz(), uvec4(4_u32, 1_u32, 4_u32, 3_u32));
+ assert_eq!(v.wxww(), uvec4(4_u32, 1_u32, 4_u32, 4_u32));
+ assert_eq!(v.wyxx(), uvec4(4_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.wyxy(), uvec4(4_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.wyxz(), uvec4(4_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.wyxw(), uvec4(4_u32, 2_u32, 1_u32, 4_u32));
+ assert_eq!(v.wyyx(), uvec4(4_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.wyyy(), uvec4(4_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.wyyz(), uvec4(4_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.wyyw(), uvec4(4_u32, 2_u32, 2_u32, 4_u32));
+ assert_eq!(v.wyzx(), uvec4(4_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.wyzy(), uvec4(4_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.wyzz(), uvec4(4_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.wyzw(), uvec4(4_u32, 2_u32, 3_u32, 4_u32));
+ assert_eq!(v.wywx(), uvec4(4_u32, 2_u32, 4_u32, 1_u32));
+ assert_eq!(v.wywy(), uvec4(4_u32, 2_u32, 4_u32, 2_u32));
+ assert_eq!(v.wywz(), uvec4(4_u32, 2_u32, 4_u32, 3_u32));
+ assert_eq!(v.wyww(), uvec4(4_u32, 2_u32, 4_u32, 4_u32));
+ assert_eq!(v.wzxx(), uvec4(4_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.wzxy(), uvec4(4_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.wzxz(), uvec4(4_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.wzxw(), uvec4(4_u32, 3_u32, 1_u32, 4_u32));
+ assert_eq!(v.wzyx(), uvec4(4_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.wzyy(), uvec4(4_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.wzyz(), uvec4(4_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.wzyw(), uvec4(4_u32, 3_u32, 2_u32, 4_u32));
+ assert_eq!(v.wzzx(), uvec4(4_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.wzzy(), uvec4(4_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.wzzz(), uvec4(4_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.wzzw(), uvec4(4_u32, 3_u32, 3_u32, 4_u32));
+ assert_eq!(v.wzwx(), uvec4(4_u32, 3_u32, 4_u32, 1_u32));
+ assert_eq!(v.wzwy(), uvec4(4_u32, 3_u32, 4_u32, 2_u32));
+ assert_eq!(v.wzwz(), uvec4(4_u32, 3_u32, 4_u32, 3_u32));
+ assert_eq!(v.wzww(), uvec4(4_u32, 3_u32, 4_u32, 4_u32));
+ assert_eq!(v.wwxx(), uvec4(4_u32, 4_u32, 1_u32, 1_u32));
+ assert_eq!(v.wwxy(), uvec4(4_u32, 4_u32, 1_u32, 2_u32));
+ assert_eq!(v.wwxz(), uvec4(4_u32, 4_u32, 1_u32, 3_u32));
+ assert_eq!(v.wwxw(), uvec4(4_u32, 4_u32, 1_u32, 4_u32));
+ assert_eq!(v.wwyx(), uvec4(4_u32, 4_u32, 2_u32, 1_u32));
+ assert_eq!(v.wwyy(), uvec4(4_u32, 4_u32, 2_u32, 2_u32));
+ assert_eq!(v.wwyz(), uvec4(4_u32, 4_u32, 2_u32, 3_u32));
+ assert_eq!(v.wwyw(), uvec4(4_u32, 4_u32, 2_u32, 4_u32));
+ assert_eq!(v.wwzx(), uvec4(4_u32, 4_u32, 3_u32, 1_u32));
+ assert_eq!(v.wwzy(), uvec4(4_u32, 4_u32, 3_u32, 2_u32));
+ assert_eq!(v.wwzz(), uvec4(4_u32, 4_u32, 3_u32, 3_u32));
+ assert_eq!(v.wwzw(), uvec4(4_u32, 4_u32, 3_u32, 4_u32));
+ assert_eq!(v.wwwx(), uvec4(4_u32, 4_u32, 4_u32, 1_u32));
+ assert_eq!(v.wwwy(), uvec4(4_u32, 4_u32, 4_u32, 2_u32));
+ assert_eq!(v.wwwz(), uvec4(4_u32, 4_u32, 4_u32, 3_u32));
+ assert_eq!(v.wwww(), uvec4(4_u32, 4_u32, 4_u32, 4_u32));
+ assert_eq!(v.xxx(), uvec3(1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxy(), uvec3(1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xxz(), uvec3(1_u32, 1_u32, 3_u32));
+ assert_eq!(v.xxw(), uvec3(1_u32, 1_u32, 4_u32));
+ assert_eq!(v.xyx(), uvec3(1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyy(), uvec3(1_u32, 2_u32, 2_u32));
+ assert_eq!(v.xyz(), uvec3(1_u32, 2_u32, 3_u32));
+ assert_eq!(v.xyw(), uvec3(1_u32, 2_u32, 4_u32));
+ assert_eq!(v.xzx(), uvec3(1_u32, 3_u32, 1_u32));
+ assert_eq!(v.xzy(), uvec3(1_u32, 3_u32, 2_u32));
+ assert_eq!(v.xzz(), uvec3(1_u32, 3_u32, 3_u32));
+ assert_eq!(v.xzw(), uvec3(1_u32, 3_u32, 4_u32));
+ assert_eq!(v.xwx(), uvec3(1_u32, 4_u32, 1_u32));
+ assert_eq!(v.xwy(), uvec3(1_u32, 4_u32, 2_u32));
+ assert_eq!(v.xwz(), uvec3(1_u32, 4_u32, 3_u32));
+ assert_eq!(v.xww(), uvec3(1_u32, 4_u32, 4_u32));
+ assert_eq!(v.yxx(), uvec3(2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxy(), uvec3(2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yxz(), uvec3(2_u32, 1_u32, 3_u32));
+ assert_eq!(v.yxw(), uvec3(2_u32, 1_u32, 4_u32));
+ assert_eq!(v.yyx(), uvec3(2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyy(), uvec3(2_u32, 2_u32, 2_u32));
+ assert_eq!(v.yyz(), uvec3(2_u32, 2_u32, 3_u32));
+ assert_eq!(v.yyw(), uvec3(2_u32, 2_u32, 4_u32));
+ assert_eq!(v.yzx(), uvec3(2_u32, 3_u32, 1_u32));
+ assert_eq!(v.yzy(), uvec3(2_u32, 3_u32, 2_u32));
+ assert_eq!(v.yzz(), uvec3(2_u32, 3_u32, 3_u32));
+ assert_eq!(v.yzw(), uvec3(2_u32, 3_u32, 4_u32));
+ assert_eq!(v.ywx(), uvec3(2_u32, 4_u32, 1_u32));
+ assert_eq!(v.ywy(), uvec3(2_u32, 4_u32, 2_u32));
+ assert_eq!(v.ywz(), uvec3(2_u32, 4_u32, 3_u32));
+ assert_eq!(v.yww(), uvec3(2_u32, 4_u32, 4_u32));
+ assert_eq!(v.zxx(), uvec3(3_u32, 1_u32, 1_u32));
+ assert_eq!(v.zxy(), uvec3(3_u32, 1_u32, 2_u32));
+ assert_eq!(v.zxz(), uvec3(3_u32, 1_u32, 3_u32));
+ assert_eq!(v.zxw(), uvec3(3_u32, 1_u32, 4_u32));
+ assert_eq!(v.zyx(), uvec3(3_u32, 2_u32, 1_u32));
+ assert_eq!(v.zyy(), uvec3(3_u32, 2_u32, 2_u32));
+ assert_eq!(v.zyz(), uvec3(3_u32, 2_u32, 3_u32));
+ assert_eq!(v.zyw(), uvec3(3_u32, 2_u32, 4_u32));
+ assert_eq!(v.zzx(), uvec3(3_u32, 3_u32, 1_u32));
+ assert_eq!(v.zzy(), uvec3(3_u32, 3_u32, 2_u32));
+ assert_eq!(v.zzz(), uvec3(3_u32, 3_u32, 3_u32));
+ assert_eq!(v.zzw(), uvec3(3_u32, 3_u32, 4_u32));
+ assert_eq!(v.zwx(), uvec3(3_u32, 4_u32, 1_u32));
+ assert_eq!(v.zwy(), uvec3(3_u32, 4_u32, 2_u32));
+ assert_eq!(v.zwz(), uvec3(3_u32, 4_u32, 3_u32));
+ assert_eq!(v.zww(), uvec3(3_u32, 4_u32, 4_u32));
+ assert_eq!(v.wxx(), uvec3(4_u32, 1_u32, 1_u32));
+ assert_eq!(v.wxy(), uvec3(4_u32, 1_u32, 2_u32));
+ assert_eq!(v.wxz(), uvec3(4_u32, 1_u32, 3_u32));
+ assert_eq!(v.wxw(), uvec3(4_u32, 1_u32, 4_u32));
+ assert_eq!(v.wyx(), uvec3(4_u32, 2_u32, 1_u32));
+ assert_eq!(v.wyy(), uvec3(4_u32, 2_u32, 2_u32));
+ assert_eq!(v.wyz(), uvec3(4_u32, 2_u32, 3_u32));
+ assert_eq!(v.wyw(), uvec3(4_u32, 2_u32, 4_u32));
+ assert_eq!(v.wzx(), uvec3(4_u32, 3_u32, 1_u32));
+ assert_eq!(v.wzy(), uvec3(4_u32, 3_u32, 2_u32));
+ assert_eq!(v.wzz(), uvec3(4_u32, 3_u32, 3_u32));
+ assert_eq!(v.wzw(), uvec3(4_u32, 3_u32, 4_u32));
+ assert_eq!(v.wwx(), uvec3(4_u32, 4_u32, 1_u32));
+ assert_eq!(v.wwy(), uvec3(4_u32, 4_u32, 2_u32));
+ assert_eq!(v.wwz(), uvec3(4_u32, 4_u32, 3_u32));
+ assert_eq!(v.www(), uvec3(4_u32, 4_u32, 4_u32));
+ assert_eq!(v.xx(), uvec2(1_u32, 1_u32));
+ assert_eq!(v.xy(), uvec2(1_u32, 2_u32));
+ assert_eq!(v.xz(), uvec2(1_u32, 3_u32));
+ assert_eq!(v.xw(), uvec2(1_u32, 4_u32));
+ assert_eq!(v.yx(), uvec2(2_u32, 1_u32));
+ assert_eq!(v.yy(), uvec2(2_u32, 2_u32));
+ assert_eq!(v.yz(), uvec2(2_u32, 3_u32));
+ assert_eq!(v.yw(), uvec2(2_u32, 4_u32));
+ assert_eq!(v.zx(), uvec2(3_u32, 1_u32));
+ assert_eq!(v.zy(), uvec2(3_u32, 2_u32));
+ assert_eq!(v.zz(), uvec2(3_u32, 3_u32));
+ assert_eq!(v.zw(), uvec2(3_u32, 4_u32));
+ assert_eq!(v.wx(), uvec2(4_u32, 1_u32));
+ assert_eq!(v.wy(), uvec2(4_u32, 2_u32));
+ assert_eq!(v.wz(), uvec2(4_u32, 3_u32));
+ assert_eq!(v.ww(), uvec2(4_u32, 4_u32));
+});
+
+glam_test!(test_uvec3_swizzles, {
+ let v = uvec3(1_u32, 2_u32, 3_u32);
+ assert_eq!(v, v.xyz());
+ assert_eq!(v.xxxx(), uvec4(1_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxxy(), uvec4(1_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xxxz(), uvec4(1_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.xxyx(), uvec4(1_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xxyy(), uvec4(1_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.xxyz(), uvec4(1_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.xxzx(), uvec4(1_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.xxzy(), uvec4(1_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.xxzz(), uvec4(1_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.xyxx(), uvec4(1_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.xyxy(), uvec4(1_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.xyxz(), uvec4(1_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.xyyx(), uvec4(1_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyyy(), uvec4(1_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.xyyz(), uvec4(1_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.xyzx(), uvec4(1_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.xyzy(), uvec4(1_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.xyzz(), uvec4(1_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.xzxx(), uvec4(1_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.xzxy(), uvec4(1_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.xzxz(), uvec4(1_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.xzyx(), uvec4(1_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.xzyy(), uvec4(1_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.xzyz(), uvec4(1_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.xzzx(), uvec4(1_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.xzzy(), uvec4(1_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.xzzz(), uvec4(1_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.yxxx(), uvec4(2_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxxy(), uvec4(2_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.yxxz(), uvec4(2_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.yxyx(), uvec4(2_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.yxyy(), uvec4(2_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.yxyz(), uvec4(2_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.yxzx(), uvec4(2_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.yxzy(), uvec4(2_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.yxzz(), uvec4(2_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.yyxx(), uvec4(2_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yyxy(), uvec4(2_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yyxz(), uvec4(2_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.yyyx(), uvec4(2_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyyy(), uvec4(2_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.yyyz(), uvec4(2_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.yyzx(), uvec4(2_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.yyzy(), uvec4(2_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.yyzz(), uvec4(2_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.yzxx(), uvec4(2_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.yzxy(), uvec4(2_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.yzxz(), uvec4(2_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.yzyx(), uvec4(2_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.yzyy(), uvec4(2_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.yzyz(), uvec4(2_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.yzzx(), uvec4(2_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.yzzy(), uvec4(2_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.yzzz(), uvec4(2_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.zxxx(), uvec4(3_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.zxxy(), uvec4(3_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.zxxz(), uvec4(3_u32, 1_u32, 1_u32, 3_u32));
+ assert_eq!(v.zxyx(), uvec4(3_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.zxyy(), uvec4(3_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.zxyz(), uvec4(3_u32, 1_u32, 2_u32, 3_u32));
+ assert_eq!(v.zxzx(), uvec4(3_u32, 1_u32, 3_u32, 1_u32));
+ assert_eq!(v.zxzy(), uvec4(3_u32, 1_u32, 3_u32, 2_u32));
+ assert_eq!(v.zxzz(), uvec4(3_u32, 1_u32, 3_u32, 3_u32));
+ assert_eq!(v.zyxx(), uvec4(3_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.zyxy(), uvec4(3_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.zyxz(), uvec4(3_u32, 2_u32, 1_u32, 3_u32));
+ assert_eq!(v.zyyx(), uvec4(3_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.zyyy(), uvec4(3_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.zyyz(), uvec4(3_u32, 2_u32, 2_u32, 3_u32));
+ assert_eq!(v.zyzx(), uvec4(3_u32, 2_u32, 3_u32, 1_u32));
+ assert_eq!(v.zyzy(), uvec4(3_u32, 2_u32, 3_u32, 2_u32));
+ assert_eq!(v.zyzz(), uvec4(3_u32, 2_u32, 3_u32, 3_u32));
+ assert_eq!(v.zzxx(), uvec4(3_u32, 3_u32, 1_u32, 1_u32));
+ assert_eq!(v.zzxy(), uvec4(3_u32, 3_u32, 1_u32, 2_u32));
+ assert_eq!(v.zzxz(), uvec4(3_u32, 3_u32, 1_u32, 3_u32));
+ assert_eq!(v.zzyx(), uvec4(3_u32, 3_u32, 2_u32, 1_u32));
+ assert_eq!(v.zzyy(), uvec4(3_u32, 3_u32, 2_u32, 2_u32));
+ assert_eq!(v.zzyz(), uvec4(3_u32, 3_u32, 2_u32, 3_u32));
+ assert_eq!(v.zzzx(), uvec4(3_u32, 3_u32, 3_u32, 1_u32));
+ assert_eq!(v.zzzy(), uvec4(3_u32, 3_u32, 3_u32, 2_u32));
+ assert_eq!(v.zzzz(), uvec4(3_u32, 3_u32, 3_u32, 3_u32));
+ assert_eq!(v.xxx(), uvec3(1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxy(), uvec3(1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xxz(), uvec3(1_u32, 1_u32, 3_u32));
+ assert_eq!(v.xyx(), uvec3(1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyy(), uvec3(1_u32, 2_u32, 2_u32));
+ assert_eq!(v.xzx(), uvec3(1_u32, 3_u32, 1_u32));
+ assert_eq!(v.xzy(), uvec3(1_u32, 3_u32, 2_u32));
+ assert_eq!(v.xzz(), uvec3(1_u32, 3_u32, 3_u32));
+ assert_eq!(v.yxx(), uvec3(2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxy(), uvec3(2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yxz(), uvec3(2_u32, 1_u32, 3_u32));
+ assert_eq!(v.yyx(), uvec3(2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyy(), uvec3(2_u32, 2_u32, 2_u32));
+ assert_eq!(v.yyz(), uvec3(2_u32, 2_u32, 3_u32));
+ assert_eq!(v.yzx(), uvec3(2_u32, 3_u32, 1_u32));
+ assert_eq!(v.yzy(), uvec3(2_u32, 3_u32, 2_u32));
+ assert_eq!(v.yzz(), uvec3(2_u32, 3_u32, 3_u32));
+ assert_eq!(v.zxx(), uvec3(3_u32, 1_u32, 1_u32));
+ assert_eq!(v.zxy(), uvec3(3_u32, 1_u32, 2_u32));
+ assert_eq!(v.zxz(), uvec3(3_u32, 1_u32, 3_u32));
+ assert_eq!(v.zyx(), uvec3(3_u32, 2_u32, 1_u32));
+ assert_eq!(v.zyy(), uvec3(3_u32, 2_u32, 2_u32));
+ assert_eq!(v.zyz(), uvec3(3_u32, 2_u32, 3_u32));
+ assert_eq!(v.zzx(), uvec3(3_u32, 3_u32, 1_u32));
+ assert_eq!(v.zzy(), uvec3(3_u32, 3_u32, 2_u32));
+ assert_eq!(v.zzz(), uvec3(3_u32, 3_u32, 3_u32));
+ assert_eq!(v.xx(), uvec2(1_u32, 1_u32));
+ assert_eq!(v.xy(), uvec2(1_u32, 2_u32));
+ assert_eq!(v.xz(), uvec2(1_u32, 3_u32));
+ assert_eq!(v.yx(), uvec2(2_u32, 1_u32));
+ assert_eq!(v.yy(), uvec2(2_u32, 2_u32));
+ assert_eq!(v.yz(), uvec2(2_u32, 3_u32));
+ assert_eq!(v.zx(), uvec2(3_u32, 1_u32));
+ assert_eq!(v.zy(), uvec2(3_u32, 2_u32));
+ assert_eq!(v.zz(), uvec2(3_u32, 3_u32));
+});
+
+glam_test!(test_uvec2_swizzles, {
+ let v = uvec2(1_u32, 2_u32);
+ assert_eq!(v, v.xy());
+ assert_eq!(v.xxxx(), uvec4(1_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxxy(), uvec4(1_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xxyx(), uvec4(1_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xxyy(), uvec4(1_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.xyxx(), uvec4(1_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.xyxy(), uvec4(1_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.xyyx(), uvec4(1_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyyy(), uvec4(1_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.yxxx(), uvec4(2_u32, 1_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxxy(), uvec4(2_u32, 1_u32, 1_u32, 2_u32));
+ assert_eq!(v.yxyx(), uvec4(2_u32, 1_u32, 2_u32, 1_u32));
+ assert_eq!(v.yxyy(), uvec4(2_u32, 1_u32, 2_u32, 2_u32));
+ assert_eq!(v.yyxx(), uvec4(2_u32, 2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yyxy(), uvec4(2_u32, 2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yyyx(), uvec4(2_u32, 2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyyy(), uvec4(2_u32, 2_u32, 2_u32, 2_u32));
+ assert_eq!(v.xxx(), uvec3(1_u32, 1_u32, 1_u32));
+ assert_eq!(v.xxy(), uvec3(1_u32, 1_u32, 2_u32));
+ assert_eq!(v.xyx(), uvec3(1_u32, 2_u32, 1_u32));
+ assert_eq!(v.xyy(), uvec3(1_u32, 2_u32, 2_u32));
+ assert_eq!(v.yxx(), uvec3(2_u32, 1_u32, 1_u32));
+ assert_eq!(v.yxy(), uvec3(2_u32, 1_u32, 2_u32));
+ assert_eq!(v.yyx(), uvec3(2_u32, 2_u32, 1_u32));
+ assert_eq!(v.yyy(), uvec3(2_u32, 2_u32, 2_u32));
+ assert_eq!(v.xx(), uvec2(1_u32, 1_u32));
+ assert_eq!(v.yx(), uvec2(2_u32, 1_u32));
+ assert_eq!(v.yy(), uvec2(2_u32, 2_u32));
+});
diff --git a/tests/transform.rs b/tests/transform.rs
new file mode 100644
index 0000000..e7f7715
--- /dev/null
+++ b/tests/transform.rs
@@ -0,0 +1,129 @@
+#![allow(deprecated)]
+
+#[cfg(feature = "transform-types")]
+#[macro_use]
+mod support;
+
+#[cfg(feature = "transform-types")]
+mod transform {
+ use crate::support::FloatCompare;
+ use glam::*;
+
+ impl FloatCompare for TransformSRT {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_scale_rotation_translation(
+ self.scale.abs_diff(&other.scale),
+ self.rotation.abs_diff(&other.rotation),
+ self.translation.abs_diff(&other.translation),
+ )
+ }
+ }
+
+ impl FloatCompare for TransformRT {
+ #[inline]
+ fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
+ self.abs_diff_eq(*other, max_abs_diff)
+ }
+
+ #[inline]
+ fn abs_diff(&self, other: &Self) -> Self {
+ Self::from_rotation_translation(
+ self.rotation.abs_diff(&other.rotation),
+ self.translation.abs_diff(&other.translation),
+ )
+ }
+ }
+
+ #[test]
+ fn test_identity() {
+ let tr = TransformRT::IDENTITY;
+ assert_eq!(tr.rotation, Quat::IDENTITY);
+ assert_eq!(tr.translation, Vec3::ZERO);
+
+ let srt = TransformSRT::IDENTITY;
+ assert_eq!(srt.scale, Vec3::ONE);
+ assert_eq!(srt.rotation, Quat::IDENTITY);
+ assert_eq!(srt.translation, Vec3::ZERO);
+
+ assert_eq!(srt, tr.into());
+
+ assert_eq!(TransformRT::IDENTITY, TransformRT::default());
+ assert_eq!(TransformSRT::IDENTITY, TransformSRT::default());
+ }
+
+ #[test]
+ fn test_nan() {
+ assert!(TransformRT::NAN.is_nan());
+ assert!(!TransformRT::NAN.is_finite());
+
+ assert!(TransformSRT::NAN.is_nan());
+ assert!(!TransformSRT::NAN.is_finite());
+ }
+
+ #[test]
+ fn test_new() {
+ let t = Vec3::new(1.0, 2.0, 3.0);
+ let r = Quat::from_rotation_y(90.0_f32.to_radians());
+ let s = Vec3::new(-1.0, -2.0, -3.0);
+
+ let tr = TransformRT::from_rotation_translation(r, t);
+ assert_eq!(tr.rotation, r);
+ assert_eq!(tr.translation, t);
+
+ let srt = TransformSRT::from_scale_rotation_translation(s, r, t);
+ assert_eq!(srt.scale, s);
+ assert_eq!(srt.rotation, r);
+ assert_eq!(srt.translation, t);
+
+ assert_eq!(tr, tr);
+ assert_eq!(srt, srt);
+ }
+
+ #[test]
+ fn test_mul() {
+ let tr = TransformRT::from_rotation_translation(
+ Quat::from_rotation_z(-90.0_f32.to_radians()),
+ Vec3::X,
+ );
+ let v0 = Vec3A::Y;
+ let v1 = tr.transform_point3a(v0);
+ assert_approx_eq!(v1, Vec3A::X * 2.0);
+ assert_approx_eq!(v1, tr.transform_point3a(v0));
+ let inv_tr = tr.inverse();
+ let v2 = inv_tr.transform_point3a(v1);
+ assert_approx_eq!(v0, v2);
+
+ assert_eq!(tr * TransformRT::IDENTITY, tr);
+ assert_approx_eq!(tr * inv_tr, TransformRT::IDENTITY);
+
+ assert_eq!(tr * TransformSRT::IDENTITY, TransformSRT::from(tr));
+ assert_eq!(TransformSRT::IDENTITY * tr, TransformSRT::from(tr));
+
+ let s = Vec3::splat(2.0);
+ let r = Quat::from_rotation_y(180.0_f32.to_radians());
+ let t = -Vec3::Y;
+ let srt = TransformSRT::from_scale_rotation_translation(s, r, t);
+ let v0 = Vec3A::X;
+ let v1 = srt.transform_point3a(v0);
+ assert_approx_eq!(v1, (r * (v0 * Vec3A::from(s))) + Vec3A::from(t));
+ assert_approx_eq!(v1, srt.transform_point3a(v0));
+ let inv_srt = srt.inverse();
+ let v2 = inv_srt.transform_point3a(v1);
+ assert_approx_eq!(v0, v2);
+
+ assert_eq!(srt * TransformSRT::IDENTITY, srt);
+ assert_eq!(srt * inv_srt, TransformSRT::IDENTITY);
+
+ // negative scale mul test
+ let s = Vec3::splat(-2.0);
+ let srt = TransformSRT::from_scale_rotation_translation(s, r, t);
+ let inv_srt = srt.inverse();
+ assert_eq!(srt * inv_srt, TransformSRT::IDENTITY);
+ }
+}
diff --git a/tests/vec2.rs b/tests/vec2.rs
new file mode 100644
index 0000000..0d0b798
--- /dev/null
+++ b/tests/vec2.rs
@@ -0,0 +1,901 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_vec2_tests {
+ ($t:ty, $const_new:ident, $new:ident, $vec2:ident, $vec3:ident, $mask:ident) => {
+ glam_test!(test_const, {
+ const V: $vec2 = $const_new!([1 as $t, 2 as $t]);
+ assert_eq!($vec2::new(1 as $t, 2 as $t), V);
+ });
+
+ glam_test!(test_new, {
+ let v = $new(1 as $t, 2 as $t);
+
+ assert_eq!(v.x, 1 as $t);
+ assert_eq!(v.y, 2 as $t);
+
+ let t = (1 as $t, 2 as $t);
+ let v = $vec2::from(t);
+ assert_eq!(t, v.into());
+
+ let a = [1 as $t, 2 as $t];
+ let v = $vec2::from(a);
+ let a1: [$t; 2] = v.into();
+ assert_eq!(a, a1);
+
+ let v = $vec2::new(t.0, t.1);
+ assert_eq!(t, v.into());
+
+ assert_eq!($vec2::new(1 as $t, 0 as $t), $vec2::X);
+ assert_eq!($vec2::new(0 as $t, 1 as $t), $vec2::Y);
+ });
+
+ glam_test!(test_fmt, {
+ let a = $vec2::new(1 as $t, 2 as $t);
+ assert_eq!(
+ format!("{:?}", a),
+ format!("{}({:?}, {:?})", stringify!($vec2), a.x, a.y)
+ );
+ // assert_eq!(format!("{:#?}", a), "$vec2(\n 1.0,\n 2.0\n)");
+ assert_eq!(format!("{}", a), "[1, 2]");
+ });
+
+ glam_test!(test_zero, {
+ let v = $vec2::ZERO;
+ assert_eq!($new(0 as $t, 0 as $t), v);
+ assert_eq!(v, $vec2::default());
+ });
+
+ glam_test!(test_splat, {
+ let v = $vec2::splat(1 as $t);
+ assert_eq!($vec2::ONE, v);
+ });
+
+ glam_test!(test_accessors, {
+ let mut a = $vec2::ZERO;
+ a.x = 1 as $t;
+ a.y = 2 as $t;
+ assert_eq!(1 as $t, a.x);
+ assert_eq!(2 as $t, a.y);
+ assert_eq!($vec2::new(1 as $t, 2 as $t), a);
+
+ let mut a = $vec2::ZERO;
+ a[0] = 1 as $t;
+ a[1] = 2 as $t;
+ assert_eq!(1 as $t, a[0]);
+ assert_eq!(2 as $t, a[1]);
+ assert_eq!($vec2::new(1 as $t, 2 as $t), a);
+ });
+
+ glam_test!(test_dot_unsigned, {
+ let x = $new(1 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ });
+
+ glam_test!(test_ops, {
+ let a = $new(2 as $t, 4 as $t);
+ assert_eq!($new(4 as $t, 8 as $t), (a + a));
+ assert_eq!($new(0 as $t, 0 as $t), (a - a));
+ assert_eq!($new(4 as $t, 16 as $t), (a * a));
+ assert_eq!($new(4 as $t, 8 as $t), (a * 2 as $t));
+ assert_eq!($new(4 as $t, 8 as $t), (2 as $t * a));
+ assert_eq!($new(1 as $t, 1 as $t), (a / a));
+ assert_eq!($new(1 as $t, 2 as $t), (a / 2 as $t));
+ assert_eq!($new(2 as $t, 1 as $t), (4 as $t / a));
+ assert_eq!($new(0 as $t, 0 as $t), a % a);
+ assert_eq!($new(0 as $t, 1 as $t), a % (a - 1 as $t));
+ assert_eq!($new(0 as $t, 0 as $t), a % 1 as $t);
+ assert_eq!($new(2 as $t, 1 as $t), a % 3 as $t);
+ assert_eq!($new(1 as $t, 1 as $t), 17 as $t % a);
+ assert_eq!($new(2 as $t, 4 as $t), a % 8 as $t);
+ });
+
+ glam_test!(test_assign_ops, {
+ let a = $new(1 as $t, 2 as $t);
+ let mut b = a;
+ b += a;
+ assert_eq!($new(2 as $t, 4 as $t), b);
+ b -= a;
+ assert_eq!($new(1 as $t, 2 as $t), b);
+ b *= a;
+ assert_eq!($new(1 as $t, 4 as $t), b);
+ b /= a;
+ assert_eq!($new(1 as $t, 2 as $t), b);
+ b *= 2 as $t;
+ assert_eq!($new(2 as $t, 4 as $t), b);
+ b /= 2 as $t;
+ assert_eq!($new(1 as $t, 2 as $t), b);
+ b %= (b + 1 as $t);
+ assert_eq!($new(1 as $t, 2 as $t), b);
+ b %= b;
+ assert_eq!($new(0 as $t, 0 as $t), b);
+ });
+
+ glam_test!(test_min_max, {
+ let a = $new(0 as $t, 2 as $t);
+ let b = $new(1 as $t, 1 as $t);
+ assert_eq!($new(0 as $t, 1 as $t), a.min(b));
+ assert_eq!($new(0 as $t, 1 as $t), b.min(a));
+ assert_eq!($new(1 as $t, 2 as $t), a.max(b));
+ assert_eq!($new(1 as $t, 2 as $t), b.max(a));
+ });
+
+ glam_test!(test_clamp, {
+ fn vec(x: i32, y: i32) -> $vec2 {
+ $vec2::new(x as $t, y as $t)
+ }
+ let min = vec(1, 3);
+ let max = vec(6, 8);
+ assert_eq!(vec(0, 0).clamp(min, max), vec(1, 3));
+ assert_eq!(vec(2, 2).clamp(min, max), vec(2, 3));
+ assert_eq!(vec(4, 5).clamp(min, max), vec(4, 5));
+ assert_eq!(vec(6, 6).clamp(min, max), vec(6, 6));
+ assert_eq!(vec(7, 7).clamp(min, max), vec(6, 7));
+ assert_eq!(vec(9, 9).clamp(min, max), vec(6, 8));
+
+ should_glam_assert!({ $vec2::clamp($vec2::ZERO, $vec2::ONE, $vec2::ZERO) });
+ });
+
+ glam_test!(test_hmin_hmax, {
+ let a = $new(1 as $t, 2 as $t);
+ assert_eq!(1 as $t, a.min_element());
+ assert_eq!(2 as $t, a.max_element());
+ });
+
+ glam_test!(test_eq, {
+ let a = $new(1 as $t, 1 as $t);
+ let b = $new(1 as $t, 2 as $t);
+ assert!(a.cmpeq(a).all());
+ assert!(b.cmpeq(b).all());
+ assert!(a.cmpne(b).any());
+ assert!(b.cmpne(a).any());
+ assert!(b.cmpeq(a).any());
+ });
+
+ glam_test!(test_cmp, {
+ assert!(!$mask::default().any());
+ assert!(!$mask::default().all());
+ assert_eq!($mask::default().bitmask(), 0x0);
+ let a = $new(1 as $t, 1 as $t);
+ let b = $new(2 as $t, 2 as $t);
+ let c = $new(1 as $t, 1 as $t);
+ let d = $new(2 as $t, 1 as $t);
+ assert_eq!(a.cmplt(a).bitmask(), 0x0);
+ assert_eq!(a.cmplt(b).bitmask(), 0x3);
+ assert_eq!(a.cmplt(d).bitmask(), 0x1);
+ assert_eq!(c.cmple(a).bitmask(), 0x3);
+ assert!(a.cmplt(b).all());
+ assert!(a.cmplt(d).any());
+ assert!(a.cmple(b).all());
+ assert!(a.cmple(a).all());
+ assert!(b.cmpgt(a).all());
+ assert!(b.cmpge(a).all());
+ assert!(b.cmpge(b).all());
+ assert!(!(a.cmpge(d).all()));
+ assert!(c.cmple(c).all());
+ assert!(c.cmpge(c).all());
+ assert!(a == a);
+ });
+
+ glam_test!(test_extend_truncate, {
+ let a = $new(1 as $t, 2 as $t);
+ let b = a.extend(3 as $t);
+ assert_eq!($vec3::new(1 as $t, 2 as $t, 3 as $t), b);
+ });
+
+ glam_test!(test_vec2mask, {
+ // make sure the unused 'z' value doesn't break $vec2 behaviour
+ let a = $vec3::ZERO;
+ let mut b = a.truncate();
+ b.x = 1 as $t;
+ b.y = 1 as $t;
+ assert!(!b.cmpeq($vec2::ZERO).any());
+ assert!(b.cmpeq($vec2::splat(1 as $t)).all());
+ });
+
+ // #[test]
+ // fn test_mask_as_ref() {
+ // assert_eq!($mask::new(false, false).as_ref(), &[0, 0]);
+ // assert_eq!($mask::new(true, false).as_ref(), &[!0, 0]);
+ // assert_eq!($mask::new(false, true).as_ref(), &[0, !0]);
+ // assert_eq!($mask::new(true, true).as_ref(), &[!0, !0]);
+ // }
+
+ glam_test!(test_mask_from, {
+ assert_eq!(Into::<[u32; 2]>::into($mask::new(false, false)), [0, 0]);
+ assert_eq!(Into::<[u32; 2]>::into($mask::new(true, false)), [!0, 0]);
+ assert_eq!(Into::<[u32; 2]>::into($mask::new(false, true)), [0, !0]);
+ assert_eq!(Into::<[u32; 2]>::into($mask::new(true, true)), [!0, !0]);
+ });
+
+ glam_test!(test_mask_bitmask, {
+ assert_eq!($mask::new(false, false).bitmask(), 0b00);
+ assert_eq!($mask::new(true, false).bitmask(), 0b01);
+ assert_eq!($mask::new(false, true).bitmask(), 0b10);
+ assert_eq!($mask::new(true, true).bitmask(), 0b11);
+ });
+
+ glam_test!(test_mask_any, {
+ assert_eq!($mask::new(false, false).any(), false);
+ assert_eq!($mask::new(true, false).any(), true);
+ assert_eq!($mask::new(false, true).any(), true);
+ assert_eq!($mask::new(true, true).any(), true);
+ });
+
+ glam_test!(test_mask_all, {
+ assert_eq!($mask::new(false, false).all(), false);
+ assert_eq!($mask::new(true, false).all(), false);
+ assert_eq!($mask::new(false, true).all(), false);
+ assert_eq!($mask::new(true, true).all(), true);
+ });
+
+ glam_test!(test_mask_select, {
+ let a = $vec2::new(1 as $t, 2 as $t);
+ let b = $vec2::new(3 as $t, 4 as $t);
+ assert_eq!(
+ $vec2::select($mask::new(true, true), a, b),
+ $vec2::new(1 as $t, 2 as $t),
+ );
+ assert_eq!(
+ $vec2::select($mask::new(true, false), a, b),
+ $vec2::new(1 as $t, 4 as $t),
+ );
+ assert_eq!(
+ $vec2::select($mask::new(false, true), a, b),
+ $vec2::new(3 as $t, 2 as $t),
+ );
+ assert_eq!(
+ $vec2::select($mask::new(false, false), a, b),
+ $vec2::new(3 as $t, 4 as $t),
+ );
+ });
+
+ glam_test!(test_mask_and, {
+ assert_eq!(
+ ($mask::new(false, false) & $mask::new(false, false)).bitmask(),
+ 0b00,
+ );
+ assert_eq!(
+ ($mask::new(true, true) & $mask::new(true, false)).bitmask(),
+ 0b01,
+ );
+ assert_eq!(
+ ($mask::new(true, false) & $mask::new(false, true)).bitmask(),
+ 0b00,
+ );
+ assert_eq!(
+ ($mask::new(true, true) & $mask::new(true, true)).bitmask(),
+ 0b11,
+ );
+
+ let mut mask = $mask::new(true, true);
+ mask &= $mask::new(true, false);
+ assert_eq!(mask.bitmask(), 0b01);
+ });
+
+ glam_test!(test_mask_or, {
+ assert_eq!(
+ ($mask::new(false, false) | $mask::new(false, false)).bitmask(),
+ 0b00,
+ );
+ assert_eq!(
+ ($mask::new(false, false) | $mask::new(false, true)).bitmask(),
+ 0b10,
+ );
+ assert_eq!(
+ ($mask::new(true, false) | $mask::new(false, true)).bitmask(),
+ 0b11,
+ );
+ assert_eq!(
+ ($mask::new(true, true) | $mask::new(true, true)).bitmask(),
+ 0b11,
+ );
+
+ let mut mask = $mask::new(true, true);
+ mask |= $mask::new(true, false);
+ assert_eq!(mask.bitmask(), 0b11);
+ });
+
+ glam_test!(test_mask_not, {
+ assert_eq!((!$mask::new(false, false)).bitmask(), 0b11);
+ assert_eq!((!$mask::new(true, false)).bitmask(), 0b10);
+ assert_eq!((!$mask::new(false, true)).bitmask(), 0b01);
+ assert_eq!((!$mask::new(true, true)).bitmask(), 0b00);
+ });
+
+ glam_test!(test_mask_fmt, {
+ let a = $mask::new(true, false);
+
+ assert_eq!(
+ format!("{:?}", a),
+ format!("{}(0xffffffff, 0x0)", stringify!($mask))
+ );
+ assert_eq!(format!("{}", a), "[true, false]");
+ });
+
+ glam_test!(test_mask_eq, {
+ let a = $mask::new(true, false);
+ let b = $mask::new(true, false);
+ let c = $mask::new(false, true);
+
+ assert_eq!(a, b);
+ assert_eq!(b, a);
+ assert_ne!(a, c);
+ assert_ne!(b, c);
+ });
+
+ glam_test!(test_mask_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $mask::new(true, false);
+ let b = $mask::new(true, false);
+ let c = $mask::new(false, true);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+
+ glam_test!(test_to_from_slice, {
+ let v = $vec2::new(1 as $t, 2 as $t);
+ let mut a = [0 as $t, 0 as $t];
+ v.write_to_slice(&mut a);
+ assert_eq!(v, $vec2::from_slice(&a));
+
+ should_panic!({ $vec2::ONE.write_to_slice(&mut [0 as $t]) });
+ should_panic!({ $vec2::from_slice(&[0 as $t]) });
+ });
+
+ glam_test!(test_sum, {
+ let one = $vec2::ONE;
+ assert_eq!(vec![one, one].iter().sum::<$vec2>(), one + one);
+ });
+
+ glam_test!(test_product, {
+ let two = $vec2::new(2 as $t, 2 as $t);
+ assert_eq!(vec![two, two].iter().product::<$vec2>(), two * two);
+ });
+ };
+}
+
+macro_rules! impl_vec2_signed_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec2:ident, $vec3:ident, $mask:ident) => {
+ impl_vec2_tests!($t, $const_new, $new, $vec2, $vec3, $mask);
+
+ glam_test!(test_dot_signed, {
+ let x = $new(1 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ assert_eq!(-1 as $t, x.dot(-x));
+ });
+
+ glam_test!(test_neg, {
+ let a = $new(1 as $t, 2 as $t);
+ assert_eq!($new(-1 as $t, -2 as $t), (-a));
+ });
+ };
+}
+
+macro_rules! impl_vec2_eq_hash_tests {
+ ($t:ident, $new:ident) => {
+        glam_test!(test_vec2_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $new(1 as $t, 2 as $t);
+ let b = $new(1 as $t, 2 as $t);
+ let c = $new(3 as $t, 2 as $t);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+ };
+}
+
+macro_rules! impl_vec2_float_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec2:ident, $vec3:ident, $mask:ident, $mat2:ident) => {
+ impl_vec2_signed_tests!($t, $const_new, $new, $vec2, $vec3, $mask);
+ impl_vec_float_normalize_tests!($t, $vec2);
+
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_vec2_consts, {
+ assert_eq!($vec2::ZERO, $new(0 as $t, 0 as $t));
+ assert_eq!($vec2::ONE, $new(1 as $t, 1 as $t));
+ assert_eq!($vec2::X, $new(1 as $t, 0 as $t));
+ assert_eq!($vec2::Y, $new(0 as $t, 1 as $t));
+ });
+
+ glam_test!(test_vec2_nan, {
+ assert!($vec2::NAN.is_nan());
+ assert!(!$vec2::NAN.is_finite());
+ });
+
+ glam_test!(test_length, {
+ let x = $new(1.0, 0.0);
+ let y = $new(0.0, 1.0);
+ assert_eq!(4.0, (2.0 * x).length_squared());
+ assert_eq!(9.0, (-3.0 * y).length_squared());
+ assert_eq!(2.0, (-2.0 * x).length());
+ assert_eq!(3.0, (3.0 * y).length());
+ assert_eq!(2.0, x.distance_squared(y));
+ assert_eq!(13.0, (2.0 * x).distance_squared(-3.0 * y));
+ assert_eq!((2.0 as $t).sqrt(), x.distance(y));
+ assert_eq!(5.0, (3.0 * x).distance(-4.0 * y));
+ assert_eq!(13.0, (-5.0 * x).distance(12.0 * y));
+ assert_eq!(x, (2.0 * x).normalize());
+ assert_eq!(1.0 * 3.0 + 2.0 * 4.0, $new(1.0, 2.0).dot($new(3.0, 4.0)));
+ assert_eq!(2.0 * 2.0 + 3.0 * 3.0, $new(2.0, 3.0).length_squared());
+ assert_eq!(
+ (2.0 as $t * 2.0 + 3.0 * 3.0).sqrt(),
+ $new(2.0, 3.0).length()
+ );
+ assert_eq!(
+ 1.0 / (2.0 as $t * 2.0 + 3.0 * 3.0).sqrt(),
+ $new(2.0, 3.0).length_recip()
+ );
+ assert!($new(2.0, 3.0).normalize().is_normalized());
+ assert_eq!(
+ $new(2.0, 3.0) / (2.0 as $t * 2.0 + 3.0 * 3.0).sqrt(),
+ $new(2.0, 3.0).normalize()
+ );
+ assert_eq!($new(0.5, 0.25), $new(2.0, 4.0).recip());
+ });
+
+ glam_test!(test_project_reject, {
+ assert_eq!($new(0.0, 1.0), $new(1.0, 1.0).project_onto($new(0.0, 2.0)));
+ assert_eq!($new(1.0, 0.0), $new(1.0, 1.0).reject_from($new(0.0, 2.0)));
+ assert_eq!(
+ $new(0.0, 1.0),
+ $new(1.0, 1.0).project_onto_normalized($new(0.0, 1.0))
+ );
+ assert_eq!(
+ $new(1.0, 0.0),
+ $new(1.0, 1.0).reject_from_normalized($new(0.0, 1.0))
+ );
+ should_glam_assert!({ $vec2::ONE.project_onto($vec2::ZERO) });
+ should_glam_assert!({ $vec2::ONE.reject_from($vec2::ZERO) });
+ should_glam_assert!({ $vec2::ONE.project_onto_normalized($vec2::ONE) });
+ should_glam_assert!({ $vec2::ONE.reject_from_normalized($vec2::ONE) });
+ });
+
+ glam_test!(test_perp, {
+ let v1 = $vec2::new(1.0, 2.0);
+ let v2 = $vec2::new(1.0, 1.0);
+ let v1_perp = $vec2::new(-2.0, 1.0);
+ let rot90 = $mat2::from_angle($t::to_radians(90.0));
+
+ assert_eq!(v1_perp, v1.perp());
+ assert_eq!(v1.perp().dot(v1), 0.0);
+ assert_eq!(v2.perp().dot(v2), 0.0);
+ assert_eq!(v1.perp().dot(v2), v1.perp_dot(v2));
+
+ assert_approx_eq!(v1.perp(), rot90 * v1);
+ });
+
+ glam_test!(test_sign, {
+ assert_eq!($vec2::ZERO.signum(), $vec2::ONE);
+ assert_eq!(-$vec2::ZERO.signum(), -$vec2::ONE);
+ assert_eq!($vec2::ONE.signum(), $vec2::ONE);
+ assert_eq!((-$vec2::ONE).signum(), -$vec2::ONE);
+ assert_eq!($vec2::splat(INFINITY).signum(), $vec2::ONE);
+ assert_eq!($vec2::splat(NEG_INFINITY).signum(), -$vec2::ONE);
+ assert!($vec2::splat(NAN).signum().is_nan_mask().all());
+ });
+
+ glam_test!(test_abs, {
+ assert_eq!($vec2::ZERO.abs(), $vec2::ZERO);
+ assert_eq!($vec2::ONE.abs(), $vec2::ONE);
+ assert_eq!((-$vec2::ONE).abs(), $vec2::ONE);
+ });
+
+ glam_test!(test_round, {
+ assert_eq!($vec2::new(1.35, 0.0).round().x, 1.0);
+ assert_eq!($vec2::new(0.0, 1.5).round().y, 2.0);
+ assert_eq!($vec2::new(0.0, -15.5).round().y, -16.0);
+ assert_eq!($vec2::new(0.0, 0.0).round().y, 0.0);
+ assert_eq!($vec2::new(0.0, 21.1).round().y, 21.0);
+ assert_eq!($vec2::new(0.0, 11.123).round().y, 11.0);
+ assert_eq!($vec2::new(0.0, 11.499).round().y, 11.0);
+ assert_eq!(
+ $vec2::new(NEG_INFINITY, INFINITY).round(),
+ $vec2::new(NEG_INFINITY, INFINITY)
+ );
+ assert!($vec2::new(NAN, 0.0).round().x.is_nan());
+ });
+
+ glam_test!(test_floor, {
+ assert_eq!($vec2::new(1.35, -1.5).floor(), $vec2::new(1.0, -2.0));
+ assert_eq!(
+ $vec2::new(INFINITY, NEG_INFINITY).floor(),
+ $vec2::new(INFINITY, NEG_INFINITY)
+ );
+ assert!($vec2::new(NAN, 0.0).floor().x.is_nan());
+ assert_eq!(
+ $vec2::new(-2000000.123, 10000000.123).floor(),
+ $vec2::new(-2000001.0, 10000000.0)
+ );
+ });
+
+ glam_test!(test_fract, {
+ assert_approx_eq!($vec2::new(1.35, -1.5).fract(), $vec2::new(0.35, 0.5));
+ assert_approx_eq!(
+ $vec2::new(-2000000.123, 1000000.123).fract(),
+ $vec2::new(0.877, 0.123),
+ 0.002
+ );
+ });
+
+ glam_test!(test_ceil, {
+ assert_eq!($vec2::new(1.35, -1.5).ceil(), $vec2::new(2.0, -1.0));
+ assert_eq!(
+ $vec2::new(INFINITY, NEG_INFINITY).ceil(),
+ $vec2::new(INFINITY, NEG_INFINITY)
+ );
+ assert!($vec2::new(NAN, 0.0).ceil().x.is_nan());
+ assert_eq!(
+ $vec2::new(-2000000.123, 1000000.123).ceil(),
+ $vec2::new(-2000000.0, 1000001.0)
+ );
+ });
+
+ glam_test!(test_lerp, {
+ let v0 = $vec2::new(-1.0, -1.0);
+ let v1 = $vec2::new(1.0, 1.0);
+ assert_approx_eq!(v0, v0.lerp(v1, 0.0));
+ assert_approx_eq!(v1, v0.lerp(v1, 1.0));
+ assert_approx_eq!($vec2::ZERO, v0.lerp(v1, 0.5));
+ });
+
+ glam_test!(test_is_finite, {
+ assert!($vec2::new(0.0, 0.0).is_finite());
+ assert!($vec2::new(-1e-10, 1e10).is_finite());
+ assert!(!$vec2::new(INFINITY, 0.0).is_finite());
+ assert!(!$vec2::new(0.0, NAN).is_finite());
+ assert!(!$vec2::new(0.0, NEG_INFINITY).is_finite());
+ assert!(!$vec2::new(INFINITY, NEG_INFINITY).is_finite());
+ });
+
+ glam_test!(test_powf, {
+ assert_eq!($vec2::new(2.0, 4.0).powf(2.0), $vec2::new(4.0, 16.0));
+ });
+
+ glam_test!(test_exp, {
+ assert_eq!(
+ $vec2::new(1.0, 2.0).exp(),
+ $vec2::new((1.0 as $t).exp(), (2.0 as $t).exp())
+ );
+ });
+
+ glam_test!(test_angle_between, {
+ let angle = $vec2::new(1.0, 0.0).angle_between($vec2::new(0.0, 1.0));
+ assert_approx_eq!(core::$t::consts::FRAC_PI_2, angle, 1e-6);
+
+ let angle = $vec2::new(10.0, 0.0).angle_between($vec2::new(0.0, 5.0));
+ assert_approx_eq!(core::$t::consts::FRAC_PI_2, angle, 1e-6);
+
+ let angle = $vec2::new(-1.0, 0.0).angle_between($vec2::new(0.0, 1.0));
+ assert_approx_eq!(-core::$t::consts::FRAC_PI_2, angle, 1e-6);
+ });
+
+ glam_test!(test_clamp_length, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec2::new(12.0, 16.0).clamp_length(7.0, 10.0),
+ $vec2::new(6.0, 8.0) // shortened to length 10.0
+ );
+ // In the middle is unchanged
+ assert_eq!(
+ $vec2::new(2.0, 1.0).clamp_length(0.5, 5.0),
+ $vec2::new(2.0, 1.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec2::new(0.6, 0.8).clamp_length(10.0, 20.0),
+ $vec2::new(6.0, 8.0) // lengthened to length 10.0
+ );
+ should_glam_assert!({ $vec2::ONE.clamp_length(1.0, 0.0) });
+ });
+
+ glam_test!(test_clamp_length_max, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec2::new(12.0, 16.0).clamp_length_max(10.0),
+ $vec2::new(6.0, 8.0) // shortened to length 10.0
+ );
+ // Not too long is unchanged
+ assert_eq!(
+ $vec2::new(2.0, 1.0).clamp_length_max(5.0),
+ $vec2::new(2.0, 1.0) // unchanged
+ );
+ });
+
+ glam_test!(test_clamp_length_min, {
+ // Not too short is unchanged
+ assert_eq!(
+ $vec2::new(2.0, 1.0).clamp_length_min(0.5),
+ $vec2::new(2.0, 1.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec2::new(0.6, 0.8).clamp_length_min(10.0),
+ $vec2::new(6.0, 8.0) // lengthened to length 10.0
+ );
+ });
+
+ #[cfg(any(feature = "glam-assert", feature = "debug-glam-assert"))]
+ glam_test!(test_float_glam_assert, {
+ use std::panic::catch_unwind;
+
+ assert!(catch_unwind(|| $vec2::ZERO.normalize()).is_err());
+ });
+
+ glam_test!(test_mul_add, {
+ assert_eq!(
+ $vec2::new(1.0, 1.0).mul_add($vec2::new(0.5, 2.0), $vec2::new(-1.0, -1.0)),
+ $vec2::new(-0.5, 1.0)
+ );
+ });
+ };
+}
+
+macro_rules! impl_vec2_scalar_shift_op_test {
+ ($vec2:ident, $t_min:literal, $t_max:literal, $rhs_min:literal, $rhs_max:literal) => {
+ glam_test!(test_vec2_scalar_shift_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for rhs in $rhs_min..$rhs_max {
+ assert_eq!($vec2::new(x, y) << rhs, $vec2::new(x << rhs, y << rhs));
+ assert_eq!($vec2::new(x, y) >> rhs, $vec2::new(x >> rhs, y >> rhs));
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec2_scalar_shift_op_tests {
+ ($vec2:ident, $t_min:literal, $t_max:literal) => {
+ mod shift_by_i8 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0i8, 2);
+ }
+ mod shift_by_i16 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0i16, 2);
+ }
+ mod shift_by_i32 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0i32, 2);
+ }
+ mod shift_by_u8 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0u8, 2);
+ }
+ mod shift_by_u16 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0u16, 2);
+ }
+ mod shift_by_u32 {
+ use glam::$vec2;
+ impl_vec2_scalar_shift_op_test!($vec2, $t_min, $t_max, 0u32, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec2_shift_op_test {
+ ($vec2:ident, $rhs:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec2_shift_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ assert_eq!(
+ $vec2::new(x1, y1) << $rhs::new(x2, y2),
+ $vec2::new(x1 << x2, y1 << y2)
+ );
+ assert_eq!(
+ $vec2::new(x1, y1) >> $rhs::new(x2, y2),
+ $vec2::new(x1 >> x2, y1 >> y2)
+ );
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec2_shift_op_tests {
+ ($vec2:ident) => {
+ mod shift_ivec2_by_ivec2 {
+ use super::*;
+ impl_vec2_shift_op_test!($vec2, IVec2, 0, 2);
+ }
+ mod shift_ivec2_by_uvec2 {
+ use super::*;
+ impl_vec2_shift_op_test!($vec2, UVec2, 0, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec2_scalar_bit_op_tests {
+ ($vec2:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec2_scalar_bit_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for rhs in $t_min..$t_max {
+ assert_eq!($vec2::new(x, y) & rhs, $vec2::new(x & rhs, y & rhs));
+ assert_eq!($vec2::new(x, y) | rhs, $vec2::new(x | rhs, y | rhs));
+ assert_eq!($vec2::new(x, y) ^ rhs, $vec2::new(x ^ rhs, y ^ rhs));
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec2_bit_op_tests {
+ ($vec2:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec2_bit_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ assert_eq!(!$vec2::new(x1, y1), $vec2::new(!x1, !y1));
+
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ assert_eq!(
+ $vec2::new(x1, y1) & $vec2::new(x2, y2),
+ $vec2::new(x1 & x2, y1 & y2)
+ );
+ assert_eq!(
+ $vec2::new(x1, y1) | $vec2::new(x2, y2),
+ $vec2::new(x1 | x2, y1 | y2)
+ );
+ assert_eq!(
+ $vec2::new(x1, y1) ^ $vec2::new(x2, y2),
+ $vec2::new(x1 ^ x2, y1 ^ y2)
+ );
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+mod vec2 {
+ use glam::{const_vec2, vec2, BVec2, Mat2, Vec2, Vec3};
+
+ glam_test!(test_align, {
+ use core::mem;
+ assert_eq!(8, mem::size_of::<Vec2>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(4, mem::align_of::<Vec2>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(8, mem::align_of::<Vec2>());
+ assert_eq!(2, mem::size_of::<BVec2>());
+ assert_eq!(1, mem::align_of::<BVec2>());
+ });
+
+ glam_test!(test_as, {
+ use glam::{DVec2, IVec2, UVec2};
+ assert_eq!(DVec2::new(-1.0, -2.0), Vec2::new(-1.0, -2.0).as_dvec2());
+ assert_eq!(IVec2::new(-1, -2), Vec2::new(-1.0, -2.0).as_ivec2());
+ assert_eq!(UVec2::new(1, 2), Vec2::new(1.0, 2.0).as_uvec2());
+
+ assert_eq!(IVec2::new(-1, -2), DVec2::new(-1.0, -2.0).as_ivec2());
+ assert_eq!(UVec2::new(1, 2), DVec2::new(1.0, 2.0).as_uvec2());
+ assert_eq!(Vec2::new(-1.0, -2.0), DVec2::new(-1.0, -2.0).as_vec2());
+
+ assert_eq!(DVec2::new(-1.0, -2.0), IVec2::new(-1, -2).as_dvec2());
+ assert_eq!(UVec2::new(1, 2), IVec2::new(1, 2).as_uvec2());
+ assert_eq!(Vec2::new(-1.0, -2.0), IVec2::new(-1, -2).as_vec2());
+
+ assert_eq!(DVec2::new(1.0, 2.0), UVec2::new(1, 2).as_dvec2());
+ assert_eq!(IVec2::new(1, 2), UVec2::new(1, 2).as_ivec2());
+ assert_eq!(Vec2::new(1.0, 2.0), UVec2::new(1, 2).as_vec2());
+ });
+
+ impl_vec2_float_tests!(f32, const_vec2, vec2, Vec2, Vec3, BVec2, Mat2);
+}
+
+mod dvec2 {
+ use glam::{const_dvec2, dvec2, BVec2, DMat2, DVec2, DVec3};
+
+ glam_test!(test_align, {
+ use core::mem;
+ assert_eq!(16, mem::size_of::<DVec2>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(mem::align_of::<f64>(), mem::align_of::<DVec2>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(16, mem::align_of::<DVec2>());
+ assert_eq!(2, mem::size_of::<BVec2>());
+ assert_eq!(1, mem::align_of::<BVec2>());
+ });
+
+ impl_vec2_float_tests!(f64, const_dvec2, dvec2, DVec2, DVec3, BVec2, DMat2);
+}
+
+mod ivec2 {
+ use glam::{const_ivec2, ivec2, BVec2, IVec2, IVec3, UVec2};
+
+ glam_test!(test_align, {
+ use core::mem;
+ assert_eq!(8, mem::size_of::<IVec2>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(4, mem::align_of::<IVec2>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(8, mem::align_of::<IVec2>());
+ assert_eq!(2, mem::size_of::<BVec2>());
+ assert_eq!(1, mem::align_of::<BVec2>());
+ });
+
+ impl_vec2_signed_tests!(i32, const_ivec2, ivec2, IVec2, IVec3, BVec2);
+ impl_vec2_eq_hash_tests!(i32, ivec2);
+
+ impl_vec2_scalar_shift_op_tests!(IVec2, -2, 2);
+ impl_vec2_shift_op_tests!(IVec2);
+
+ impl_vec2_scalar_bit_op_tests!(IVec2, -2, 2);
+ impl_vec2_bit_op_tests!(IVec2, -2, 2);
+}
+
+mod uvec2 {
+ use glam::{const_uvec2, uvec2, BVec2, IVec2, UVec2, UVec3};
+
+ glam_test!(test_align, {
+ use core::mem;
+ assert_eq!(8, mem::size_of::<UVec2>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(4, mem::align_of::<UVec2>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(8, mem::align_of::<UVec2>());
+ assert_eq!(2, mem::size_of::<BVec2>());
+ assert_eq!(1, mem::align_of::<BVec2>());
+ });
+
+ impl_vec2_tests!(u32, const_uvec2, uvec2, UVec2, UVec3, BVec2);
+ impl_vec2_eq_hash_tests!(u32, uvec2);
+
+ impl_vec2_scalar_shift_op_tests!(UVec2, 0, 2);
+ impl_vec2_shift_op_tests!(UVec2);
+
+ impl_vec2_scalar_bit_op_tests!(UVec2, 0, 2);
+ impl_vec2_bit_op_tests!(UVec2, 0, 2);
+}
diff --git a/tests/vec3.rs b/tests/vec3.rs
new file mode 100644
index 0000000..4b2e047
--- /dev/null
+++ b/tests/vec3.rs
@@ -0,0 +1,1102 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_vec3_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec3:ident, $mask:ident) => {
+ glam_test!(test_const, {
+ const V: $vec3 = $const_new!([1 as $t, 2 as $t, 3 as $t]);
+ assert_eq!($vec3::new(1 as $t, 2 as $t, 3 as $t), V);
+ });
+
+ glam_test!(test_new, {
+ let v = $new(1 as $t, 2 as $t, 3 as $t);
+
+ assert_eq!(v.x, 1 as $t);
+ assert_eq!(v.y, 2 as $t);
+ assert_eq!(v.z, 3 as $t);
+
+ let t = (1 as $t, 2 as $t, 3 as $t);
+ let v = $vec3::from(t);
+ assert_eq!(t, v.into());
+
+ let a = [1 as $t, 2 as $t, 3 as $t];
+ let v = $vec3::from(a);
+ let a1: [$t; 3] = v.into();
+ assert_eq!(a, a1);
+
+ let v = $vec3::new(t.0, t.1, t.2);
+ assert_eq!(t, v.into());
+
+ assert_eq!($vec3::new(1 as $t, 0 as $t, 0 as $t), $vec3::X);
+ assert_eq!($vec3::new(0 as $t, 1 as $t, 0 as $t), $vec3::Y);
+ assert_eq!($vec3::new(0 as $t, 0 as $t, 1 as $t), $vec3::Z);
+ });
+
+ glam_test!(test_fmt, {
+ let a = $vec3::new(1 as $t, 2 as $t, 3 as $t);
+ assert_eq!(
+ format!("{:?}", a),
+ format!("{}({:?}, {:?}, {:?})", stringify!($vec3), a.x, a.y, a.z)
+ );
+ // assert_eq!(format!("{:#?}", a), "$vec3(\n 1.0,\n 2.0,\n 3.0\n)");
+ assert_eq!(format!("{}", a), "[1, 2, 3]");
+ });
+
+ glam_test!(test_zero, {
+ let v = $vec3::ZERO;
+ assert_eq!((0 as $t, 0 as $t, 0 as $t), v.into());
+ assert_eq!(v, $vec3::default());
+ });
+
+ glam_test!(test_splat, {
+ let v = $vec3::splat(1 as $t);
+ assert_eq!($vec3::ONE, v);
+ });
+
+ glam_test!(test_accessors, {
+ let mut a = $vec3::ZERO;
+ a.x = 1 as $t;
+ a.y = 2 as $t;
+ a.z = 3 as $t;
+ assert_eq!(1 as $t, a.x);
+ assert_eq!(2 as $t, a.y);
+ assert_eq!(3 as $t, a.z);
+ assert_eq!((1 as $t, 2 as $t, 3 as $t), a.into());
+
+ let mut a = $vec3::ZERO;
+ a[0] = 1 as $t;
+ a[1] = 2 as $t;
+ a[2] = 3 as $t;
+ assert_eq!(1 as $t, a[0]);
+ assert_eq!(2 as $t, a[1]);
+ assert_eq!(3 as $t, a[2]);
+ assert_eq!((1 as $t, 2 as $t, 3 as $t), a.into());
+ });
+
+ glam_test!(test_dot_unsigned, {
+ let x = $new(1 as $t, 0 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t, 0 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ });
+
+ glam_test!(test_cross, {
+ let x = $new(1 as $t, 0 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t, 0 as $t);
+ let z = $new(0 as $t, 0 as $t, 1 as $t);
+ assert_eq!(y, z.cross(x));
+ assert_eq!(z, x.cross(y));
+ });
+
+ glam_test!(test_ops, {
+ let a = $new(2 as $t, 4 as $t, 8 as $t);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t), a + a);
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t), a - a);
+ assert_eq!($new(4 as $t, 16 as $t, 64 as $t), a * a);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t), a * 2 as $t);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t), 2 as $t * a);
+ assert_eq!($new(1 as $t, 1 as $t, 1 as $t), a / a);
+ assert_eq!($new(1 as $t, 2 as $t, 4 as $t), a / 2 as $t);
+ assert_eq!($new(4 as $t, 2 as $t, 1 as $t), 8 as $t / a);
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t), a % a);
+ assert_eq!($new(0 as $t, 1 as $t, 1 as $t), a % (a - 1 as $t));
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t), a % 1 as $t);
+ assert_eq!($new(2 as $t, 1 as $t, 2 as $t), a % 3 as $t);
+ assert_eq!($new(1 as $t, 1 as $t, 1 as $t), 17 as $t % a);
+ assert_eq!($new(2 as $t, 4 as $t, 0 as $t), a % 8 as $t);
+ });
+
+ glam_test!(test_assign_ops, {
+ let a = $new(1 as $t, 2 as $t, 3 as $t);
+ let mut b = a;
+ b += a;
+ assert_eq!($new(2 as $t, 4 as $t, 6 as $t), b);
+ b -= a;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t), b);
+ b *= a;
+ assert_eq!($new(1 as $t, 4 as $t, 9 as $t), b);
+ b /= a;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t), b);
+ b *= 2 as $t;
+ assert_eq!($new(2 as $t, 4 as $t, 6 as $t), b);
+ b /= 2 as $t;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t), b);
+ b %= (b + 1 as $t);
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t), b);
+ b %= b;
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t), b);
+ });
+
+ glam_test!(test_min_max, {
+ let a = $new(3 as $t, 5 as $t, 1 as $t);
+ let b = $new(4 as $t, 2 as $t, 6 as $t);
+ assert_eq!((3 as $t, 2 as $t, 1 as $t), a.min(b).into());
+ assert_eq!((3 as $t, 2 as $t, 1 as $t), b.min(a).into());
+ assert_eq!((4 as $t, 5 as $t, 6 as $t), a.max(b).into());
+ assert_eq!((4 as $t, 5 as $t, 6 as $t), b.max(a).into());
+ });
+
+ glam_test!(test_clamp, {
+ fn vec(x: i32, y: i32, z: i32) -> $vec3 {
+ $vec3::new(x as $t, y as $t, z as $t)
+ }
+ let min = vec(1, 3, 3);
+ let max = vec(6, 8, 8);
+ assert_eq!(vec(0, 0, 0).clamp(min, max), vec(1, 3, 3));
+ assert_eq!(vec(2, 2, 2).clamp(min, max), vec(2, 3, 3));
+ assert_eq!(vec(4, 5, 5).clamp(min, max), vec(4, 5, 5));
+ assert_eq!(vec(6, 6, 6).clamp(min, max), vec(6, 6, 6));
+ assert_eq!(vec(7, 7, 7).clamp(min, max), vec(6, 7, 7));
+ assert_eq!(vec(9, 9, 9).clamp(min, max), vec(6, 8, 8));
+
+ should_glam_assert!({ $vec3::clamp($vec3::ZERO, $vec3::ONE, $vec3::ZERO) });
+ });
+
+ glam_test!(test_hmin_hmax, {
+ let a = $new(2 as $t, 3 as $t, 1 as $t);
+ assert_eq!(1 as $t, a.min_element());
+ assert_eq!(3 as $t, a.max_element());
+ });
+
+ glam_test!(test_eq, {
+ let a = $new(1 as $t, 1 as $t, 1 as $t);
+ let b = $new(1 as $t, 2 as $t, 3 as $t);
+ assert!(a.cmpeq(a).all());
+ assert!(b.cmpeq(b).all());
+ assert!(a.cmpne(b).any());
+ assert!(b.cmpne(a).any());
+ assert!(b.cmpeq(a).any());
+ });
+
+ glam_test!(test_cmp, {
+ assert!(!$mask::default().any());
+ assert!(!$mask::default().all());
+ assert_eq!($mask::default().bitmask(), 0x0);
+ let a = $new(1 as $t, 1 as $t, 1 as $t);
+ let b = $new(2 as $t, 2 as $t, 2 as $t);
+ let c = $new(1 as $t, 1 as $t, 2 as $t);
+ let d = $new(2 as $t, 1 as $t, 1 as $t);
+ assert_eq!(a.cmplt(a).bitmask(), 0x0);
+ assert_eq!(a.cmplt(b).bitmask(), 0x7);
+ assert_eq!(a.cmplt(c).bitmask(), 0x4);
+ assert_eq!(c.cmple(a).bitmask(), 0x3);
+ assert_eq!(a.cmplt(d).bitmask(), 0x1);
+ assert!(a.cmplt(b).all());
+ assert!(a.cmplt(c).any());
+ assert!(a.cmple(b).all());
+ assert!(a.cmple(a).all());
+ assert!(b.cmpgt(a).all());
+ assert!(b.cmpge(a).all());
+ assert!(b.cmpge(b).all());
+ assert!(!(a.cmpge(c).all()));
+ assert!(c.cmple(c).all());
+ assert!(c.cmpge(c).all());
+ assert!(a == a);
+ });
+
+ glam_test!(test_extend_truncate, {
+ let a = $new(1 as $t, 2 as $t, 3 as $t);
+ let b = a.extend(4 as $t);
+ assert_eq!((1 as $t, 2 as $t, 3 as $t, 4 as $t), b.into());
+ let c = $vec3::from(b.truncate());
+ assert_eq!(a, c);
+ });
+
+ glam_test!(test_mask, {
+ let mut a = $vec3::ZERO;
+ a.x = 1 as $t;
+ a.y = 1 as $t;
+ a.z = 1 as $t;
+ assert!(!a.cmpeq($vec3::ZERO).any());
+ assert!(a.cmpeq($vec3::ONE).all());
+ });
+
+ // #[test]
+ // fn test_mask_as_ref() {
+ // assert_eq!($mask::new(false, false, false).as_ref(), &[0, 0, 0]);
+ // assert_eq!($mask::new(true, false, false).as_ref(), &[!0, 0, 0]);
+ // assert_eq!($mask::new(false, true, true).as_ref(), &[0, !0, !0]);
+ // assert_eq!($mask::new(false, true, false).as_ref(), &[0, !0, 0]);
+ // assert_eq!($mask::new(true, false, true).as_ref(), &[!0, 0, !0]);
+ // assert_eq!($mask::new(true, true, true).as_ref(), &[!0, !0, !0]);
+ // }
+
+ glam_test!(test_mask_from, {
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(false, false, false)),
+ [0, 0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(true, false, false)),
+ [!0, 0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(false, true, true)),
+ [0, !0, !0]
+ );
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(false, true, false)),
+ [0, !0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(true, false, true)),
+ [!0, 0, !0]
+ );
+ assert_eq!(
+ Into::<[u32; 3]>::into($mask::new(true, true, true)),
+ [!0, !0, !0]
+ );
+ });
+
+ glam_test!(test_mask_bitmask, {
+ assert_eq!($mask::new(false, false, false).bitmask(), 0b000);
+ assert_eq!($mask::new(true, false, false).bitmask(), 0b001);
+ assert_eq!($mask::new(false, true, true).bitmask(), 0b110);
+ assert_eq!($mask::new(false, true, false).bitmask(), 0b010);
+ assert_eq!($mask::new(true, false, true).bitmask(), 0b101);
+ assert_eq!($mask::new(true, true, true).bitmask(), 0b111);
+ });
+
+ glam_test!(test_mask_any, {
+ assert_eq!($mask::new(false, false, false).any(), false);
+ assert_eq!($mask::new(true, false, false).any(), true);
+ assert_eq!($mask::new(false, true, false).any(), true);
+ assert_eq!($mask::new(false, false, true).any(), true);
+ });
+
+ glam_test!(test_mask_all, {
+ assert_eq!($mask::new(true, true, true).all(), true);
+ assert_eq!($mask::new(false, true, true).all(), false);
+ assert_eq!($mask::new(true, false, true).all(), false);
+ assert_eq!($mask::new(true, true, false).all(), false);
+ });
+
+ glam_test!(test_mask_select, {
+ let a = $vec3::new(1 as $t, 2 as $t, 3 as $t);
+ let b = $vec3::new(4 as $t, 5 as $t, 6 as $t);
+ assert_eq!(
+ $vec3::select($mask::new(true, true, true), a, b),
+ $vec3::new(1 as $t, 2 as $t, 3 as $t),
+ );
+ assert_eq!(
+ $vec3::select($mask::new(true, false, true), a, b),
+ $vec3::new(1 as $t, 5 as $t, 3 as $t),
+ );
+ assert_eq!(
+ $vec3::select($mask::new(false, true, false), a, b),
+ $vec3::new(4 as $t, 2 as $t, 6 as $t),
+ );
+ assert_eq!(
+ $vec3::select($mask::new(false, false, false), a, b),
+ $vec3::new(4 as $t, 5 as $t, 6 as $t),
+ );
+ });
+
+ glam_test!(test_mask_and, {
+ assert_eq!(
+ ($mask::new(false, false, false) & $mask::new(false, false, false)).bitmask(),
+ 0b000,
+ );
+ assert_eq!(
+ ($mask::new(true, true, true) & $mask::new(true, true, true)).bitmask(),
+ 0b111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true) & $mask::new(false, true, false)).bitmask(),
+ 0b000,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true) & $mask::new(true, true, true)).bitmask(),
+ 0b101,
+ );
+
+ let mut mask = $mask::new(true, true, false);
+ mask &= $mask::new(true, false, false);
+ assert_eq!(mask.bitmask(), 0b001);
+ });
+
+ glam_test!(test_mask_or, {
+ assert_eq!(
+ ($mask::new(false, false, false) | $mask::new(false, false, false)).bitmask(),
+ 0b000,
+ );
+ assert_eq!(
+ ($mask::new(true, true, true) | $mask::new(true, true, true)).bitmask(),
+ 0b111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true) | $mask::new(false, true, false)).bitmask(),
+ 0b111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true) | $mask::new(true, false, true)).bitmask(),
+ 0b101,
+ );
+
+ let mut mask = $mask::new(true, true, false);
+ mask |= $mask::new(true, false, false);
+ assert_eq!(mask.bitmask(), 0b011);
+ });
+
+ glam_test!(test_mask_not, {
+ assert_eq!((!$mask::new(false, false, false)).bitmask(), 0b111);
+ assert_eq!((!$mask::new(true, true, true)).bitmask(), 0b000);
+ assert_eq!((!$mask::new(true, false, true)).bitmask(), 0b010);
+ assert_eq!((!$mask::new(false, true, false)).bitmask(), 0b101);
+ });
+
+ glam_test!(test_mask_fmt, {
+ let a = $mask::new(true, false, false);
+
+ // // debug fmt
+ // assert_eq!(
+ // format!("{:?}", a),
+ // format!("{}(0xffffffff, 0x0, 0x0)", stringify!($mask))
+ // );
+
+ // display fmt
+ assert_eq!(format!("{}", a), "[true, false, false]");
+ });
+
+ glam_test!(test_mask_eq, {
+ let a = $mask::new(true, false, true);
+ let b = $mask::new(true, false, true);
+ let c = $mask::new(false, true, true);
+
+ assert_eq!(a, b);
+ assert_eq!(b, a);
+ assert_ne!(a, c);
+ assert_ne!(b, c);
+ });
+
+ glam_test!(test_mask_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $mask::new(true, false, true);
+ let b = $mask::new(true, false, true);
+ let c = $mask::new(false, true, true);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+
+ glam_test!(test_to_from_slice, {
+ let v = $vec3::new(1 as $t, 2 as $t, 3 as $t);
+ let mut a = [0 as $t, 0 as $t, 0 as $t];
+ v.write_to_slice(&mut a);
+ assert_eq!(v, $vec3::from_slice(&a));
+
+ should_panic!({ $vec3::ONE.write_to_slice(&mut [0 as $t; 2]) });
+ should_panic!({ $vec3::from_slice(&[0 as $t; 2]) });
+ });
+
+ glam_test!(test_sum, {
+ let one = $vec3::ONE;
+ assert_eq!(vec![one, one].iter().sum::<$vec3>(), one + one);
+ });
+
+ glam_test!(test_product, {
+ let two = $vec3::new(2 as $t, 2 as $t, 2 as $t);
+ assert_eq!(vec![two, two].iter().product::<$vec3>(), two * two);
+ });
+ };
+}
+
+macro_rules! impl_vec3_signed_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec3:ident, $mask:ident) => {
+ impl_vec3_tests!($t, $const_new, $new, $vec3, $mask);
+
+ glam_test!(test_neg, {
+ let a = $new(1 as $t, 2 as $t, 3 as $t);
+ assert_eq!((-1 as $t, -2 as $t, -3 as $t), (-a).into());
+ });
+
+ glam_test!(test_dot_signed, {
+ let x = $new(1 as $t, 0 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t, 0 as $t);
+ let z = $new(0 as $t, 0 as $t, 1 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ assert_eq!(-1 as $t, z.dot(-z));
+ });
+ };
+}
+
+macro_rules! impl_vec3_eq_hash_tests {
+ ($t:ident, $new:ident) => {
+ glam_test!(test_vec3_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $new(1 as $t, 2 as $t, 3 as $t);
+ let b = $new(1 as $t, 2 as $t, 3 as $t);
+ let c = $new(3 as $t, 2 as $t, 1 as $t);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+ };
+}
+
+macro_rules! impl_vec3_float_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec3:ident, $mask:ident) => {
+ impl_vec3_signed_tests!($t, $const_new, $new, $vec3, $mask);
+ impl_vec_float_normalize_tests!($t, $vec3);
+
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_nan, {
+ assert!($vec3::NAN.is_nan());
+ assert!(!$vec3::NAN.is_finite());
+ });
+
+ glam_test!(test_vec3_consts, {
+ assert_eq!($vec3::ZERO, $new(0 as $t, 0 as $t, 0 as $t));
+ assert_eq!($vec3::ONE, $new(1 as $t, 1 as $t, 1 as $t));
+ assert_eq!($vec3::X, $new(1 as $t, 0 as $t, 0 as $t));
+ assert_eq!($vec3::Y, $new(0 as $t, 1 as $t, 0 as $t));
+ assert_eq!($vec3::Z, $new(0 as $t, 0 as $t, 1 as $t));
+ });
+
+ glam_test!(test_funcs, {
+ let x = $new(1.0, 0.0, 0.0);
+ let y = $new(0.0, 1.0, 0.0);
+ let z = $new(0.0, 0.0, 1.0);
+ assert_eq!(y, z.cross(x));
+ assert_eq!(z, x.cross(y));
+ assert_eq!(4.0, (2.0 * x).length_squared());
+ assert_eq!(9.0, (-3.0 * y).length_squared());
+ assert_eq!(16.0, (4.0 * z).length_squared());
+ assert_eq!(2.0, (-2.0 * x).length());
+ assert_eq!(3.0, (3.0 * y).length());
+ assert_eq!(4.0, (-4.0 * z).length());
+ assert_eq!(2.0, x.distance_squared(y));
+ assert_eq!(13.0, (2.0 * x).distance_squared(-3.0 * z));
+ assert_eq!((2.0 as $t).sqrt(), x.distance(y));
+ assert_eq!(5.0, (3.0 * x).distance(-4.0 * y));
+ assert_eq!(13.0, (-5.0 * z).distance(12.0 * y));
+ assert_eq!(x, (2.0 * x).normalize());
+ assert_eq!(
+ 1.0 * 4.0 + 2.0 * 5.0 + 3.0 * 6.0,
+ $new(1.0, 2.0, 3.0).dot($new(4.0, 5.0, 6.0))
+ );
+ assert_eq!(
+ 2.0 * 2.0 + 3.0 * 3.0 + 4.0 * 4.0,
+ $new(2.0, 3.0, 4.0).length_squared()
+ );
+ assert_eq!(
+ (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0).sqrt(),
+ $new(2.0, 3.0, 4.0).length()
+ );
+ assert_eq!(
+ 1.0 / (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0).sqrt(),
+ $new(2.0, 3.0, 4.0).length_recip()
+ );
+ assert!($new(2.0, 3.0, 4.0).normalize().is_normalized());
+ assert_approx_eq!(
+ $new(2.0, 3.0, 4.0) / (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0).sqrt(),
+ $new(2.0, 3.0, 4.0).normalize()
+ );
+ assert_eq!($new(0.5, 0.25, 0.125), $new(2.0, 4.0, 8.0).recip());
+ });
+
+ glam_test!(test_project_reject, {
+ assert_eq!(
+ $new(0.0, 0.0, 1.0),
+ $new(1.0, 0.0, 1.0).project_onto($new(0.0, 0.0, 2.0))
+ );
+ assert_eq!(
+ $new(1.0, 0.0, 0.0),
+ $new(1.0, 0.0, 1.0).reject_from($new(0.0, 0.0, 2.0))
+ );
+ assert_eq!(
+ $new(0.0, 0.0, 1.0),
+ $new(1.0, 0.0, 1.0).project_onto_normalized($new(0.0, 0.0, 1.0))
+ );
+ assert_eq!(
+ $new(1.0, 0.0, 0.0),
+ $new(1.0, 0.0, 1.0).reject_from_normalized($new(0.0, 0.0, 1.0))
+ );
+ should_glam_assert!({ $vec3::ONE.project_onto($vec3::ZERO) });
+ should_glam_assert!({ $vec3::ONE.reject_from($vec3::ZERO) });
+ should_glam_assert!({ $vec3::ONE.project_onto_normalized($vec3::ONE) });
+ should_glam_assert!({ $vec3::ONE.reject_from_normalized($vec3::ONE) });
+ });
+
+ glam_test!(test_signum, {
+ assert_eq!($vec3::ZERO.signum(), $vec3::ONE);
+ assert_eq!(-$vec3::ZERO.signum(), -$vec3::ONE);
+ assert_eq!($vec3::ONE.signum(), $vec3::ONE);
+ assert_eq!((-$vec3::ONE).signum(), -$vec3::ONE);
+ assert_eq!($vec3::splat(INFINITY).signum(), $vec3::ONE);
+ assert_eq!($vec3::splat(NEG_INFINITY).signum(), -$vec3::ONE);
+ assert!($vec3::splat(NAN).signum().is_nan_mask().all());
+ });
+
+ glam_test!(test_abs, {
+ assert_eq!($vec3::ZERO.abs(), $vec3::ZERO);
+ assert_eq!($vec3::ONE.abs(), $vec3::ONE);
+ assert_eq!((-$vec3::ONE).abs(), $vec3::ONE);
+ });
+
+ glam_test!(test_round, {
+ assert_eq!($vec3::new(1.35, 0.0, 0.0).round().x, 1.0);
+ assert_eq!($vec3::new(0.0, 1.5, 0.0).round().y, 2.0);
+ assert_eq!($vec3::new(0.0, 0.0, -15.5).round().z, -16.0);
+ assert_eq!($vec3::new(0.0, 0.0, 0.0).round().z, 0.0);
+ assert_eq!($vec3::new(0.0, 21.1, 0.0).round().y, 21.0);
+ assert_eq!($vec3::new(0.0, 11.123, 0.0).round().y, 11.0);
+ assert_eq!($vec3::new(0.0, 11.499, 0.0).round().y, 11.0);
+ assert_eq!(
+ $vec3::new(NEG_INFINITY, INFINITY, 0.0).round(),
+ $vec3::new(NEG_INFINITY, INFINITY, 0.0)
+ );
+ assert!($vec3::new(NAN, 0.0, 0.0).round().x.is_nan());
+ });
+
+ glam_test!(test_floor, {
+ assert_eq!(
+ $vec3::new(1.35, 1.5, -1.5).floor(),
+ $vec3::new(1.0, 1.0, -2.0)
+ );
+ assert_eq!(
+ $vec3::new(INFINITY, NEG_INFINITY, 0.0).floor(),
+ $vec3::new(INFINITY, NEG_INFINITY, 0.0)
+ );
+ assert!($vec3::new(NAN, 0.0, 0.0).floor().x.is_nan());
+ assert_eq!(
+ $vec3::new(-2000000.123, 10000000.123, 1000.9).floor(),
+ $vec3::new(-2000001.0, 10000000.0, 1000.0)
+ );
+ });
+
+ glam_test!(test_fract, {
+ assert_approx_eq!(
+ $vec3::new(1.35, 1.5, -1.5).fract(),
+ $vec3::new(0.35, 0.5, 0.5)
+ );
+ assert_approx_eq!(
+ $vec3::new(-200000.123, 1000000.123, 1000.9).fract(),
+ $vec3::new(0.877, 0.123, 0.9),
+ 0.002
+ );
+ });
+
+ glam_test!(test_ceil, {
+ assert_eq!(
+ $vec3::new(1.35, 1.5, -1.5).ceil(),
+ $vec3::new(2.0, 2.0, -1.0)
+ );
+ assert_eq!(
+ $vec3::new(INFINITY, NEG_INFINITY, 0.0).ceil(),
+ $vec3::new(INFINITY, NEG_INFINITY, 0.0)
+ );
+ assert!($vec3::new(NAN, 0.0, 0.0).ceil().x.is_nan());
+ assert_eq!(
+ $vec3::new(-2000000.123, 1000000.123, 1000.9).ceil(),
+ $vec3::new(-2000000.0, 1000001.0, 1001.0)
+ );
+ });
+
+ glam_test!(test_lerp, {
+ let v0 = $vec3::new(-1.0, -1.0, -1.0);
+ let v1 = $vec3::new(1.0, 1.0, 1.0);
+ assert_approx_eq!(v0, v0.lerp(v1, 0.0));
+ assert_approx_eq!(v1, v0.lerp(v1, 1.0));
+ assert_approx_eq!($vec3::ZERO, v0.lerp(v1, 0.5));
+ });
+
+ glam_test!(test_is_finite, {
+ assert!($vec3::new(0.0, 0.0, 0.0).is_finite());
+ assert!($vec3::new(-1e-10, 1.0, 1e10).is_finite());
+ assert!(!$vec3::new(INFINITY, 0.0, 0.0).is_finite());
+ assert!(!$vec3::new(0.0, NAN, 0.0).is_finite());
+ assert!(!$vec3::new(0.0, 0.0, NEG_INFINITY).is_finite());
+ assert!(!$vec3::splat(NAN).is_finite());
+ });
+
+ glam_test!(test_powf, {
+ assert_eq!(
+ $vec3::new(2.0, 4.0, 8.0).powf(2.0),
+ $vec3::new(4.0, 16.0, 64.0)
+ );
+ });
+
+ glam_test!(test_exp, {
+ assert_eq!(
+ $vec3::new(1.0, 2.0, 3.0).exp(),
+ $vec3::new((1.0 as $t).exp(), (2.0 as $t).exp(), (3.0 as $t).exp())
+ );
+ });
+
+ glam_test!(test_angle_between, {
+ let angle = $vec3::new(1.0, 0.0, 1.0).angle_between($vec3::new(1.0, 1.0, 0.0));
+ assert_approx_eq!(core::$t::consts::FRAC_PI_3, angle, 1e-6);
+
+ let angle = $vec3::new(10.0, 0.0, 10.0).angle_between($vec3::new(5.0, 5.0, 0.0));
+ assert_approx_eq!(core::$t::consts::FRAC_PI_3, angle, 1e-6);
+
+ let angle = $vec3::new(-1.0, 0.0, -1.0).angle_between($vec3::new(1.0, -1.0, 0.0));
+ assert_approx_eq!(2.0 * core::$t::consts::FRAC_PI_3, angle, 1e-6);
+ });
+
+ glam_test!(test_clamp_length, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec3::new(12.0, 16.0, 0.0).clamp_length(7.0, 10.0),
+ $vec3::new(6.0, 8.0, 0.0) // shortened to length 10.0
+ );
+ // In the middle is unchanged
+ assert_eq!(
+ $vec3::new(2.0, 1.0, 0.0).clamp_length(0.5, 5.0),
+ $vec3::new(2.0, 1.0, 0.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec3::new(0.6, 0.8, 0.0).clamp_length(10.0, 20.0),
+ $vec3::new(6.0, 8.0, 0.0) // lengthened to length 10.0
+ );
+ should_glam_assert!({ $vec3::ONE.clamp_length(1.0, 0.0) });
+ });
+
+ glam_test!(test_clamp_length_max, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec3::new(12.0, 16.0, 0.0).clamp_length_max(10.0),
+ $vec3::new(6.0, 8.0, 0.0) // shortened to length 10.0
+ );
+ // Not too long is unchanged
+ assert_eq!(
+ $vec3::new(2.0, 1.0, 0.0).clamp_length_max(5.0),
+ $vec3::new(2.0, 1.0, 0.0) // unchanged
+ );
+ });
+
+ glam_test!(test_clamp_length_min, {
+ // Not too short is unchanged
+ assert_eq!(
+ $vec3::new(2.0, 1.0, 0.0).clamp_length_min(0.5),
+ $vec3::new(2.0, 1.0, 0.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec3::new(0.6, 0.8, 0.0).clamp_length_min(10.0),
+ $vec3::new(6.0, 8.0, 0.0) // lengthened to length 10.0
+ );
+ });
+
+ glam_test!(test_any_ortho, {
+ let eps = 2.0 * core::$t::EPSILON;
+
+ for &v in &vec3_float_test_vectors!($vec3) {
+ let orthogonal = v.any_orthogonal_vector();
+ assert!(orthogonal != $vec3::ZERO && orthogonal.is_finite());
+ assert!(v.dot(orthogonal).abs() < eps);
+
+ let n = v.normalize();
+
+ let orthonormal = n.any_orthonormal_vector();
+ assert!(orthonormal.is_normalized());
+ assert!(n.dot(orthonormal).abs() < eps);
+
+ let (a, b) = n.any_orthonormal_pair();
+ assert!(a.is_normalized() && n.dot(a).abs() < eps);
+ assert!(b.is_normalized() && n.dot(b).abs() < eps);
+ }
+ });
+
+ glam_test!(test_mul_add, {
+ assert_eq!(
+ $vec3::new(1.0, 1.0, 1.0)
+ .mul_add($vec3::new(0.5, 2.0, -4.0), $vec3::new(-1.0, -1.0, -1.0)),
+ $vec3::new(-0.5, 1.0, -5.0)
+ );
+ });
+ };
+}
+
+macro_rules! impl_vec3_scalar_shift_op_test {
+ ($vec3:ident, $t_min:literal, $t_max:literal, $rhs_min:literal, $rhs_max:literal) => {
+ glam_test!(test_vec3_scalar_shift_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for z in $t_min..$t_max {
+ for rhs in $rhs_min..$rhs_max {
+ assert_eq!(
+ $vec3::new(x, y, z) << rhs,
+ $vec3::new(x << rhs, y << rhs, z << rhs)
+ );
+ assert_eq!(
+ $vec3::new(x, y, z) >> rhs,
+ $vec3::new(x >> rhs, y >> rhs, z >> rhs)
+ );
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec3_scalar_shift_op_tests {
+ ($vec3:ident, $t_min:literal, $t_max:literal) => {
+ mod shift_by_i8 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0i8, 2);
+ }
+ mod shift_by_i16 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0i16, 2);
+ }
+ mod shift_by_i32 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0i32, 2);
+ }
+ mod shift_by_u8 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0u8, 2);
+ }
+ mod shift_by_u16 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0u16, 2);
+ }
+ mod shift_by_u32 {
+ use glam::$vec3;
+ impl_vec3_scalar_shift_op_test!($vec3, $t_min, $t_max, 0u32, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec3_shift_op_test {
+ ($vec3:ident, $rhs:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec3_shift_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ for z1 in $t_min..$t_max {
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ for z2 in $t_min..$t_max {
+ assert_eq!(
+ $vec3::new(x1, y1, z1) << $rhs::new(x2, y2, z2),
+ $vec3::new(x1 << x2, y1 << y2, z1 << z2)
+ );
+ assert_eq!(
+ $vec3::new(x1, y1, z1) >> $rhs::new(x2, y2, z2),
+ $vec3::new(x1 >> x2, y1 >> y2, z1 >> z2)
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec3_shift_op_tests {
+ ($vec3:ident) => {
+ mod shift_ivec3_by_ivec3 {
+ use super::*;
+ impl_vec3_shift_op_test!($vec3, IVec3, 0, 2);
+ }
+ mod shift_ivec3_by_uvec3 {
+ use super::*;
+ impl_vec3_shift_op_test!($vec3, UVec3, 0, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec3_scalar_bit_op_tests {
+ ($vec3:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec3_scalar_bit_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for z in $t_min..$t_max {
+ for rhs in $t_min..$t_max {
+ assert_eq!(
+ $vec3::new(x, y, z) & rhs,
+ $vec3::new(x & rhs, y & rhs, z & rhs)
+ );
+ assert_eq!(
+ $vec3::new(x, y, z) | rhs,
+ $vec3::new(x | rhs, y | rhs, z | rhs)
+ );
+ assert_eq!(
+ $vec3::new(x, y, z) ^ rhs,
+ $vec3::new(x ^ rhs, y ^ rhs, z ^ rhs)
+ );
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec3_bit_op_tests {
+ ($vec3:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec3_bit_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ for z1 in $t_min..$t_max {
+ assert_eq!(!$vec3::new(x1, y1, z1), $vec3::new(!x1, !y1, !z1));
+
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ for z2 in $t_min..$t_max {
+ assert_eq!(
+ $vec3::new(x1, y1, z1) & $vec3::new(x2, y2, z2),
+ $vec3::new(x1 & x2, y1 & y2, z1 & z2)
+ );
+ assert_eq!(
+ $vec3::new(x1, y1, z1) | $vec3::new(x2, y2, z2),
+ $vec3::new(x1 | x2, y1 | y2, z1 | z2)
+ );
+ assert_eq!(
+ $vec3::new(x1, y1, z1) ^ $vec3::new(x2, y2, z2),
+ $vec3::new(x1 ^ x2, y1 ^ y2, z1 ^ z2)
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+mod vec3 {
+ use glam::{const_vec3, vec3, BVec3, Vec3};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(12, mem::size_of::<Vec3>());
+ assert_eq!(4, mem::align_of::<Vec3>());
+ assert_eq!(3, mem::size_of::<BVec3>());
+ assert_eq!(1, mem::align_of::<BVec3>());
+ });
+
+ glam_test!(test_as, {
+ use glam::{DVec3, IVec3, UVec3, Vec3A};
+ assert_eq!(
+ DVec3::new(-1.0, -2.0, -3.0),
+ Vec3::new(-1.0, -2.0, -3.0).as_dvec3()
+ );
+ assert_eq!(
+ IVec3::new(-1, -2, -3),
+ Vec3::new(-1.0, -2.0, -3.0).as_ivec3()
+ );
+ assert_eq!(UVec3::new(1, 2, 3), Vec3::new(1.0, 2.0, 3.0).as_uvec3());
+
+ assert_eq!(
+ DVec3::new(-1.0, -2.0, -3.0),
+ Vec3A::new(-1.0, -2.0, -3.0).as_dvec3()
+ );
+ assert_eq!(
+ IVec3::new(-1, -2, -3),
+ Vec3A::new(-1.0, -2.0, -3.0).as_ivec3()
+ );
+ assert_eq!(UVec3::new(1, 2, 3), Vec3A::new(1.0, 2.0, 3.0).as_uvec3());
+
+ assert_eq!(
+ IVec3::new(-1, -2, -3),
+ DVec3::new(-1.0, -2.0, -3.0).as_ivec3()
+ );
+ assert_eq!(UVec3::new(1, 2, 3), DVec3::new(1.0, 2.0, 3.0).as_uvec3());
+ assert_eq!(
+ Vec3::new(-1.0, -2.0, -3.0),
+ DVec3::new(-1.0, -2.0, -3.0).as_vec3()
+ );
+ assert_eq!(
+ Vec3A::new(-1.0, -2.0, -3.0),
+ DVec3::new(-1.0, -2.0, -3.0).as_vec3a()
+ );
+
+ assert_eq!(
+ DVec3::new(-1.0, -2.0, -3.0),
+ IVec3::new(-1, -2, -3).as_dvec3()
+ );
+ assert_eq!(UVec3::new(1, 2, 3), IVec3::new(1, 2, 3).as_uvec3());
+ assert_eq!(
+ Vec3::new(-1.0, -2.0, -3.0),
+ IVec3::new(-1, -2, -3).as_vec3()
+ );
+ assert_eq!(
+ Vec3A::new(-1.0, -2.0, -3.0),
+ IVec3::new(-1, -2, -3).as_vec3a()
+ );
+
+ assert_eq!(DVec3::new(1.0, 2.0, 3.0), UVec3::new(1, 2, 3).as_dvec3());
+ assert_eq!(IVec3::new(1, 2, 3), UVec3::new(1, 2, 3).as_ivec3());
+ assert_eq!(Vec3::new(1.0, 2.0, 3.0), UVec3::new(1, 2, 3).as_vec3());
+ assert_eq!(Vec3A::new(1.0, 2.0, 3.0), UVec3::new(1, 2, 3).as_vec3a());
+ });
+
+ impl_vec3_float_tests!(f32, const_vec3, vec3, Vec3, BVec3);
+}
+
+mod vec3a {
+ use glam::{const_vec3a, vec3a, BVec3A, Vec3A, Vec4};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<Vec3A>());
+ assert_eq!(16, mem::align_of::<Vec3A>());
+ if cfg!(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+ )) {
+ assert_eq!(16, mem::size_of::<BVec3A>());
+ assert_eq!(16, mem::align_of::<BVec3A>());
+ } else {
+ // BVec3A aliases BVec3
+ assert_eq!(3, mem::size_of::<BVec3A>());
+ assert_eq!(1, mem::align_of::<BVec3A>());
+ }
+ });
+
+ glam_test!(test_mask_align16, {
+ // make sure the unused 'w' value doesn't break Vec3A behaviour
+ let a = Vec4::ZERO;
+ let mut b = Vec3A::from(a);
+ b.x = 1.0;
+ b.y = 1.0;
+ b.z = 1.0;
+ assert!(!b.cmpeq(Vec3A::ZERO).any());
+ assert!(b.cmpeq(Vec3A::splat(1.0)).all());
+ });
+
+ #[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+ #[test]
+ fn test_m128() {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+
+ #[repr(C, align(16))]
+ struct F32x3_A16([f32; 3]);
+
+ let v0 = Vec3A::new(1.0, 2.0, 3.0);
+ let m0: __m128 = v0.into();
+ let mut a0 = F32x3_A16([0.0, 0.0, 0.0]);
+ unsafe {
+ _mm_store_ps(a0.0.as_mut_ptr(), m0);
+ }
+ assert_eq!([1.0, 2.0, 3.0], a0.0);
+ let v1 = Vec3A::from(m0);
+ assert_eq!(v0, v1);
+
+ #[repr(C, align(16))]
+ struct U32x3_A16([u32; 3]);
+
+ let v0 = BVec3A::new(true, false, true);
+ let m0: __m128 = v0.into();
+ let mut a0 = U32x3_A16([1, 2, 3]);
+ unsafe {
+ _mm_store_ps(a0.0.as_mut_ptr() as *mut f32, m0);
+ }
+ assert_eq!([0xffffffff, 0, 0xffffffff], a0.0);
+ }
+
+ glam_test!(test_min_max_from_vec4, {
+ // checks that the 4th element is unused.
+ let v1 = Vec3A::from(Vec4::new(1.0, 2.0, 3.0, 4.0));
+ assert_eq!(v1.max_element(), 3.0);
+ let v2 = Vec3A::from(Vec4::new(4.0, 3.0, 2.0, 1.0));
+ assert_eq!(v2.min_element(), 2.0);
+ });
+
+ impl_vec3_float_tests!(f32, const_vec3a, vec3a, Vec3A, BVec3A);
+}
+
+mod dvec3 {
+ use glam::{const_dvec3, dvec3, BVec3, DVec3};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(24, mem::size_of::<DVec3>());
+ assert_eq!(mem::align_of::<f64>(), mem::align_of::<DVec3>());
+ assert_eq!(3, mem::size_of::<BVec3>());
+ assert_eq!(1, mem::align_of::<BVec3>());
+ });
+
+ impl_vec3_float_tests!(f64, const_dvec3, dvec3, DVec3, BVec3);
+}
+
+mod ivec3 {
+ use glam::{const_ivec3, ivec3, BVec3, IVec3, UVec3};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(12, mem::size_of::<IVec3>());
+ assert_eq!(4, mem::align_of::<IVec3>());
+ assert_eq!(3, mem::size_of::<BVec3>());
+ assert_eq!(1, mem::align_of::<BVec3>());
+ });
+
+ impl_vec3_signed_tests!(i32, const_ivec3, ivec3, IVec3, BVec3);
+ impl_vec3_eq_hash_tests!(i32, ivec3);
+
+ impl_vec3_scalar_shift_op_tests!(IVec3, -2, 2);
+ impl_vec3_shift_op_tests!(IVec3);
+
+ impl_vec3_scalar_bit_op_tests!(IVec3, -2, 2);
+ impl_vec3_bit_op_tests!(IVec3, -2, 2);
+}
+
+mod uvec3 {
+ use glam::{const_uvec3, uvec3, BVec3, IVec3, UVec3};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(12, mem::size_of::<UVec3>());
+ assert_eq!(4, mem::align_of::<UVec3>());
+ assert_eq!(3, mem::size_of::<BVec3>());
+ assert_eq!(1, mem::align_of::<BVec3>());
+ });
+
+ impl_vec3_tests!(u32, const_uvec3, uvec3, UVec3, BVec3);
+ impl_vec3_eq_hash_tests!(u32, uvec3);
+
+ impl_vec3_scalar_shift_op_tests!(UVec3, 0, 2);
+ impl_vec3_shift_op_tests!(UVec3);
+
+ impl_vec3_scalar_bit_op_tests!(UVec3, 0, 2);
+ impl_vec3_bit_op_tests!(UVec3, 0, 2);
+}
diff --git a/tests/vec4.rs b/tests/vec4.rs
new file mode 100644
index 0000000..2683348
--- /dev/null
+++ b/tests/vec4.rs
@@ -0,0 +1,1163 @@
+#[macro_use]
+mod support;
+
+macro_rules! impl_vec4_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec4:ident, $vec3:ident, $vec2:ident, $mask:ident) => {
+ glam_test!(test_const, {
+ const V: $vec4 = $const_new!([1 as $t, 2 as $t, 3 as $t, 4 as $t]);
+ assert_eq!($vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t), V);
+ });
+
+ glam_test!(test_vec4_consts, {
+ assert_eq!($vec4::ZERO, $new(0 as $t, 0 as $t, 0 as $t, 0 as $t));
+ assert_eq!($vec4::ONE, $new(1 as $t, 1 as $t, 1 as $t, 1 as $t));
+ assert_eq!($vec4::X, $new(1 as $t, 0 as $t, 0 as $t, 0 as $t));
+ assert_eq!($vec4::Y, $new(0 as $t, 1 as $t, 0 as $t, 0 as $t));
+ assert_eq!($vec4::Z, $new(0 as $t, 0 as $t, 1 as $t, 0 as $t));
+ assert_eq!($vec4::W, $new(0 as $t, 0 as $t, 0 as $t, 1 as $t));
+ });
+
+ glam_test!(test_new, {
+ let v = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+
+ assert_eq!(v.x, 1 as $t);
+ assert_eq!(v.y, 2 as $t);
+ assert_eq!(v.z, 3 as $t);
+ assert_eq!(v.w, 4 as $t);
+
+ let t = (1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let v = $vec4::from(t);
+ assert_eq!(t, v.into());
+
+ let a = [1 as $t, 2 as $t, 3 as $t, 4 as $t];
+ let v = $vec4::from(a);
+ let a1: [$t; 4] = v.into();
+ assert_eq!(a, a1);
+
+ let v = $vec4::new(t.0, t.1, t.2, t.3);
+ assert_eq!(t, v.into());
+
+ assert_eq!($vec4::new(1 as $t, 0 as $t, 0 as $t, 0 as $t), $vec4::X);
+ assert_eq!($vec4::new(0 as $t, 1 as $t, 0 as $t, 0 as $t), $vec4::Y);
+ assert_eq!($vec4::new(0 as $t, 0 as $t, 1 as $t, 0 as $t), $vec4::Z);
+ assert_eq!($vec4::new(0 as $t, 0 as $t, 0 as $t, 1 as $t), $vec4::W);
+
+ assert_eq!(
+ v,
+ $vec4::from(($vec3::new(1 as $t, 2 as $t, 3 as $t), 4 as $t))
+ );
+
+ assert_eq!(
+ v,
+ $vec4::from((1 as $t, $vec3::new(2 as $t, 3 as $t, 4 as $t)))
+ );
+
+ assert_eq!(
+ v,
+ $vec4::from(($vec2::new(1 as $t, 2 as $t), 3 as $t, 4 as $t))
+ );
+ assert_eq!(
+ v,
+ $vec4::from(($vec2::new(1 as $t, 2 as $t), $vec2::new(3 as $t, 4 as $t)))
+ );
+ });
+
+ glam_test!(test_fmt, {
+ let a = $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ assert_eq!(
+ format!("{:?}", a),
+ format!(
+ "{}({:?}, {:?}, {:?}, {:?})",
+ stringify!($vec4),
+ a.x,
+ a.y,
+ a.z,
+ a.w
+ )
+ );
+ // assert_eq!(
+ // format!("{:#?}", a),
+ // "$vec4(\n 1.0,\n 2.0,\n 3.0,\n 4.0\n)"
+ // );
+ assert_eq!(format!("{}", a), "[1, 2, 3, 4]");
+ });
+
+ glam_test!(test_zero, {
+ let v = $vec4::ZERO;
+ assert_eq!((0 as $t, 0 as $t, 0 as $t, 0 as $t), v.into());
+ assert_eq!(v, $vec4::default());
+ });
+
+ glam_test!(test_splat, {
+ let v = $vec4::splat(1 as $t);
+ assert_eq!($vec4::ONE, v);
+ });
+
+ glam_test!(test_accessors, {
+ let mut a = $vec4::ZERO;
+ a.x = 1 as $t;
+ a.y = 2 as $t;
+ a.z = 3 as $t;
+ a.w = 4 as $t;
+ assert_eq!(1 as $t, a.x);
+ assert_eq!(2 as $t, a.y);
+ assert_eq!(3 as $t, a.z);
+ assert_eq!(4 as $t, a.w);
+ assert_eq!((1 as $t, 2 as $t, 3 as $t, 4 as $t), a.into());
+
+ let mut a = $vec4::ZERO;
+ a[0] = 1 as $t;
+ a[1] = 2 as $t;
+ a[2] = 3 as $t;
+ a[3] = 4 as $t;
+ assert_eq!(1 as $t, a[0]);
+ assert_eq!(2 as $t, a[1]);
+ assert_eq!(3 as $t, a[2]);
+ assert_eq!(4 as $t, a[3]);
+ assert_eq!((1 as $t, 2 as $t, 3 as $t, 4 as $t), a.into());
+ });
+
+ glam_test!(test_dot_unsigned, {
+ let x = $new(1 as $t, 0 as $t, 0 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t, 0 as $t, 0 as $t);
+ let z = $new(0 as $t, 0 as $t, 1 as $t, 0 as $t);
+ let w = $new(0 as $t, 0 as $t, 0 as $t, 1 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ assert_eq!(0 as $t, y.dot(z));
+ assert_eq!(0 as $t, z.dot(w));
+ });
+
+ glam_test!(test_ops, {
+ let a = $new(2 as $t, 4 as $t, 8 as $t, 16 as $t);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t, 32 as $t), a + a);
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t, 0 as $t), a - a);
+ assert_eq!($new(4 as $t, 16 as $t, 64 as $t, 256 as $t), a * a);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t, 32 as $t), a * 2 as $t);
+ assert_eq!($new(4 as $t, 8 as $t, 16 as $t, 32 as $t), 2 as $t * a);
+ assert_eq!($new(1 as $t, 1 as $t, 1 as $t, 1 as $t), a / a);
+ assert_eq!($new(1 as $t, 2 as $t, 4 as $t, 8 as $t), a / 2 as $t);
+ assert_eq!($new(8 as $t, 4 as $t, 2 as $t, 1 as $t), 16 as $t / a);
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t, 0 as $t), a % a);
+ assert_eq!($new(0 as $t, 1 as $t, 1 as $t, 1 as $t), a % (a - 1 as $t));
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t, 0 as $t), a % 1 as $t);
+ assert_eq!($new(2 as $t, 1 as $t, 2 as $t, 1 as $t), a % 3 as $t);
+ assert_eq!($new(1 as $t, 1 as $t, 1 as $t, 1 as $t), 17 as $t % a);
+ assert_eq!($new(2 as $t, 4 as $t, 0 as $t, 0 as $t), a % 8 as $t);
+ });
+
+ glam_test!(test_assign_ops, {
+ let a = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let mut b = a;
+ b += a;
+ assert_eq!($new(2 as $t, 4 as $t, 6 as $t, 8 as $t), b);
+ b -= a;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t, 4 as $t), b);
+ b *= a;
+ assert_eq!($new(1 as $t, 4 as $t, 9 as $t, 16 as $t), b);
+ b /= a;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t, 4 as $t), b);
+ b *= 2 as $t;
+ assert_eq!($new(2 as $t, 4 as $t, 6 as $t, 8 as $t), b);
+ b /= 2 as $t;
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t, 4 as $t), b);
+ b %= (b + 1 as $t);
+ assert_eq!($new(1 as $t, 2 as $t, 3 as $t, 4 as $t), b);
+ b %= b;
+ assert_eq!($new(0 as $t, 0 as $t, 0 as $t, 0 as $t), b);
+ });
+
+ glam_test!(test_min_max, {
+ let a = $new(4 as $t, 6 as $t, 2 as $t, 8 as $t);
+ let b = $new(5 as $t, 3 as $t, 7 as $t, 1 as $t);
+ assert_eq!((4 as $t, 3 as $t, 2 as $t, 1 as $t), a.min(b).into());
+ assert_eq!((4 as $t, 3 as $t, 2 as $t, 1 as $t), b.min(a).into());
+ assert_eq!((5 as $t, 6 as $t, 7 as $t, 8 as $t), a.max(b).into());
+ assert_eq!((5 as $t, 6 as $t, 7 as $t, 8 as $t), b.max(a).into());
+ });
+
+ glam_test!(test_clamp, {
+ fn vec(x: i32, y: i32, z: i32, w: i32) -> $vec4 {
+ $vec4::new(x as $t, y as $t, z as $t, w as $t)
+ }
+ let min = vec(1, 1, 3, 3);
+ let max = vec(6, 6, 8, 8);
+ assert_eq!(vec(0, 0, 0, 0).clamp(min, max), vec(1, 1, 3, 3));
+ assert_eq!(vec(2, 2, 2, 2).clamp(min, max), vec(2, 2, 3, 3));
+ assert_eq!(vec(4, 4, 5, 5).clamp(min, max), vec(4, 4, 5, 5));
+ assert_eq!(vec(6, 6, 6, 6).clamp(min, max), vec(6, 6, 6, 6));
+ assert_eq!(vec(7, 7, 7, 7).clamp(min, max), vec(6, 6, 7, 7));
+ assert_eq!(vec(9, 9, 9, 9).clamp(min, max), vec(6, 6, 8, 8));
+
+ should_glam_assert!({ $vec4::clamp($vec4::ZERO, $vec4::ONE, $vec4::ZERO) });
+ });
+
+ glam_test!(test_hmin_hmax, {
+ let a = $new(3 as $t, 4 as $t, 1 as $t, 2 as $t);
+ assert_eq!(1 as $t, a.min_element());
+ assert_eq!(4 as $t, a.max_element());
+ assert_eq!(
+ 3 as $t,
+ $new(1 as $t, 2 as $t, 3 as $t, 4 as $t)
+ .truncate()
+ .max_element()
+ );
+ assert_eq!(
+ 2 as $t,
+ $new(4 as $t, 3 as $t, 2 as $t, 1 as $t)
+ .truncate()
+ .min_element()
+ );
+ });
+
+ glam_test!(test_eq, {
+ let a = $new(1 as $t, 1 as $t, 1 as $t, 1 as $t);
+ let b = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ assert!(a.cmpeq(a).all());
+ assert!(b.cmpeq(b).all());
+ assert!(a.cmpne(b).any());
+ assert!(b.cmpne(a).any());
+ assert!(b.cmpeq(a).any());
+ });
+
+ glam_test!(test_cmp, {
+ assert!(!$mask::default().any());
+ assert!(!$mask::default().all());
+ assert_eq!($mask::default().bitmask(), 0x0);
+ let a = $new(1 as $t, 1 as $t, 1 as $t, 1 as $t);
+ let b = $new(2 as $t, 2 as $t, 2 as $t, 2 as $t);
+ let c = $new(1 as $t, 1 as $t, 2 as $t, 2 as $t);
+ let d = $new(2 as $t, 1 as $t, 1 as $t, 2 as $t);
+ assert_eq!(a.cmplt(a).bitmask(), 0x0);
+ assert_eq!(a.cmplt(b).bitmask(), 0xf);
+ assert_eq!(a.cmplt(c).bitmask(), 0xc);
+ assert_eq!(c.cmple(a).bitmask(), 0x3);
+ assert_eq!(a.cmplt(d).bitmask(), 0x9);
+ assert!(a.cmplt(b).all());
+ assert!(a.cmplt(c).any());
+ assert!(a.cmple(b).all());
+ assert!(a.cmple(a).all());
+ assert!(b.cmpgt(a).all());
+ assert!(b.cmpge(a).all());
+ assert!(b.cmpge(b).all());
+ assert!(!(a.cmpge(c).all()));
+ assert!(c.cmple(c).all());
+ assert!(c.cmpge(c).all());
+ assert!(a.cmpeq(a).all());
+ assert!(!a.cmpeq(b).all());
+ assert!(a.cmpeq(c).any());
+ assert!(!a.cmpne(a).all());
+ assert!(a.cmpne(b).all());
+ assert!(a.cmpne(c).any());
+ assert!(a == a);
+ });
+
+ glam_test!(test_slice, {
+ let a = [1 as $t, 2 as $t, 3 as $t, 4 as $t];
+ let b = $vec4::from_slice(&a);
+ let c: [$t; 4] = b.into();
+ assert_eq!(a, c);
+ let mut d = [0 as $t, 0 as $t, 0 as $t, 0 as $t];
+ b.write_to_slice(&mut d[..]);
+ assert_eq!(a, d);
+
+ should_panic!({ $vec4::ONE.write_to_slice(&mut [0 as $t; 3]) });
+ should_panic!({ $vec4::from_slice(&[0 as $t; 3]) });
+ });
+
+ // #[test]
+ // fn test_mask_as_ref() {
+ // assert_eq!(
+ // $mask::new(false, false, false, false).as_ref(),
+ // &[0, 0, 0, 0]
+ // );
+ // assert_eq!(
+ // $mask::new(false, false, true, true).as_ref(),
+ // &[0, 0, !0, !0]
+ // );
+ // assert_eq!(
+ // $mask::new(true, true, false, false).as_ref(),
+ // &[!0, !0, 0, 0]
+ // );
+ // assert_eq!(
+ // $mask::new(false, true, false, true).as_ref(),
+ // &[0, !0, 0, !0]
+ // );
+ // assert_eq!(
+ // $mask::new(true, false, true, false).as_ref(),
+ // &[!0, 0, !0, 0]
+ // );
+ // assert_eq!(
+ // $mask::new(true, true, true, true).as_ref(),
+ // &[!0, !0, !0, !0]
+ // );
+ // }
+
+ glam_test!(test_mask_from, {
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(false, false, false, false)),
+ [0, 0, 0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(false, false, true, true)),
+ [0, 0, !0, !0]
+ );
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(true, true, false, false)),
+ [!0, !0, 0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(false, true, false, true)),
+ [0, !0, 0, !0]
+ );
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(true, false, true, false)),
+ [!0, 0, !0, 0]
+ );
+ assert_eq!(
+ Into::<[u32; 4]>::into($mask::new(true, true, true, true)),
+ [!0, !0, !0, !0]
+ );
+ });
+
+ glam_test!(test_mask_bitmask, {
+ assert_eq!($mask::new(false, false, false, false).bitmask(), 0b0000);
+ assert_eq!($mask::new(false, false, true, true).bitmask(), 0b1100);
+ assert_eq!($mask::new(true, true, false, false).bitmask(), 0b0011);
+ assert_eq!($mask::new(false, true, false, true).bitmask(), 0b1010);
+ assert_eq!($mask::new(true, false, true, false).bitmask(), 0b0101);
+ assert_eq!($mask::new(true, true, true, true).bitmask(), 0b1111);
+ });
+
+ glam_test!(test_mask_any, {
+ assert_eq!($mask::new(false, false, false, false).any(), false);
+ assert_eq!($mask::new(true, false, false, false).any(), true);
+ assert_eq!($mask::new(false, true, false, false).any(), true);
+ assert_eq!($mask::new(false, false, true, false).any(), true);
+ assert_eq!($mask::new(false, false, false, true).any(), true);
+ });
+
+ glam_test!(test_mask_all, {
+ assert_eq!($mask::new(true, true, true, true).all(), true);
+ assert_eq!($mask::new(false, true, true, true).all(), false);
+ assert_eq!($mask::new(true, false, true, true).all(), false);
+ assert_eq!($mask::new(true, true, false, true).all(), false);
+ assert_eq!($mask::new(true, true, true, false).all(), false);
+ });
+
+ glam_test!(test_mask_select, {
+ let a = $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let b = $vec4::new(5 as $t, 6 as $t, 7 as $t, 8 as $t);
+ assert_eq!(
+ $vec4::select($mask::new(true, true, true, true), a, b),
+ $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t),
+ );
+ assert_eq!(
+ $vec4::select($mask::new(true, false, true, false), a, b),
+ $vec4::new(1 as $t, 6 as $t, 3 as $t, 8 as $t),
+ );
+ assert_eq!(
+ $vec4::select($mask::new(false, true, false, true), a, b),
+ $vec4::new(5 as $t, 2 as $t, 7 as $t, 4 as $t),
+ );
+ assert_eq!(
+ $vec4::select($mask::new(false, false, false, false), a, b),
+ $vec4::new(5 as $t, 6 as $t, 7 as $t, 8 as $t),
+ );
+ });
+
+ glam_test!(test_mask_and, {
+ assert_eq!(
+ ($mask::new(false, false, false, false) & $mask::new(false, false, false, false))
+ .bitmask(),
+ 0b0000,
+ );
+ assert_eq!(
+ ($mask::new(true, true, true, true) & $mask::new(true, true, true, true)).bitmask(),
+ 0b1111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true, false) & $mask::new(false, true, false, true))
+ .bitmask(),
+ 0b0000,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true, true) & $mask::new(true, true, true, false))
+ .bitmask(),
+ 0b0101,
+ );
+
+ let mut mask = $mask::new(true, true, false, false);
+ mask &= $mask::new(true, false, true, false);
+ assert_eq!(mask.bitmask(), 0b0001);
+ });
+
+ glam_test!(test_mask_or, {
+ assert_eq!(
+ ($mask::new(false, false, false, false) | $mask::new(false, false, false, false))
+ .bitmask(),
+ 0b0000,
+ );
+ assert_eq!(
+ ($mask::new(true, true, true, true) | $mask::new(true, true, true, true)).bitmask(),
+ 0b1111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true, false) | $mask::new(false, true, false, true))
+ .bitmask(),
+ 0b1111,
+ );
+ assert_eq!(
+ ($mask::new(true, false, true, false) | $mask::new(true, false, true, false))
+ .bitmask(),
+ 0b0101,
+ );
+
+ let mut mask = $mask::new(true, true, false, false);
+ mask |= $mask::new(true, false, true, false);
+ assert_eq!(mask.bitmask(), 0b0111);
+ });
+
+ glam_test!(test_mask_not, {
+ assert_eq!((!$mask::new(false, false, false, false)).bitmask(), 0b1111);
+ assert_eq!((!$mask::new(true, true, true, true)).bitmask(), 0b0000);
+ assert_eq!((!$mask::new(true, false, true, false)).bitmask(), 0b1010);
+ assert_eq!((!$mask::new(false, true, false, true)).bitmask(), 0b0101);
+ });
+
+ glam_test!(test_mask_fmt, {
+ let a = $mask::new(true, false, true, false);
+
+ assert_eq!(format!("{}", a), "[true, false, true, false]");
+ // assert_eq!(
+ // format!("{:?}", a),
+ // format!("{}(0xffffffff, 0x0, 0xffffffff, 0x0)", stringify!($mask))
+ // );
+ });
+
+ glam_test!(test_mask_eq, {
+ let a = $mask::new(true, false, true, false);
+ let b = $mask::new(true, false, true, false);
+ let c = $mask::new(false, true, true, false);
+
+ assert_eq!(a, b);
+ assert_eq!(b, a);
+ assert_ne!(a, c);
+ assert_ne!(b, c);
+ });
+
+ glam_test!(test_mask_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $mask::new(true, false, true, false);
+ let b = $mask::new(true, false, true, false);
+ let c = $mask::new(false, true, true, false);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+
+ glam_test!(test_to_from_slice, {
+ let v = $vec4::new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let mut a = [0 as $t, 0 as $t, 0 as $t, 0 as $t];
+ v.write_to_slice(&mut a);
+ assert_eq!(v, $vec4::from_slice(&a));
+ });
+
+ glam_test!(test_sum, {
+ let one = $vec4::ONE;
+ assert_eq!(vec![one, one].iter().sum::<$vec4>(), one + one);
+ });
+
+ glam_test!(test_product, {
+ let two = $vec4::new(2 as $t, 2 as $t, 2 as $t, 2 as $t);
+ assert_eq!(vec![two, two].iter().product::<$vec4>(), two * two);
+ });
+ };
+}
+
+macro_rules! impl_vec4_signed_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec4:ident, $vec3:ident, $vec2:ident, $mask:ident) => {
+ impl_vec4_tests!($t, $const_new, $new, $vec4, $vec3, $vec2, $mask);
+
+ glam_test!(test_neg, {
+ let a = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ assert_eq!((-1 as $t, -2 as $t, -3 as $t, -4 as $t), (-a).into());
+ });
+
+ glam_test!(test_dot_signed, {
+ let x = $new(1 as $t, 0 as $t, 0 as $t, 0 as $t);
+ let y = $new(0 as $t, 1 as $t, 0 as $t, 0 as $t);
+ let z = $new(0 as $t, 0 as $t, 1 as $t, 0 as $t);
+ let w = $new(0 as $t, 0 as $t, 0 as $t, 1 as $t);
+ assert_eq!(1 as $t, x.dot(x));
+ assert_eq!(0 as $t, x.dot(y));
+ assert_eq!(0 as $t, x.dot(-z));
+ assert_eq!(-1 as $t, w.dot(-w));
+ });
+ };
+}
+
+macro_rules! impl_vec4_eq_hash_tests {
+ ($t:ident, $new:ident) => {
+ glam_test!(test_ve2_hash, {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+
+ let a = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let b = $new(1 as $t, 2 as $t, 3 as $t, 4 as $t);
+ let c = $new(3 as $t, 2 as $t, 1 as $t, 4 as $t);
+
+ let mut hasher = DefaultHasher::new();
+ a.hash(&mut hasher);
+ let a_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ b.hash(&mut hasher);
+ let b_hashed = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ c.hash(&mut hasher);
+ let c_hashed = hasher.finish();
+
+ assert_eq!(a, b);
+ assert_eq!(a_hashed, b_hashed);
+ assert_ne!(a, c);
+ assert_ne!(a_hashed, c_hashed);
+ });
+ };
+}
+
+macro_rules! impl_vec4_float_tests {
+ ($t:ident, $const_new:ident, $new:ident, $vec4:ident, $vec3:ident, $vec2:ident, $mask:ident) => {
+ impl_vec4_signed_tests!($t, $const_new, $new, $vec4, $vec3, $vec2, $mask);
+ impl_vec_float_normalize_tests!($t, $vec4);
+
+ use core::$t::INFINITY;
+ use core::$t::NAN;
+ use core::$t::NEG_INFINITY;
+
+ glam_test!(test_vec4_nan, {
+ assert!($vec4::NAN.is_nan());
+ assert!(!$vec4::NAN.is_finite());
+ });
+
+ glam_test!(test_funcs, {
+ let x = $new(1.0, 0.0, 0.0, 0.0);
+ let y = $new(0.0, 1.0, 0.0, 0.0);
+ let z = $new(0.0, 0.0, 1.0, 0.0);
+ let w = $new(0.0, 0.0, 0.0, 1.0);
+ assert_eq!(4.0, (2.0 * x).length_squared());
+ assert_eq!(9.0, (-3.0 * y).length_squared());
+ assert_eq!(16.0, (4.0 * z).length_squared());
+ assert_eq!(64.0, (8.0 * w).length_squared());
+ assert_eq!(2.0, (-2.0 * x).length());
+ assert_eq!(3.0, (3.0 * y).length());
+ assert_eq!(4.0, (-4.0 * z).length());
+ assert_eq!(5.0, (-5.0 * w).length());
+ assert_eq!(2.0, x.distance_squared(y));
+ assert_eq!(13.0, (2.0 * x).distance_squared(-3.0 * z));
+ assert_eq!((2.0 as $t).sqrt(), w.distance(y));
+ assert_eq!(5.0, (3.0 * x).distance(-4.0 * y));
+ assert_eq!(13.0, (-5.0 * w).distance(12.0 * y));
+ assert_eq!(x, (2.0 * x).normalize());
+ assert_eq!(
+ 1.0 * 5.0 + 2.0 * 6.0 + 3.0 * 7.0 + 4.0 * 8.0,
+ $new(1.0, 2.0, 3.0, 4.0).dot($new(5.0, 6.0, 7.0, 8.0))
+ );
+ assert_eq!(
+ 2.0 * 2.0 + 3.0 * 3.0 + 4.0 * 4.0 + 5.0 * 5.0,
+ $new(2.0, 3.0, 4.0, 5.0).length_squared()
+ );
+ assert_eq!(
+ (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0 + 5.0 * 5.0).sqrt(),
+ $new(2.0, 3.0, 4.0, 5.0).length()
+ );
+ assert_eq!(
+ 1.0 / (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0 + 5.0 * 5.0).sqrt(),
+ $new(2.0, 3.0, 4.0, 5.0).length_recip()
+ );
+ assert!($new(2.0, 3.0, 4.0, 5.0).normalize().is_normalized());
+ assert_approx_eq!(
+ $new(2.0, 3.0, 4.0, 5.0)
+ / (2.0 as $t * 2.0 + 3.0 * 3.0 + 4.0 * 4.0 + 5.0 * 5.0).sqrt(),
+ $new(2.0, 3.0, 4.0, 5.0).normalize()
+ );
+ assert_eq!(
+ $new(0.5, 0.25, 0.125, 0.0625),
+ $new(2.0, 4.0, 8.0, 16.0).recip()
+ );
+ });
+
+ glam_test!(test_project_reject, {
+ assert_eq!(
+ $new(0.0, 0.0, 0.0, 1.0),
+ $new(0.0, 1.0, 0.0, 1.0).project_onto($new(0.0, 0.0, 0.0, 2.0))
+ );
+ assert_eq!(
+ $new(0.0, 1.0, 0.0, 0.0),
+ $new(0.0, 1.0, 0.0, 1.0).reject_from($new(0.0, 0.0, 0.0, 2.0))
+ );
+ assert_eq!(
+ $new(0.0, 0.0, 0.0, 1.0),
+ $new(0.0, 1.0, 0.0, 1.0).project_onto_normalized($new(0.0, 0.0, 0.0, 1.0))
+ );
+ assert_eq!(
+ $new(0.0, 1.0, 0.0, 0.0),
+ $new(0.0, 1.0, 0.0, 1.0).reject_from_normalized($new(0.0, 0.0, 0.0, 1.0))
+ );
+ should_glam_assert!({ $vec4::ONE.project_onto($vec4::ZERO) });
+ should_glam_assert!({ $vec4::ONE.reject_from($vec4::ZERO) });
+ should_glam_assert!({ $vec4::ONE.project_onto_normalized($vec4::ONE) });
+ should_glam_assert!({ $vec4::ONE.reject_from_normalized($vec4::ONE) });
+ });
+
+ glam_test!(test_signum, {
+ assert_eq!($vec4::ZERO.signum(), $vec4::ONE);
+ assert_eq!(-$vec4::ZERO.signum(), -$vec4::ONE);
+ assert_eq!($vec4::ONE.signum(), $vec4::ONE);
+ assert_eq!((-$vec4::ONE).signum(), -$vec4::ONE);
+ assert_eq!($vec4::splat(INFINITY).signum(), $vec4::ONE);
+ assert_eq!($vec4::splat(NEG_INFINITY).signum(), -$vec4::ONE);
+ assert!($vec4::splat(NAN).signum().is_nan_mask().all());
+ });
+
+ glam_test!(test_abs, {
+ assert_eq!($vec4::ZERO.abs(), $vec4::ZERO);
+ assert_eq!($vec4::ONE.abs(), $vec4::ONE);
+ assert_eq!((-$vec4::ONE).abs(), $vec4::ONE);
+ });
+
+ glam_test!(test_round, {
+ assert_eq!($vec4::new(1.35, 0.0, 0.0, 0.0).round().x, 1.0);
+ assert_eq!($vec4::new(0.0, 1.5, 0.0, 0.0).round().y, 2.0);
+ assert_eq!($vec4::new(0.0, 0.0, -15.5, 0.0).round().z, -16.0);
+ assert_eq!($vec4::new(0.0, 0.0, 0.0, 0.0).round().z, 0.0);
+ assert_eq!($vec4::new(0.0, 21.1, 0.0, 0.0).round().y, 21.0);
+ assert_eq!($vec4::new(0.0, 0.0, 0.0, 11.123).round().w, 11.0);
+ assert_eq!($vec4::new(0.0, 0.0, 11.501, 0.0).round().z, 12.0);
+ assert_eq!(
+ $vec4::new(NEG_INFINITY, INFINITY, 1.0, -1.0).round(),
+ $vec4::new(NEG_INFINITY, INFINITY, 1.0, -1.0)
+ );
+ assert!($vec4::new(NAN, 0.0, 0.0, 1.0).round().x.is_nan());
+ });
+
+ glam_test!(test_floor, {
+ assert_eq!(
+ $vec4::new(1.35, 1.5, -1.5, 1.999).floor(),
+ $vec4::new(1.0, 1.0, -2.0, 1.0)
+ );
+ assert_eq!(
+ $vec4::new(INFINITY, NEG_INFINITY, 0.0, 0.0).floor(),
+ $vec4::new(INFINITY, NEG_INFINITY, 0.0, 0.0)
+ );
+ assert!($vec4::new(0.0, NAN, 0.0, 0.0).floor().y.is_nan());
+ assert_eq!(
+ $vec4::new(-0.0, -2000000.123, 10000000.123, 1000.9).floor(),
+ $vec4::new(-0.0, -2000001.0, 10000000.0, 1000.0)
+ );
+ });
+
+ glam_test!(test_fract, {
+ assert_approx_eq!(
+ $vec4::new(1.35, 1.5, -1.5, 1.999).fract(),
+ $vec4::new(0.35, 0.5, 0.5, 0.999)
+ );
+ assert_approx_eq!(
+ $vec4::new(-0.0, -200000.123, 1000000.123, 1000.9).fract(),
+ $vec4::new(0.0, 0.877, 0.123, 0.9),
+ 0.002
+ );
+ });
+
+ glam_test!(test_ceil, {
+ assert_eq!(
+ $vec4::new(1.35, 1.5, -1.5, 1234.1234).ceil(),
+ $vec4::new(2.0, 2.0, -1.0, 1235.0)
+ );
+ assert_eq!(
+ $vec4::new(INFINITY, NEG_INFINITY, 0.0, 0.0).ceil(),
+ $vec4::new(INFINITY, NEG_INFINITY, 0.0, 0.0)
+ );
+ assert!($vec4::new(0.0, 0.0, NAN, 0.0).ceil().z.is_nan());
+ assert_eq!(
+ $vec4::new(-1234.1234, -2000000.123, 1000000.123, 1000.9).ceil(),
+ $vec4::new(-1234.0, -2000000.0, 1000001.0, 1001.0)
+ );
+ });
+
+ glam_test!(test_lerp, {
+ let v0 = $vec4::new(-1.0, -1.0, -1.0, -1.0);
+ let v1 = $vec4::new(1.0, 1.0, 1.0, 1.0);
+ assert_approx_eq!(v0, v0.lerp(v1, 0.0));
+ assert_approx_eq!(v1, v0.lerp(v1, 1.0));
+ assert_approx_eq!($vec4::ZERO, v0.lerp(v1, 0.5));
+ });
+
+ glam_test!(test_is_finite, {
+ assert!($vec4::new(0.0, 0.0, 0.0, 0.0).is_finite());
+ assert!($vec4::new(-1e-10, 1.0, 1e10, 42.0).is_finite());
+ assert!(!$vec4::new(INFINITY, 0.0, 0.0, 0.0).is_finite());
+ assert!(!$vec4::new(0.0, NAN, 0.0, 0.0).is_finite());
+ assert!(!$vec4::new(0.0, 0.0, NEG_INFINITY, 0.0).is_finite());
+ assert!(!$vec4::new(0.0, 0.0, 0.0, NAN).is_finite());
+ });
+
+ glam_test!(test_powf, {
+ assert_eq!(
+ $vec4::new(2.0, 4.0, 8.0, 16.0).powf(2.0),
+ $vec4::new(4.0, 16.0, 64.0, 256.0)
+ );
+ });
+
+ glam_test!(test_exp, {
+ assert_eq!(
+ $vec4::new(1.0, 2.0, 3.0, 4.0).exp(),
+ $vec4::new(
+ (1.0 as $t).exp(),
+ (2.0 as $t).exp(),
+ (3.0 as $t).exp(),
+ (4.0 as $t).exp()
+ )
+ );
+ });
+
+ glam_test!(test_clamp_length, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec4::new(12.0, 16.0, 0.0, 0.0).clamp_length(7.0, 10.0),
+ $vec4::new(6.0, 8.0, 0.0, 0.0) // shortened to length 10.0
+ );
+ // In the middle is unchanged
+ assert_eq!(
+ $vec4::new(2.0, 1.0, 0.0, 0.0).clamp_length(0.5, 5.0),
+ $vec4::new(2.0, 1.0, 0.0, 0.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec4::new(0.6, 0.8, 0.0, 0.0).clamp_length(10.0, 20.0),
+ $vec4::new(6.0, 8.0, 0.0, 0.0) // lengthened to length 10.0
+ );
+ should_glam_assert!({ $vec4::ONE.clamp_length(1.0, 0.0) });
+ });
+
+ glam_test!(test_clamp_length_max, {
+ // Too long gets shortened
+ assert_eq!(
+ $vec4::new(12.0, 16.0, 0.0, 0.0).clamp_length_max(10.0),
+ $vec4::new(6.0, 8.0, 0.0, 0.0) // shortened to length 10.0
+ );
+ // Not too long is unchanged
+ assert_eq!(
+ $vec4::new(2.0, 1.0, 0.0, 0.0).clamp_length_max(5.0),
+ $vec4::new(2.0, 1.0, 0.0, 0.0) // unchanged
+ );
+ });
+
+ glam_test!(test_clamp_length_min, {
+ // Not too short is unchanged
+ assert_eq!(
+ $vec4::new(2.0, 1.0, 0.0, 0.0).clamp_length_min(0.5),
+ $vec4::new(2.0, 1.0, 0.0, 0.0) // unchanged
+ );
+ // Too short gets lengthened
+ assert_eq!(
+ $vec4::new(0.6, 0.8, 0.0, 0.0).clamp_length_min(10.0),
+ $vec4::new(6.0, 8.0, 0.0, 0.0) // lengthened to length 10.0
+ );
+ });
+
+ glam_test!(test_mul_add, {
+ assert_eq!(
+ $vec4::new(1.0, 1.0, 1.0, 1.0).mul_add(
+ $vec4::new(0.5, 2.0, -4.0, 0.0),
+ $vec4::new(-1.0, -1.0, -1.0, -1.0)
+ ),
+ $vec4::new(-0.5, 1.0, -5.0, -1.0)
+ );
+ });
+ };
+}
+
+macro_rules! impl_vec4_scalar_shift_op_test {
+ ($vec4:ident, $t_min:literal, $t_max:literal, $rhs_min:literal, $rhs_max:literal) => {
+ glam_test!(test_vec4_scalar_shift_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for z in $t_min..$t_max {
+ for w in $t_min..$t_max {
+ for rhs in $rhs_min..$rhs_max {
+ assert_eq!(
+ $vec4::new(x, y, z, w) << rhs,
+ $vec4::new(x << rhs, y << rhs, z << rhs, w << rhs)
+ );
+ assert_eq!(
+ $vec4::new(x, y, z, w) >> rhs,
+ $vec4::new(x >> rhs, y >> rhs, z >> rhs, w >> rhs)
+ );
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec4_scalar_shift_op_tests {
+ ($vec4:ident, $t_min:literal, $t_max:literal) => {
+ mod shift_by_i8 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0i8, 2);
+ }
+ mod shift_by_i16 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0i16, 2);
+ }
+ mod shift_by_i32 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0i32, 2);
+ }
+ mod shift_by_u8 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0u8, 2);
+ }
+ mod shift_by_u16 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0u16, 2);
+ }
+ mod shift_by_u32 {
+ use glam::$vec4;
+ impl_vec4_scalar_shift_op_test!($vec4, $t_min, $t_max, 0u32, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec4_shift_op_test {
+ ($vec4:ident, $rhs:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec4_shift_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ for z1 in $t_min..$t_max {
+ for w1 in $t_min..$t_max {
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ for z2 in $t_min..$t_max {
+ for w2 in $t_min..$t_max {
+ assert_eq!(
+ $vec4::new(x1, y1, z1, w1)
+ << $rhs::new(x2, y2, z2, w2),
+ $vec4::new(x1 << x2, y1 << y2, z1 << z2, w1 << w2)
+ );
+ assert_eq!(
+ $vec4::new(x1, y1, z1, w1)
+ >> $rhs::new(x2, y2, z2, w2),
+ $vec4::new(x1 >> x2, y1 >> y2, z1 >> z2, w1 >> w2)
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec4_shift_op_tests {
+ ($vec4:ident) => {
+ mod shift_ivec4_by_ivec4 {
+ use super::*;
+ impl_vec4_shift_op_test!($vec4, IVec4, 0, 2);
+ }
+ mod shift_ivec4_by_uvec4 {
+ use super::*;
+ impl_vec4_shift_op_test!($vec4, UVec4, 0, 2);
+ }
+ };
+}
+
+macro_rules! impl_vec4_scalar_bit_op_tests {
+ ($vec4:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec4_scalar_bit_ops, {
+ for x in $t_min..$t_max {
+ for y in $t_min..$t_max {
+ for z in $t_min..$t_max {
+ for w in $t_min..$t_max {
+ for rhs in $t_min..$t_max {
+ assert_eq!(
+ $vec4::new(x, y, z, w) & rhs,
+ $vec4::new(x & rhs, y & rhs, z & rhs, w & rhs)
+ );
+ assert_eq!(
+ $vec4::new(x, y, z, w) | rhs,
+ $vec4::new(x | rhs, y | rhs, z | rhs, w | rhs)
+ );
+ assert_eq!(
+ $vec4::new(x, y, z, w) ^ rhs,
+ $vec4::new(x ^ rhs, y ^ rhs, z ^ rhs, w ^ rhs)
+ );
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+
+macro_rules! impl_vec4_bit_op_tests {
+ ($vec4:ident, $t_min:literal, $t_max:literal) => {
+ glam_test!(test_vec4_bit_ops, {
+ for x1 in $t_min..$t_max {
+ for y1 in $t_min..$t_max {
+ for z1 in $t_min..$t_max {
+ for w1 in $t_min..$t_max {
+ assert_eq!(!$vec4::new(x1, y1, z1, w1), $vec4::new(!x1, !y1, !z1, !w1));
+
+ for x2 in $t_min..$t_max {
+ for y2 in $t_min..$t_max {
+ for z2 in $t_min..$t_max {
+ for w2 in $t_min..$t_max {
+ assert_eq!(
+ $vec4::new(x1, y1, z1, w1)
+ & $vec4::new(x2, y2, z2, w2),
+ $vec4::new(x1 & x2, y1 & y2, z1 & z2, w1 & w2)
+ );
+ assert_eq!(
+ $vec4::new(x1, y1, z1, w1)
+ | $vec4::new(x2, y2, z2, w2),
+ $vec4::new(x1 | x2, y1 | y2, z1 | z2, w1 | w2)
+ );
+ assert_eq!(
+ $vec4::new(x1, y1, z1, w1)
+ ^ $vec4::new(x2, y2, z2, w2),
+ $vec4::new(x1 ^ x2, y1 ^ y2, z1 ^ z2, w1 ^ w2)
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+ };
+}
+mod vec4 {
+ use glam::{const_vec4, vec4, Vec2, Vec3, Vec4};
+
+ #[cfg(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+ ))]
+ type Vec4Mask = glam::BVec4A;
+
+ #[cfg(any(
+ not(any(target_feature = "sse2", target_feature = "simd128")),
+ feature = "scalar-math"
+ ))]
+ type Vec4Mask = glam::BVec4;
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<Vec4>());
+ if cfg!(any(not(feature = "scalar-math"), feature = "cuda")) {
+ assert_eq!(16, mem::align_of::<Vec4>());
+ } else {
+ assert_eq!(4, mem::align_of::<Vec4>());
+ }
+ if cfg!(all(
+ any(target_feature = "sse2", target_feature = "simd128"),
+ not(feature = "scalar-math")
+ )) {
+ assert_eq!(16, mem::size_of::<Vec4Mask>());
+ assert_eq!(16, mem::align_of::<Vec4Mask>());
+ } else {
+ assert_eq!(4, mem::size_of::<Vec4Mask>());
+ assert_eq!(1, mem::align_of::<Vec4Mask>());
+ }
+ });
+
+ #[cfg(all(target_feature = "sse2", not(feature = "scalar-math")))]
+ #[test]
+ fn test_m128() {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+
+ #[repr(C, align(16))]
+ struct F32x4_A16([f32; 4]);
+
+ let v0 = Vec4::new(1.0, 2.0, 3.0, 4.0);
+ let m0: __m128 = v0.into();
+ let mut a0 = F32x4_A16([0.0, 0.0, 0.0, 0.0]);
+ unsafe {
+ _mm_store_ps(a0.0.as_mut_ptr(), m0);
+ }
+ assert_eq!([1.0, 2.0, 3.0, 4.0], a0.0);
+ let v1 = Vec4::from(m0);
+ assert_eq!(v0, v1);
+
+ #[repr(C, align(16))]
+ struct U32x4_A16([u32; 4]);
+
+ let v0 = Vec4Mask::new(true, false, true, false);
+ let m0: __m128 = v0.into();
+ let mut a0 = U32x4_A16([1, 2, 3, 4]);
+ unsafe {
+ _mm_store_ps(a0.0.as_mut_ptr() as *mut f32, m0);
+ }
+ assert_eq!([0xffffffff, 0, 0xffffffff, 0], a0.0);
+ }
+
+ glam_test!(test_as, {
+ use glam::{DVec4, IVec4, UVec4};
+ assert_eq!(
+ DVec4::new(-1.0, -2.0, -3.0, -4.0),
+ Vec4::new(-1.0, -2.0, -3.0, -4.0).as_dvec4()
+ );
+ assert_eq!(
+ IVec4::new(-1, -2, -3, -4),
+ Vec4::new(-1.0, -2.0, -3.0, -4.0).as_ivec4()
+ );
+ assert_eq!(
+ UVec4::new(1, 2, 3, 4),
+ Vec4::new(1.0, 2.0, 3.0, 4.0).as_uvec4()
+ );
+
+ assert_eq!(
+ IVec4::new(-1, -2, -3, -4),
+ DVec4::new(-1.0, -2.0, -3.0, -4.0).as_ivec4()
+ );
+ assert_eq!(
+ UVec4::new(1, 2, 3, 4),
+ DVec4::new(1.0, 2.0, 3.0, 4.0).as_uvec4()
+ );
+ assert_eq!(
+ Vec4::new(-1.0, -2.0, -3.0, -4.0),
+ DVec4::new(-1.0, -2.0, -3.0, -4.0).as_vec4()
+ );
+
+ assert_eq!(
+ DVec4::new(-1.0, -2.0, -3.0, -4.0),
+ IVec4::new(-1, -2, -3, -4).as_dvec4()
+ );
+ assert_eq!(UVec4::new(1, 2, 3, 4), IVec4::new(1, 2, 3, 4).as_uvec4());
+ assert_eq!(
+ Vec4::new(-1.0, -2.0, -3.0, -4.0),
+ IVec4::new(-1, -2, -3, -4).as_vec4()
+ );
+
+ assert_eq!(
+ DVec4::new(1.0, 2.0, 3.0, 4.0),
+ UVec4::new(1, 2, 3, 4).as_dvec4()
+ );
+ assert_eq!(IVec4::new(1, 2, 3, 4), UVec4::new(1, 2, 3, 4).as_ivec4());
+ assert_eq!(
+ Vec4::new(1.0, 2.0, 3.0, 4.0),
+ UVec4::new(1, 2, 3, 4).as_vec4()
+ );
+ });
+
+ glam_test!(test_vec3a, {
+ use glam::Vec3A;
+ assert_eq!(
+ Vec4::new(1.0, 2.0, 3.0, 4.0),
+ Vec4::from((Vec3A::new(1.0, 2.0, 3.0), 4.0))
+ );
+ assert_eq!(
+ Vec4::new(1.0, 2.0, 3.0, 4.0),
+ Vec4::from((1.0, Vec3A::new(2.0, 3.0, 4.0)))
+ );
+ });
+
+ impl_vec4_float_tests!(f32, const_vec4, vec4, Vec4, Vec3, Vec2, Vec4Mask);
+}
+
+mod dvec4 {
+ use glam::{const_dvec4, dvec4, BVec4, DVec2, DVec3, DVec4};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(32, mem::size_of::<DVec4>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(mem::align_of::<f64>(), mem::align_of::<DVec4>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(16, mem::align_of::<DVec4>());
+ assert_eq!(4, mem::size_of::<BVec4>());
+ assert_eq!(1, mem::align_of::<BVec4>());
+ });
+
+ impl_vec4_float_tests!(f64, const_dvec4, dvec4, DVec4, DVec3, DVec2, BVec4);
+}
+
+mod ivec4 {
+ use glam::{const_ivec4, ivec4, BVec4, IVec2, IVec3, IVec4, UVec4};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<IVec4>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(4, mem::align_of::<IVec4>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(16, mem::align_of::<IVec4>());
+ assert_eq!(4, mem::size_of::<BVec4>());
+ assert_eq!(1, mem::align_of::<BVec4>());
+ });
+
+ impl_vec4_signed_tests!(i32, const_ivec4, ivec4, IVec4, IVec3, IVec2, BVec4);
+ impl_vec4_eq_hash_tests!(i32, ivec4);
+
+ impl_vec4_scalar_shift_op_tests!(IVec4, -2, 2);
+ impl_vec4_shift_op_tests!(IVec4);
+
+ impl_vec4_scalar_bit_op_tests!(IVec4, -2, 2);
+ impl_vec4_bit_op_tests!(IVec4, -2, 2);
+}
+
+mod uvec4 {
+ use glam::{const_uvec4, uvec4, BVec4, IVec4, UVec2, UVec3, UVec4};
+
+ glam_test!(test_align, {
+ use std::mem;
+ assert_eq!(16, mem::size_of::<UVec4>());
+ #[cfg(not(feature = "cuda"))]
+ assert_eq!(4, mem::align_of::<UVec4>());
+ #[cfg(feature = "cuda")]
+ assert_eq!(16, mem::align_of::<UVec4>());
+ assert_eq!(4, mem::size_of::<BVec4>());
+ assert_eq!(1, mem::align_of::<BVec4>());
+ });
+
+ impl_vec4_tests!(u32, const_uvec4, uvec4, UVec4, UVec3, UVec2, BVec4);
+ impl_vec4_eq_hash_tests!(u32, uvec4);
+
+ impl_vec4_scalar_shift_op_tests!(UVec4, 0, 2);
+ impl_vec4_shift_op_tests!(UVec4);
+
+ impl_vec4_scalar_bit_op_tests!(UVec4, 0, 2);
+ impl_vec4_bit_op_tests!(UVec4, 0, 2);
+}