sci-ml/XNNPACK: drop 2024.11.08-r1

Signed-off-by: Alfredo Tupone <tupone@gentoo.org>
This commit is contained in:
Alfredo Tupone
2026-04-03 15:57:11 +02:00
parent 747486e73b
commit 38364eed76
3 changed files with 0 additions and 97 deletions
-1
View File
@@ -1,2 +1 @@
DIST XNNPACK-2024.11.08.tar.gz 8923022 BLAKE2B 1e73ffc1e5d1e1248272910860e59a471b52f207945f0049188d64b944e442fd2bc814105b2fef59e9ec295e9871cae0a805de485a047f9eacedcee2695d6e99 SHA512 b9e711e1c69a24e347f64ee21c60a609bff25e36153216382acd3d431e466b127eea81b1d80f00f84699a426c44f8a3566d9b95d0e87079c34f527e05c2db787
DIST XNNPACK-2024.12.03.tar.gz 8894276 BLAKE2B 1a944a3903ef8ea1c1454b8ac90cdccbdcdf7fb92ae76447ec9982d447f9dd69381d73397b186f58339142543f02b4106c09dea56a8efab3f864a8475f4ad8b4 SHA512 ecb859935996416ce51b2b9ffd5a8c3cc5e933dfab96b7b68c8a52e05227ce3416ffa81e8a3fcef8349aa85147aa1c6a07138c7fc8e87a02ad577e1da9126827
@@ -1,66 +0,0 @@
# Copyright 2022-2025 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# Ebuild API version; determines which Portage features/phases are available.
EAPI=8
# cmake: provides cmake_src_configure/cmake_src_test phase helpers.
# flag-o-matic: provides filter-lto used in src_configure below.
inherit cmake flag-o-matic
# Upstream pins releases by commit hash rather than tag; this hash is the
# snapshot fetched in SRC_URI and used to locate the unpacked source dir (S).
CommitId=4ea82e595b36106653175dcb04b2aa532660d0d8
DESCRIPTION="library of floating-point neural network inference operators"
HOMEPAGE="https://github.com/google/XNNPACK/"
# Fetch the commit tarball but store it locally under the versioned name ${P}.
SRC_URI="https://github.com/google/${PN}/archive/${CommitId}.tar.gz
-> ${P}.tar.gz"
# GitHub archive unpacks to <project>-<commit>, not <project>-<version>.
S="${WORKDIR}"/${PN}-${CommitId}
LICENSE="MIT"
SLOT="0"
KEYWORDS="~amd64"
IUSE="+assembly jit +memopt +sparse static-libs test"
RDEPEND="
dev-libs/cpuinfo
dev-libs/pthreadpool
"
# FP16 and FXdiv are header-only, hence build-time (DEPEND) only.
DEPEND="${RDEPEND}
sci-ml/FP16
dev-libs/FXdiv
"
BDEPEND="test? ( dev-cpp/gtest )"
# Tests cannot run unless the test USE flag is enabled at build time.
RESTRICT="!test? ( test )"
# The test suite links against the static library, so tests require static-libs.
REQUIRED_USE="test? ( static-libs )"
# Distro-local patch shipped in files/ (shown further down in this commit).
PATCHES=( "${FILESDIR}"/${P}-gentoo.patch )
# Configure phase: strip LTO flags, then hand XNNPACK-specific options to cmake.
src_configure() {
# LTO builds fail with -Werror=lto-type-mismatch; drop LTO flags entirely.
# -Werror=lto-type-mismatch
# https://bugs.gentoo.org/933414
# https://github.com/google/XNNPACK/issues/6806
filter-lto
local mycmakeargs=(
-DXNNPACK_BUILD_BENCHMARKS=OFF
# Use the system cpuinfo/pthreadpool/FP16/FXdiv instead of bundled copies.
-DXNNPACK_USE_SYSTEM_LIBS=ON
-DXNNPACK_BUILD_TESTS=$(usex test ON OFF)
# Build either a static or a shared library, keyed off the static-libs flag.
-DXNNPACK_LIBRARY_TYPE=$(usex static-libs static shared)
-DXNNPACK_ENABLE_ASSEMBLY=$(usex assembly ON OFF)
-DXNNPACK_ENABLE_MEMOPT=$(usex memopt ON OFF)
-DXNNPACK_ENABLE_SPARSE=$(usex sparse ON OFF)
# Needed so the static archive can be linked into consumers' shared objects.
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
# Point the build at the system install prefix for these source-dir lookups.
-DPTHREADPOOL_SOURCE_DIR=/usr
-DCPUINFO_SOURCE_DIR=/usr
)
cmake_src_configure
}
# Test phase: run the cmake/ctest suite, excluding known-failing tests.
# CMAKE_SKIP_TESTS is consumed by cmake_src_test (cmake.eclass) as a list of
# ctest names to skip; these f16/convert tests presumably fail on hardware or
# toolchains without native FP16 support — NOTE(review): reason not recorded here.
src_test() {
local CMAKE_SKIP_TESTS=(
unary-elementwise-nc-test
f32-f16-vcvt-test
f16-vlrelu-test
f16-vsqr-test
)
cmake_src_test
}
@@ -1,30 +0,0 @@
--- a/src/reference/unary-elementwise.cc 2025-02-19 21:58:14.973845984 +0100
+++ b/src/reference/unary-elementwise.cc 2025-02-19 22:06:55.224576694 +0100
@@ -127,6 +127,16 @@
}
};
+#ifdef XNN_HAVE_FLOAT16
+template <>
+struct ConvertOp<xnn_bfloat16, _Float16> {
+ explicit ConvertOp(const xnn_unary_uparams*) {}
+ _Float16 operator()(xnn_bfloat16 x) const {
+ return static_cast<_Float16>(static_cast<float>(x));
+ }
+};
+#endif
+
template <typename TIn, typename TOut>
const xnn_unary_elementwise_config* get_convert_config(
std::true_type /*input_quantized*/, std::true_type /*output_quantized*/) {
--- a/src/xnnpack/requantization.h 2025-02-19 22:36:23.417900964 +0100
+++ b/src/xnnpack/requantization.h 2025-02-19 22:37:06.910367395 +0100
@@ -121,7 +121,7 @@
uint8_t zero_point,
uint8_t min, uint8_t max) {
assert(scale < 256.0f);
- assert(scale >= 0x1.0p-32f);
+ assert(scale >= 1.0f / 4294967296.0f /* 0x1.0p-32f */);
struct ExpMul f32 = parse_f32(scale);