From e820f6c620e67c28d28d4261b59211c20716c784 Mon Sep 17 00:00:00 2001
From: Anatoly Myachev
Date: Thu, 19 Sep 2024 02:55:14 +0200
Subject: [PATCH] Remove `import intel_extension_for_pytorch` from
 `fused_softmax.py` (#2278)

Part of https://github.com/intel/intel-xpu-backend-for-triton/pull/2147

Perf is the same: https://github.com/intel/intel-xpu-backend-for-triton/actions/runs/10928161213
vs https://github.com/intel/intel-xpu-backend-for-triton/actions/runs/10912513726;
geomean diff: 1% for triton, 2% for xetla.

One more CI run: https://github.com/intel/intel-xpu-backend-for-triton/actions/runs/10928754028
(also looks good)

Signed-off-by: Anatoly Myachev
---
 benchmarks/triton_kernels_benchmark/fused_softmax.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/benchmarks/triton_kernels_benchmark/fused_softmax.py b/benchmarks/triton_kernels_benchmark/fused_softmax.py
index 8027c5e2aa..0b983448e4 100644
--- a/benchmarks/triton_kernels_benchmark/fused_softmax.py
+++ b/benchmarks/triton_kernels_benchmark/fused_softmax.py
@@ -15,9 +15,6 @@
 import triton_kernels_benchmark as benchmark_suit
 import xetla_kernel
 
-if benchmark_suit.USE_IPEX_OPTION:
-    import intel_extension_for_pytorch  # type: ignore  # noqa: F401
-
 
 @torch.jit.script
 def naive_softmax(x):