unittest: Skip new failures due to PyTorch uplift
Tracking: #922

Signed-off-by: Feng Yuan <[email protected]>
fengyuan14 committed Sep 19, 2024
1 parent c134991 commit 52e0d2b
Showing 2 changed files with 27 additions and 15 deletions.
3 changes: 3 additions & 0 deletions test/xpu/extended/skip_list_common.py
@@ -162,5 +162,8 @@
# https://github.com/intel/torch-xpu-ops/issues/781
"test_compare_cpu_square_xpu_complex64",

# https://github.com/intel/torch-xpu-ops/issues/922
"test_compare_cpu_isin_xpu_bfloat16",
"test_compare_cpu_unique_consecutive_xpu_bfloat16",
),
}
39 changes: 24 additions & 15 deletions test/xpu/skip_list_common.py
@@ -3,7 +3,7 @@
# Skip list of base line

# Need to revisit when the ops are enabled
# AssertionError: The supported dtypes for xxx on device type xpu are incorrect!
"test_dtypes___rmod___xpu",
"test_dtypes_nn_functional_conv1d_xpu",
"test_dtypes_nn_functional_conv2d_xpu",
@@ -15,7 +15,7 @@
"test_dtypes_sparse_sampled_addmm_xpu",
# AssertionError: RuntimeError not raised
"test_errors_dot_xpu",
"test_errors_kthvalue_xpu",
"test_errors_kthvalue_xpu",
"test_errors_take_xpu",
"test_errors_vdot_xpu",
# Fallback cases with skipCPUIfNoLapack, AssertionError: Tensor-likes are not close!
@@ -37,7 +37,7 @@
"test_errors_sparse_mul_layout2_xpu",
"test_errors_sparse_mul_layout3_xpu",
"test_out_requires_grad_error_sparse_sampled_addmm_xpu_complex64",
"test_out_requires_grad_error_sparse_sampled_addmm_xpu_float32",
"test_out_requires_grad_error_sparse_sampled_addmm_xpu_float32",

# NotImplementedError: Could not run 'aten::_to_dense' with arguments from the 'SparseXPU' backend.
# https://github.com/intel/torch-xpu-ops/issues/357
@@ -47,14 +47,14 @@
# RuntimeError: sparse_dim expected sparse or strided tensor layout but got Sparse
# Issue https://github.com/intel/torch-xpu-ops/issues/357
"test_variant_consistency_eager_to_sparse_xpu_complex64",
"test_non_standard_bool_values_to_sparse_xpu_bool",
"test_non_standard_bool_values_to_sparse_xpu_bool",

# OneDNN issues, https://github.com/intel/torch-xpu-ops/issues/253
# RuntimeError: Long is not supported in oneDNN!
# RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
# RuntimeError: Double and complex datatype matmul is not supported in oneDNN
"test_noncontiguous_samples_nn_functional_conv3d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64", #
"test_noncontiguous_samples_nn_functional_conv3d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64",
"test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32",
"test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_int64",
@@ -63,15 +63,15 @@
"test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv1d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv2d_xpu_int64",

# RuntimeError: mode only supports CPU AND CUDA device type, got: xpu
# Issue https://github.com/intel/torch-xpu-ops/issues/327
"test_numpy_ref_linalg_tensorinv_xpu_float64",
"test_out_mode_xpu_float32",

# RuntimeError: false INTERNAL ASSERT FAILED at "/home/gta/daisyden/pytorch4/aten/src/ATen/native/DispatchStub.cpp":220, please report a bug to PyTorch. DispatchStub: missing kernel for xpu
"test_out_nanmean_xpu_float32",
"test_out_warning_nanmean_xpu",
"test_out_warning_nanmean_xpu",

# NameError: name 'nanj' is not defined. Did you mean: 'nan'?
# https://github.com/intel/torch-xpu-ops/issues/768
@@ -109,19 +109,17 @@
"test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32",
"test_python_ref_torch_fallback__refs_pow_xpu_complex32",


# unexpected success because of cpu fallback
"test_out_triangular_solve_xpu_float32",

# Newly added:

# Cuda skipped it
"test_non_standard_bool_values_sort_xpu_bool", # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'.

# Cuda skipped it
"test_non_standard_bool_values_msort_xpu_bool", # The implementation aligns with CUDA, RuntimeError: "msort" not implemented for 'Bool'.


# Unexpected success
"test_python_ref_executor__refs_pow_executor_aten_xpu_complex32", # Didn't align with CUDA, Unexpected success

@@ -672,13 +670,16 @@
# So far CUDA doesn't support Half, so XPU fails as well, since we aligned the claimed dtypes with CUDA in the test infra.
# https://github.com/intel/torch-xpu-ops/issues/623
"test_dtypes_nextafter_xpu",

# AssertionError: The supported dtypes for argsort on device type xpu are incorrect!
# The following dtypes worked in forward but are not listed by the OpInfo: {torch.bool}.
# CUDA does not have torch.bool support on argsort.
"test_dtypes_argsort_xpu",
# Unexpected success; CUDA got XFAIL because CUDA does not support histogramdd
"test_errors_histogramdd_xpu",

# https://github.com/intel/torch-xpu-ops/issues/922
"test_dtypes_isin_xpu",
),

"test_binary_ufuncs_xpu.py": (
@@ -3483,5 +3484,13 @@
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool1d_xpu_float",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_bfloat16",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_float",

# https://github.com/intel/torch-xpu-ops/issues/922
"test_dispatch_meta_outplace_isin_xpu_bfloat16",
"test_dispatch_meta_outplace_unique_consecutive_xpu_bfloat16",
"test_dispatch_symbolic_meta_outplace_isin_xpu_bfloat16",
"test_dispatch_symbolic_meta_outplace_unique_consecutive_xpu_bfloat16",
"test_meta_outplace_isin_xpu_bfloat16",
"test_meta_outplace_unique_consecutive_xpu_bfloat16",
),
}
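
For context, the skip lists changed in this commit are plain Python data: a dict keyed by test file, each value a tuple of test-case names to exclude. The sketch below is a hedged illustration of how a runner could consume such a dict by folding the names into a pytest `-k` expression; it is not the actual torch-xpu-ops harness, and `skip_dict`, `build_skip_expression`, `run_with_skips`, and the sample entries are hypothetical stand-ins.

```python
# Minimal sketch of consuming a skip dict shaped like the one in this commit.
# Keys are test files; values are tuples of test names that should not run.
# All names here are hypothetical stand-ins, not the torch-xpu-ops harness API.
import subprocess

skip_dict = {
    "test_ops_xpu.py": (
        "test_dtypes_isin_xpu",
        "test_dtypes_argsort_xpu",
    ),
}

def build_skip_expression(test_file: str) -> str:
    """Combine the skip entries for one file into a pytest -k expression."""
    cases = skip_dict.get(test_file, ())
    return " and ".join(f"not {case}" for case in cases)

def run_with_skips(test_file: str) -> int:
    """Invoke pytest on the file, deselecting every listed test by name."""
    cmd = ["pytest", "-v", test_file]
    expr = build_skip_expression(test_file)
    if expr:
        cmd += ["-k", expr]
    return subprocess.call(cmd)

if __name__ == "__main__":
    raise SystemExit(run_with_skips("test_ops_xpu.py"))
```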
