Skip to content

Commit

Permalink
Merge branch 'master' into add-mean-keyword-to-std-var
Browse files Browse the repository at this point in the history
  • Loading branch information
antonwolfy authored Jan 22, 2025
2 parents f40dbba + 451c2b3 commit 46c14cb
Show file tree
Hide file tree
Showing 15 changed files with 59 additions and 60 deletions.
2 changes: 1 addition & 1 deletion dpnp/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
# are not installed under any of default paths where Python is searching.
from platform import system

if system() == "Windows":
if system() == "Windows": # pragma: no cover
if hasattr(os, "add_dll_directory"):
os.add_dll_directory(mypath)
os.add_dll_directory(dpctlpath)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface.py
Original file line number Diff line number Diff line change
Expand Up @@ -712,7 +712,7 @@ def is_cuda_backend(obj=None):
if (
sycl_device is not None
and sycl_device.backend == dpctl.backend_type.cuda
):
): # pragma: no cover
return True
return False

Expand Down
6 changes: 3 additions & 3 deletions dpnp/dpnp_iface_histograms.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def _align_dtypes(a_dtype, bins_dtype, ntype, supported_types, device):
return sample_type, hist_type

# should not happen
return None, None
return None, None # pragma: no cover


def _ravel_check_a_and_weights(a, weights):
Expand Down Expand Up @@ -392,7 +392,7 @@ def bincount(x, weights=None, minlength=None):
x.dtype, x.dtype, ntype, supported_types, device
)

if x_casted_dtype is None or ntype_casted is None:
if x_casted_dtype is None or ntype_casted is None: # pragma: no cover
raise ValueError(
f"function '{bincount}' does not support input types "
f"({x.dtype}, {ntype}), "
Expand Down Expand Up @@ -607,7 +607,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None):
a.dtype, bin_edges.dtype, ntype, supported_types, device
)

if a_bin_dtype is None or hist_dtype is None:
if a_bin_dtype is None or hist_dtype is None: # pragma: no cover
raise ValueError(
f"function '{histogram}' does not support input types "
f"({a.dtype}, {bin_edges.dtype}, {ntype}), "
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def choose(x1, choices, out=None, mode="raise"):
)

if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_libmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def erf(in_array1):
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_mathematical.py
Original file line number Diff line number Diff line change
Expand Up @@ -2946,7 +2946,7 @@ def modf(x1, **kwargs):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
Expand Down
4 changes: 2 additions & 2 deletions dpnp/dpnp_iface_statistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def _count_reduce_items(arr, axis, where=True):
for ax in axis:
items *= arr.shape[normalize_axis_index(ax, arr.ndim)]
items = dpnp.intp(items)
else:
else: # pragma: no cover
raise NotImplementedError(
"where keyword argument is only supported with its default value."
)
Expand Down Expand Up @@ -614,7 +614,7 @@ def correlate(a, v, mode="valid"):
rdtype = result_type_for_device([a.dtype, v.dtype], device)
supported_dtype = to_supported_dtypes(rdtype, supported_types, device)

if supported_dtype is None:
if supported_dtype is None: # pragma: no cover
raise ValueError(
f"function does not support input types "
f"({a.dtype.name}, {v.dtype.name}), "
Expand Down
2 changes: 1 addition & 1 deletion dpnp/dpnp_utils/dpnp_utils_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,4 +78,4 @@ def is_castable(dtype, stype):
):
return stypes

return None
return None # pragma: no cover
4 changes: 2 additions & 2 deletions dpnp/linalg/dpnp_utils_linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,7 @@ def _batched_qr(a, mode="reduced"):
# w/a to avoid race condition on CUDA during multiple runs
# TODO: Remove it once the OneMath issue is resolved
# https://github.com/uxlfoundation/oneMath/issues/626
if dpnp.is_cuda_backend(a_sycl_queue):
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
ht_ev.wait()
else:
_manager.add_event_pair(ht_ev, geqrf_ev)
Expand Down Expand Up @@ -2479,7 +2479,7 @@ def dpnp_qr(a, mode="reduced"):
# w/a to avoid race condition on CUDA during multiple runs
# TODO: Remove it once the OneMath issue is resolved
# https://github.com/uxlfoundation/oneMath/issues/626
if dpnp.is_cuda_backend(a_sycl_queue):
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
ht_ev.wait()
else:
_manager.add_event_pair(ht_ev, geqrf_ev)
Expand Down
Loading

0 comments on commit 46c14cb

Please sign in to comment.