# Motivation

As described in [[1/4] Intel GPU Runtime Upstreaming for Device](https://github.com/pytorch/pytorch/pull/116019) and in [[RFC] Intel GPU Runtime Upstreaming](https://github.com/pytorch/pytorch/issues/114842), this third PR covers the changes under `libtorch_python`.

# Design

This PR primarily offers the device-related APIs in the Python frontend, including:

- `torch.xpu.is_available`
- `torch.xpu.device_count`
- `torch.xpu.current_device`
- `torch.xpu.set_device`
- `torch.xpu.device`
- `torch.xpu.device_of`
- `torch.xpu.get_device_name`
- `torch.xpu.get_device_capability`
- `torch.xpu.get_device_properties`

as well as the following internal helpers:

- `torch.xpu._DeviceGuard`
- `torch.xpu._is_compiled`
- `torch.xpu._get_device`

# Additional Context

Support for lazy initialization will be implemented in the next PR.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/116850
Approved by: https://github.com/EikanWang, https://github.com/jgong5, https://github.com/gujinghui, https://github.com/malfet
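As a quick orientation, the sketch below shows how the device-query APIs listed above fit together. It is an editor's illustration, not part of the PR, and it assumes a PyTorch build with XPU support and at least one visible XPU device.

```python
import torch

if torch.xpu.is_available():
    n = torch.xpu.device_count()              # number of visible XPU devices
    cur = torch.xpu.current_device()          # index of the current device
    print(torch.xpu.get_device_name(cur))     # human-readable device name
    print(torch.xpu.get_device_capability(cur))
    print(torch.xpu.get_device_properties(cur))

    torch.xpu.set_device(0)                   # switch the current device globally
    with torch.xpu.device(n - 1):             # or temporarily, via the context manager
        pass
```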
from typing import Any

import torch

# The _get_device_index has been moved to torch.utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index


def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Get the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    is an XPU device. Note that for an XPU device without a specified index,
    i.e., ``torch.device('xpu')``, this will return the current default XPU
    device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default XPU
    device if :attr:`optional` is ``True``.
    """
    if isinstance(device, int):
        return device
    if isinstance(device, str):
        device = torch.device(device)
    if isinstance(device, torch.device):
        if allow_cpu:
            if device.type not in ["xpu", "cpu"]:
                raise ValueError(f"Expected a xpu or cpu device, but got: {device}")
        elif device.type != "xpu":
            raise ValueError(f"Expected a xpu device, but got: {device}")
    if not torch.jit.is_scripting():
        if isinstance(device, torch.xpu.device):
            return device.idx
    return _torch_get_device_index(device, optional, allow_cpu)
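
# --- Illustrative usage (editor's note, not part of the original file) ---
# ``_get_device_index`` normalizes several accepted "device" spellings into a
# plain integer index; assuming at least one visible XPU device:
#
#     _get_device_index(1)                                     # -> 1
#     _get_device_index("xpu:0")                               # -> 0
#     _get_device_index(torch.device("xpu:1"))                 # -> 1
#     _get_device_index(None, optional=True)                   # -> current default XPU index
#     _get_device_index(torch.device("cpu"), allow_cpu=True)   # -> -1
#
# Passing a CPU device without ``allow_cpu=True`` raises a ``ValueError``.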


def _dummy_type(name: str) -> type:
    # Build a class named ``name`` whose ``__init__`` and ``__new__`` both
    # raise, so any attempt to instantiate it fails with a clear error.
    def get_err_fn(is_init: bool):
        def err_fn(obj, *args, **kwargs):
            if is_init:
                class_name = obj.__class__.__name__
            else:
                class_name = obj.__name__
            raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")

        return err_fn

    return type(
        name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
    )
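
# --- Illustrative usage (editor's note, not part of the original file) ---
# ``_dummy_type`` builds a named placeholder class whose construction always
# fails; a placeholder of this kind is typically used as a stand-in base class
# when the real C-extension type is unavailable in the current build. The name
# ``_Missing`` below is hypothetical:
#
#     _Missing = _dummy_type("_Missing")
#     _Missing()  # RuntimeError: Tried to instantiate dummy base class _Missing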