     dynamo_tensorrt_converter,
 )
 from torch_tensorrt.fx.types import TRTTensor
+from torch_tensorrt.fx.utils import Frameworks, unified_dtype_converter
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -47,3 +48,59 @@ def aten_ops_arange_start_step(
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
     return np.arange(*args)
+
+
+def empty_validator(empty_node: Node) -> bool:
+    # Reject aten.empty calls whose kwargs we cannot honor so that the op
+    # falls back to PyTorch execution instead of failing in TensorRT.
+    layout = empty_node.kwargs.get("layout", None)
+    pin_memory = empty_node.kwargs.get("pin_memory", None)
+    if layout is not None:
+        _LOGGER.debug(f"Currently we don't support specifying layout, got {layout}.")
+        return False
+    if pin_memory is not None:
+        _LOGGER.debug(
+            f"Currently we don't support specifying pin_memory, got {pin_memory}."
+        )
+        return False
+    return True
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.empty.memory_format, capability_validator=empty_validator
+)
+def aten_ops_empty(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    # Allocate an uninitialized buffer with the requested shape (args[0]) and dtype.
+    if kwargs.get("dtype") is not None:
+        empty_np_tensor = np.empty(
+            tuple(args[0]),
+            dtype=unified_dtype_converter(kwargs.get("dtype"), Frameworks.NUMPY),
+        )
+    else:
+        # np.empty defaults to np.float64 when no dtype is given.
+        empty_np_tensor = np.empty(tuple(args[0]))
+
+    # torch.from_numpy preserves the dtype chosen above; torch.Tensor would
+    # silently cast the buffer to float32.
+    empty_tensor = torch.from_numpy(empty_np_tensor)
+
+    # device
+    if kwargs.get("device") is not None:
+        empty_tensor = empty_tensor.to(device=kwargs.get("device"))
+
+    # memory_format: the default is torch.contiguous_format
+    memory_format = kwargs.get("memory_format")
+    if memory_format == torch.channels_last:
+        # args[0] must describe a 4-D shape
+        empty_tensor = empty_tensor.to(memory_format=torch.channels_last)
+    elif memory_format == torch.channels_last_3d:
+        # args[0] must describe a 5-D shape
+        empty_tensor = empty_tensor.to(memory_format=torch.channels_last_3d)
+
+    return empty_tensor
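
As a quick illustrative check (not part of the diff) of why the converter builds its output with `torch.from_numpy` rather than `torch.Tensor`: `from_numpy` keeps the NumPy dtype, while `torch.Tensor` re-casts to the default float32, and `np.empty` falls back to float64 when no dtype is given.

```python
import numpy as np
import torch

buf = np.empty((2, 3))              # np.empty defaults to float64
print(torch.from_numpy(buf).dtype)  # torch.float64 -- dtype preserved
print(torch.Tensor(buf).dtype)      # torch.float32 -- cast to torch's default dtype
```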
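For reference, a minimal sketch of the eager-mode behaviour the new converter is meant to reproduce; the shape and kwargs below are purely illustrative:

```python
import torch

# aten.empty.memory_format is the overload the converter above is registered for.
out = torch.ops.aten.empty.memory_format(
    [2, 3, 4, 5],                   # 4-D shape, required for channels_last
    dtype=torch.float16,
    memory_format=torch.channels_last,
)
print(out.shape, out.dtype)                                   # torch.Size([2, 3, 4, 5]) torch.float16
print(out.is_contiguous(memory_format=torch.channels_last))   # True
```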