1 parent 17f8a21 commit 1d36999
vllm/platforms/cuda.py
@@ -64,8 +64,7 @@ def supported_dtypes(self) -> list[torch.dtype]:
         if self.has_device_capability(80):
             # Ampere and Hopper or later NVIDIA GPUs.
             return [torch.bfloat16, torch.float16, torch.float32]
-        elif (not self.has_device_capability(80)
-              ) and self.has_device_capability(60):
+        if self.has_device_capability(60):
             # Pascal, Volta and Turing NVIDIA GPUs, BF16 is not supported
             return [torch.float16, torch.float32]
         # Kepler and Maxwell NVIDIA GPUs, only FP32 is supported,
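
The simplification is safe because the first branch already returns when the device capability is at least 8.0, so control only reaches the second check when that condition is false; re-testing `not self.has_device_capability(80)` in an `elif` was redundant. Below is a minimal, self-contained sketch of the resulting control flow. It is not the real vLLM class: the class name, constructor, and the `has_device_capability` stub are invented for illustration, and the final FP32 fallback return is assumed from the trailing comment (it is not shown in the hunk).

import torch


class CudaPlatformSketch:
    # Hypothetical stand-in for vLLM's CUDA platform class; only the
    # dtype-selection logic touched by this commit is reproduced.

    def __init__(self, compute_capability: int) -> None:
        # e.g. 90 for Hopper, 80 for Ampere, 75 for Turing, 60 for Pascal.
        self.compute_capability = compute_capability

    def has_device_capability(self, capability: int) -> bool:
        # Assumed behaviour: true when the GPU's compute capability is at
        # least the requested value.
        return self.compute_capability >= capability

    def supported_dtypes(self) -> list[torch.dtype]:
        if self.has_device_capability(80):
            # Ampere and Hopper or later NVIDIA GPUs.
            return [torch.bfloat16, torch.float16, torch.float32]
        if self.has_device_capability(60):
            # Pascal, Volta and Turing NVIDIA GPUs, BF16 is not supported.
            return [torch.float16, torch.float32]
        # Kepler and Maxwell NVIDIA GPUs, only FP32 is supported
        # (assumed fallback; the closing return is outside the hunk).
        return [torch.float32]


# Example: a Turing GPU (capability 7.5) falls into the second branch.
assert CudaPlatformSketch(75).supported_dtypes() == [torch.float16, torch.float32]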