We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 73b5f43 · commit fc94685 — Copy full SHA for fc94685
vllm/platforms/cuda.py
@@ -64,7 +64,7 @@ def supported_dtypes(self) -> list[torch.dtype]:
64
if self.has_device_capability(80):
65
# Ampere and Hopper or later NVIDIA GPUs.
66
return [torch.bfloat16, torch.float16, torch.float32]
67
- elif self.has_device_capability(60):
+ if self.has_device_capability(60):
68
# Pascal, Volta and Turing NVIDIA GPUs, BF16 is not supported
69
return [torch.float16, torch.float32]
70
# Kepler and Maxwell NVIDIA GPUs, only FP32 is supported,
0 commit comments