Skip to content

Commit

Permalink
Correct type hint in functional.py (#992)
Browse files Browse the repository at this point in the history
  • Loading branch information
sxndqc authored Jan 28, 2024
1 parent e651e8e commit 8ddfda1
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion bitsandbytes/functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -890,7 +890,7 @@ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize
def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
    """Quantize tensor ``A`` using the NF4 (4-bit NormalFloat) data type.

    Thin convenience wrapper: forwards every argument unchanged to
    ``quantize_4bit`` with ``quant_type='nf4'``, and returns whatever
    ``quantize_4bit`` returns (per the sibling diff hunk in this commit,
    presumably a ``(Tensor, QuantState)`` pair — confirm against the
    ``quantize_4bit`` definition).

    NOTE(review): ``absmax`` and ``out`` default to ``None`` while being
    annotated plain ``Tensor`` — per PEP 484 these are implicitly Optional
    and should be ``Optional[Tensor]``; worth fixing alongside the return
    hint corrected in this commit (#992).
    """
    return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4', quant_storage)

def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tensor:
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> (Tensor, QuantState):
"""
Quantize tensor A in blocks of 4-bit values.
Expand Down

0 comments on commit 8ddfda1

Please sign in to comment.