
Commit 2b0f78b

comma
1 parent 7cac178 commit 2b0f78b

File tree

torchao/quantization/__init__.py
torchao/quantization/quant_primitives.py

2 files changed: +12 −8 lines changed

torchao/quantization/__init__.py (+12 −7)
@@ -56,9 +56,17 @@
 from .quant_primitives import (
     MappingType,
     ZeroPointDomain,
+    TorchAODType,
     choose_qparams_affine,
-    dequantize_affine,
     quantize_affine,
+    dequantize_affine,
+    choose_qparams_affine_floatx,
+    choose_qparams_affine_with_min_max,
+    quantize_affine_floatx,
+    dequantize_affine_floatx,
+    fake_quantize_affine,
+    fake_quantize_affine_cachemask,
+    choose_qparams_and_quantize_affine_hqq,
 )
 from .smoothquant import (
     SmoothFakeDynamicallyQuantizedLinear,
@@ -74,9 +82,6 @@
     compute_error,
 )
 from .weight_only import WeightOnlyInt8QuantLinear
-from .linear_activation_weight_observed_tensor import (
-    to_linear_activation_weight_observed,
-)
 
 __all__ = [
     # top level API - auto
@@ -94,12 +99,12 @@
     "int8_weight_only",
     "float8_weight_only",
     "float8_dynamic_activation_float8_weight",
-    "float8_static_activation_float8_weight"
+    "float8_static_activation_float8_weight",
     "uintx_weight_only",
     "fpx_weight_only",
 
     # smooth quant - subject to change
-    "swap_conv2d_1x1_to_linear"
+    "swap_conv2d_1x1_to_linear",
     "get_scale",
     "SmoothFakeDynQuantMixin",
     "SmoothFakeDynamicallyQuantizedLinear",
@@ -115,7 +120,7 @@
     "AffineQuantizedObserverBase",
 
     # quant primitive ops
-    "choose_qprams_affine",
+    "choose_qparams_affine",
     "choose_qparams_affine_with_min_max",
     "choose_qparams_affine_floatx",
     "quantize_affine",

torchao/quantization/quant_primitives.py (−1)
@@ -10,7 +10,6 @@
 
 import torch
 
-from torchao.kernel.intmm import int_scaled_matmul, safe_int_mm
 from torchao.prototype.custom_fp_utils import (
     _f32_to_floatx_unpacked,
     _floatx_unpacked_to_f32,
