import torch
from torch.utils._python_dispatch import return_and_correct_aliasing

from .subclass import (  # noqa
    Int8DynamicallyQuantizedLinearWeight,
    Int8WeightOnlyQuantizedLinearWeight,
    QuantizedLinearWeightBase,
)
from .utils import benchmark

aten = torch.ops.aten

# maps (activation shape, weight shape, bias shape) -> {qtensor class: measured runtime}
AUTOQUANT_CACHE = {}

def check_cache(shape, cls):
    """Return the cached benchmark result for (shape, cls), or None if it hasn't been measured yet."""
    if shape in AUTOQUANT_CACHE:
        return AUTOQUANT_CACHE[shape].get(cls, None)
    return None

def update_cache(shape, cls, res):
    """Record the benchmark result for (shape, cls)."""
    if shape not in AUTOQUANT_CACHE:
        AUTOQUANT_CACHE[shape] = {}
    AUTOQUANT_CACHE[shape][cls] = res

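# Illustrative example of the cache layout (the shapes and timings below are made up):
# after one profiled forward pass through a (4096, 1024) linear layer with a (32, 1024)
# activation and no bias, the cache might look like
#   AUTOQUANT_CACHE[((32, 1024), (4096, 1024), None)] == {
#       DefaultLinear: 0.82,
#       Int8WeightOnlyQuantizedLinearWeight: 0.41,
#       Int8DynamicallyQuantizedLinearWeight: 0.55,
#   }
# and to_quantized() would then pick Int8WeightOnlyQuantizedLinearWeight for that shape.
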
class AutoQuantizableLinearWeight(torch.Tensor):
    """
    A tensor subclass that, when run through a linear op, benchmarks each candidate
    quantization scheme in `qtensor_class_list` for the observed (activation, weight, bias)
    shapes and can then swap itself for the fastest one via `to_quantized`.
    """
    @staticmethod
    def __new__(cls, weight, qtensor_class_list, *args, **kwargs):
        kwargs["device"] = weight.device
        kwargs["layout"] = (
            kwargs.get("layout") if kwargs.get("layout", False) else weight.layout
        )
        kwargs["dtype"] = (
            kwargs.get("dtype") if kwargs.get("dtype", False) else weight.dtype
        )
        kwargs["requires_grad"] = False
        shape = kwargs.pop("shape", weight.shape)
        return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)  # type: ignore[attr-defined]

    def __init__(self, weight, qtensor_class_list, *args, **kwargs):
        self.weight = weight
        self.qtensor_class_list = qtensor_class_list
        self.cache_shape = None

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(data={self.weight}, shape={self.shape}, "
            f"device={self.device}, dtype={self.dtype}, qtensor_class_list={self.qtensor_class_list})"
        )

    @staticmethod
    def tune_autoquant(act_mat, w_autoquant, bias):
        orig_shape = act_mat.shape
        act_mat = act_mat.reshape(-1, act_mat.shape[-1])
        cache_shape = (act_mat.shape, w_autoquant.shape, None if bias is None else bias.shape)
        w_autoquant.cache_shape = cache_shape
        # benchmark each candidate class once per unique (activation, weight, bias) shape
        for cur_cls in w_autoquant.qtensor_class_list:
            if check_cache(cache_shape, cur_cls) is None:
                with torch.no_grad():
                    print(cur_cls, cache_shape)
                    print(torch.cuda.max_memory_allocated()/1e6, torch.cuda.memory_usage())
                    res = cur_cls._autoquant_test(act_mat.clone(), w_autoquant.weight.clone(), None if bias is None else bias.clone())
                    update_cache(cache_shape, cur_cls, res)
                    print(torch.cuda.max_memory_allocated()/1e6, torch.cuda.memory_usage())
        # run the linear op unquantized so this profiling pass still produces a correct output
        y = torch.mm(act_mat, w_autoquant.weight.t())
        y = y.reshape(*orig_shape[:-1], y.shape[-1])
        if bias is not None:
            y += bias
        return y

    def to_quantized(self):
        if self.cache_shape is None or self.cache_shape not in AUTOQUANT_CACHE:
            raise RuntimeError("must run module normally to find best quantization option")
        best_time = torch.inf
        best_cls = None
        for cur_cls in self.qtensor_class_list:
            cls_res = AUTOQUANT_CACHE[self.cache_shape].get(cur_cls, torch.inf)
            if best_time >= cls_res:
                best_time = cls_res
                best_cls = cur_cls
        # need to handle random cls args/kwargs?
        return best_cls.from_float(self.weight)

    def _apply_fn_to_data(self, fn):
        return self.__class__(
            fn(self.weight), self.qtensor_class_list, dtype=self.dtype
        )

    def __tensor_flatten__(self):
        return ["weight"], [self.qtensor_class_list, self.dtype, self.shape]

    @classmethod
    def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):
        weight = tensor_data_dict["weight"]
        # tensor_attributes is the list produced by __tensor_flatten__ above
        qtensor_class_list, dtype, shape = tensor_attributes
        return cls(weight, qtensor_class_list, shape=shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)

    @classmethod
    def from_float(cls, weight, qtensor_class_list):
        return cls(weight, qtensor_class_list)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = {} if kwargs is None else kwargs

        if func is torch.nn.functional.linear:
            mat1, w_autoquant, bias = (
                args[0],
                args[1],
                args[2] if len(args) > 2 else None,
            )
            return cls.tune_autoquant(mat1, w_autoquant, bias)

        try:
            with torch._C.DisableTorchFunctionSubclass():
                return func(*args, **kwargs)
        except Exception:
            print(f"ERR: subclass doesn't implement {func}")

    @classmethod
    def __torch_dispatch__(cls, func, types, args, kwargs):
        if func is aten.detach.default:
            return return_and_correct_aliasing(func, args, kwargs, args[0]._apply_fn_to_data(torch.detach))
        raise NotImplementedError(f"{cls.__name__} dispatch: attempting to run {func}, this is not supported")


class DefaultLinear(torch.Tensor):
    """
    A class used in concert with AutoQuantizableLinearWeight to provide a
    default/non-quantized option. It implements only the bare minimum needed to work with
    AutoQuantizableLinearWeight, using the same interface that the QTensor subclasses
    expose, but backed by a plain (unquantized) linear op.
    """
    def __init__(self):
        super().__init__()

    @classmethod
    def _autoquant_test(cls, act_mat, weight, bias):
        w_qtensor = cls.from_float(weight)
        # compile the op so the timing reflects the autotuned kernels that would be used in practice
        q_c_op = torch.compile(cls._quantized_op, mode="max-autotune")
        with torch.no_grad():
            res = benchmark(q_c_op, act_mat, w_qtensor, bias)
        print(cls, res)
        return res

    @staticmethod
    def _quantized_op(act_mat, w_qtensor, bias):
        return torch.nn.functional.linear(act_mat, w_qtensor, bias)

    @classmethod
    def from_float(cls, weight):
        return weight

DEFAULT_CLASS_LIST = [
    DefaultLinear,
    Int8WeightOnlyQuantizedLinearWeight,
    Int8DynamicallyQuantizedLinearWeight,
]

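# Usage sketch (illustrative only, not part of this module's API; assumes a CUDA device
# and that every class in DEFAULT_CLASS_LIST implements `_autoquant_test`): wrap a weight,
# run one linear call so tune_autoquant can benchmark each candidate for the observed
# shapes, then swap in the fastest option.
#
#   w = AutoQuantizableLinearWeight.from_float(
#       torch.randn(4096, 1024, device="cuda"), DEFAULT_CLASS_LIST
#   )
#   x = torch.randn(32, 1024, device="cuda")
#   torch.nn.functional.linear(x, w)  # profiling pass, returns the unquantized result
#   quantized_w = w.to_quantized()    # instance of the fastest class for this shape
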
if False:
    # def _get_to_kwargs(self, *args, **kwargs):
    #     device, dtype, _, memory_format = torch._C._nn._parse_to(*args, **kwargs)
    #     device = self.device if device is None else device
    #     dtype = self.dtype if dtype is None else dtype
    #     memory_format = (
    #         memory_format if memory_format is not None else torch.preserve_format
    #     )
    #     kwargs = {
    #         "device": device,
    #         "dtype": dtype,
    #         "memory_format": memory_format,
    #     }
    #     return kwargs

    # def to(self, *args, **kwargs):
    #     kwargs = self._get_to_kwargs(*args, **kwargs)
    #     return self.__class__(
    #         self.int_data.to(kwargs["device"]),
    #         self.q_scales.to(kwargs["device"]),
    #         self.transposed,
    #         self.shape,
    #         **kwargs,
    #     )

    # def _apply_fn_to_data(self, fn):
    #     return self.__class__(
    #         fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype
    #     )

    # def _change_shape(self, shape):
    #     return self.__class__(
    #         self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype
    #     )

    # def half(self):
    #     return self.to(torch.float16)
    pass