Traceback (most recent call last):
File "finetune.py", line 367, in <module>
train()
File "finetune.py", line 345, in train
model = get_peft_model(model, lora_config)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/mapping.py", line 133, in get_peft_model
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/peft_model.py", line 1043, in __init__
super().__init__(model, peft_config, adapter_name)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/peft_model.py", line 125, in __init__
self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/tuners/lora/model.py", line 111, in __init__
super().__init__(model, config, adapter_name)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/tuners/tuners_utils.py", line 90, in __init__
self.inject_adapter(self.model, adapter_name)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/tuners/tuners_utils.py", line 247, in inject_adapter
self._create_and_replace(peft_config, adapter_name, target, target_name, parent, **optional_kwargs)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/tuners/lora/model.py", line 202, in _create_and_replace
new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/tuners/lora/model.py", line 268, in _create_new_module
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
File "/ssd1/miniconda3/envs/pytorch2.1.2/lib/python3.8/site-packages/peft/utils/other.py", line 449, in get_auto_gptq_quant_linear
AutoGPTQQuantLinear = dynamically_import_QuantLinear(
TypeError: dynamically_import_QuantLinear() got an unexpected keyword argument 'disable_exllamav2'
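This TypeError most likely points to a version mismatch between peft and auto-gptq: the installed peft passes a disable_exllamav2 keyword to auto_gptq's dynamically_import_QuantLinear, but older auto-gptq builds do not accept that parameter. To my understanding it was added around auto-gptq 0.5.0 (the release that introduced exllamav2 kernel support), so upgrading auto-gptq, or downgrading peft to a release that does not pass the keyword, should resolve it. Below is a minimal diagnostic sketch under that assumption; the 0.5.0 version bound is inferred from the auto-gptq changelog, not stated in the traceback itself.

# Compatibility probe: checks whether the installed auto_gptq accepts the
# disable_exllamav2 keyword that newer peft versions pass (assumption:
# the keyword was added around auto-gptq 0.5.0).
import inspect

from auto_gptq.utils.import_utils import dynamically_import_QuantLinear

params = inspect.signature(dynamically_import_QuantLinear).parameters
if "disable_exllamav2" not in params:
    raise RuntimeError(
        "auto-gptq is too old for this peft version; "
        'try: pip install -U "auto-gptq>=0.5.0" (or downgrade peft)'
    )
print("auto-gptq accepts disable_exllamav2; the mismatch lies elsewhere")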