diff --git a/src/autotrain/cli/run_llm.py b/src/autotrain/cli/run_llm.py
index c8816b5060..d76d28c0b7 100644
--- a/src/autotrain/cli/run_llm.py
+++ b/src/autotrain/cli/run_llm.py
@@ -407,10 +407,16 @@ def __init__(self, args):
                     break
                 print(f"Bot: {tgi.chat(prompt)}")
 
-        if not torch.cuda.is_available():
-            raise ValueError("No GPU found. Please install CUDA and try again.")
+        cuda_available = torch.cuda.is_available()
+        mps_available = torch.backends.mps.is_available()
 
-        self.num_gpus = torch.cuda.device_count()
+        if not cuda_available and not mps_available:
+            raise ValueError("No GPU/MPS device found. LLM training requires an accelerator")
+
+        if cuda_available:
+            self.num_gpus = torch.cuda.device_count()
+        elif mps_available:
+            self.num_gpus = 1
 
     def run(self):
         from autotrain.backend import EndpointsRunner, SpaceRunner
diff --git a/src/autotrain/trainers/clm/__main__.py b/src/autotrain/trainers/clm/__main__.py
index 853c12a1b6..177142e690 100644
--- a/src/autotrain/trainers/clm/__main__.py
+++ b/src/autotrain/trainers/clm/__main__.py
@@ -114,17 +114,19 @@ def train(config):
             bnb_4bit_compute_dtype=torch.float16,
             bnb_4bit_use_double_quant=False,
         )
+        config.fp16 = True
     elif config.use_int8:
         bnb_config = BitsAndBytesConfig(load_in_8bit=config.use_int8)
+        config.fp16 = True
     else:
-        bnb_config = BitsAndBytesConfig()
+        bnb_config = None
 
     model = AutoModelForCausalLM.from_pretrained(
         config.model,
         config=model_config,
         token=config.token,
         quantization_config=bnb_config,
-        torch_dtype=torch.float16,
+        torch_dtype=torch.float16 if config.fp16 else torch.float32,
         device_map={"": Accelerator().process_index} if torch.cuda.is_available() else None,
         trust_remote_code=True,
         use_flash_attention_2=config.use_flash_attention_2,
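
For reference, a minimal standalone sketch of the two behaviours this patch introduces: CUDA-or-MPS accelerator detection, and tying the model dtype to quantization (int4/int8 forces fp16 compute, otherwise the dtype follows config.fp16). The attribute names use_int4, use_int8 and fp16 come from the diff above; the helper names and the load_in_4bit argument are illustrative assumptions, not the exact autotrain code.

import torch
from transformers import BitsAndBytesConfig


def count_accelerators() -> int:
    # Mirrors the run_llm.py change: prefer CUDA, fall back to a single MPS device,
    # and fail early if neither accelerator is present.
    cuda_available = torch.cuda.is_available()
    mps_available = torch.backends.mps.is_available()
    if not cuda_available and not mps_available:
        raise ValueError("No GPU/MPS device found. LLM training requires an accelerator")
    return torch.cuda.device_count() if cuda_available else 1


def quantization_and_dtype(config):
    # Mirrors the clm/__main__.py change: quantized loading implies fp16 compute,
    # otherwise no BitsAndBytesConfig is passed and the dtype is driven by config.fp16.
    if config.use_int4:
        bnb_config = BitsAndBytesConfig(  # load_in_4bit kwarg assumed; other kwargs from the diff
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=False,
        )
        config.fp16 = True
    elif config.use_int8:
        bnb_config = BitsAndBytesConfig(load_in_8bit=True)
        config.fp16 = True
    else:
        bnb_config = None  # previously an empty BitsAndBytesConfig() was always passed
    torch_dtype = torch.float16 if config.fp16 else torch.float32
    return bnb_config, torch_dtype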