Skip to content

Commit 336740e

Browse files
committed
Add Torch 2.9 options
1 parent 3462703 commit 336740e

File tree

2 files changed: +60 additions, −10 deletions

‎pyproject.toml‎

Lines changed: 57 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ huggingfacenotorch = [
5959
]
6060
huggingface = [
6161
"unsloth[huggingfacenotorch]",
62-
"unsloth_zoo>=2025.10.10",
62+
"unsloth_zoo>=2025.10.11",
6363
"torchvision",
6464
"unsloth[triton]",
6565
]
@@ -269,6 +269,14 @@ cu128onlytorch280 = [
269269
"xformers @ https://download.pytorch.org/whl/cu129/xformers-0.0.32.post2-cp39-abi3-manylinux_2_28_x86_64.whl ; ('linux' in sys_platform)",
270270
"xformers @ https://download.pytorch.org/whl/cu129/xformers-0.0.32.post2-cp39-abi3-win_amd64.whl ; (sys_platform == 'win32')",
271271
]
272+
cu130onlytorch280 = [
273+
]
274+
cu126onlytorch290 = [
275+
]
276+
cu128onlytorch290 = [
277+
]
278+
cu130onlytorch290 = [
279+
]
272280
cu118 = [
273281
"unsloth[huggingface]",
274282
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
@@ -424,6 +432,26 @@ cu128-torch280 = [
424432
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
425433
"unsloth[cu128onlytorch280]",
426434
]
435+
cu130-torch280 = [
436+
"unsloth[huggingface]",
437+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
438+
"unsloth[cu130onlytorch280]",
439+
]
440+
cu126-torch290 = [
441+
"unsloth[huggingface]",
442+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
443+
"unsloth[cu126onlytorch290]",
444+
]
445+
cu128-torch290 = [
446+
"unsloth[huggingface]",
447+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
448+
"unsloth[cu128onlytorch290]",
449+
]
450+
cu130-torch290 = [
451+
"unsloth[huggingface]",
452+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
453+
"unsloth[cu130onlytorch290]",
454+
]
427455
kaggle = [
428456
"unsloth[huggingface]",
429457
]
@@ -461,7 +489,7 @@ colab-ampere-torch220 = [
461489
"flash-attn>=2.6.3 ; ('linux' in sys_platform)",
462490
]
463491
colab-new = [
464-
"unsloth_zoo>=2025.10.10",
492+
"unsloth_zoo>=2025.10.11",
465493
"packaging",
466494
"tyro",
467495
"transformers>=4.51.3,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0,!=4.54.0,!=4.55.0,!=4.55.1,<=4.56.2",
@@ -671,6 +699,27 @@ cu128-ampere-torch280 = [
671699
"unsloth[cu128onlytorch280]",
672700
"unsloth[flashattention]",
673701
]
702+
cu130-ampere-torch280 = [
703+
"unsloth[huggingface]",
704+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
705+
"unsloth[cu130onlytorch280]",
706+
"unsloth[flashattention]",
707+
]
708+
cu126-ampere-torch290 = [
709+
"unsloth[huggingface]",
710+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
711+
"unsloth[cu126onlytorch290]",
712+
]
713+
cu128-ampere-torch290 = [
714+
"unsloth[huggingface]",
715+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
716+
"unsloth[cu128onlytorch290]",
717+
]
718+
cu130-ampere-torch290 = [
719+
"unsloth[huggingface]",
720+
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",
721+
"unsloth[cu130onlytorch290]",
722+
]
674723
flashattentiontorch260abiFALSEcu12x = [
675724
"flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp39-cp39-linux_x86_64.whl ; ('linux' in sys_platform) and python_version == '3.9'",
676725
"flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl ; ('linux' in sys_platform) and python_version == '3.10'",
@@ -743,12 +792,6 @@ intel-gpu-torch270 = [
743792
"torch @ https://download.pytorch.org/whl/xpu/torch-2.7.0%2Bxpu-cp312-cp312-linux_x86_64.whl#sha256=c806d44aa2ca5d225629f6fbc6c994d5deaac2d2cde449195bc8e3522ddd219a ; ('linux' in sys_platform) and python_version == '3.12' and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
744793
"torch @ https://download.pytorch.org/whl/xpu/torch-2.7.0%2Bxpu-cp313-cp313-linux_x86_64.whl#sha256=25d8277b7f01d42e2e014ccbab57a2692b6ec4eff8dcf894eda1b297407cf97a ; ('linux' in sys_platform) and python_version == '3.13' and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
745794
]
746-
amd = [
747-
"unsloth[huggingfacenotorch]",
748-
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-manylinux_2_24_x86_64.whl ; ('linux' in sys_platform) and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
749-
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-win_amd64.whl ; (sys_platform == 'win32') and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
750-
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-manylinux_2_24_aarch64.whl ; ('linux' in sys_platform) and (platform_machine == 'aarch64')",
751-
]
752795
intel-gpu-torch280 = [
753796
"unsloth[huggingface]",
754797

@@ -764,6 +807,12 @@ intel-gpu-torch280 = [
764807
"torch @ https://download.pytorch.org/whl/xpu/torch-2.8.0%2Bxpu-cp312-cp312-linux_x86_64.whl ; platform_system == 'Linux' and python_version == '3.12' and platform_machine == 'x86_64'",
765808
"torch @ https://download.pytorch.org/whl/xpu/torch-2.8.0%2Bxpu-cp313-cp313-linux_x86_64.whl ; platform_system == 'Linux' and python_version == '3.13' and platform_machine == 'x86_64'",
766809
]
810+
amd = [
811+
"unsloth[huggingfacenotorch]",
812+
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-manylinux_2_24_x86_64.whl ; ('linux' in sys_platform) and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
813+
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-win_amd64.whl ; (sys_platform == 'win32') and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
814+
"bitsandbytes @ https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-manylinux_2_24_aarch64.whl ; ('linux' in sys_platform) and (platform_machine == 'aarch64')",
815+
]
767816

768817
[project.urls]
769818
homepage = "http://www.unsloth.ai"

‎unsloth/_auto_install.py‎

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
cuda = str(torch.version.cuda)
2121
is_ampere = torch.cuda.get_device_capability()[0] >= 8
2222
USE_ABI = torch._C._GLIBCXX_USE_CXX11_ABI
23-
if cuda not in ("11.8", "12.1", "12.4", "12.6", "12.8"): raise RuntimeError(f"CUDA = {cuda} not supported!")
23+
if cuda not in ("11.8", "12.1", "12.4", "12.6", "12.8", "13.0"): raise RuntimeError(f"CUDA = {cuda} not supported!")
2424
if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!")
2525
elif v <= V('2.1.1'): x = 'cu{}{}-torch211'
2626
elif v <= V('2.1.2'): x = 'cu{}{}-torch212'
@@ -33,7 +33,8 @@
3333
elif v < V('2.7.9'): x = 'cu{}{}-torch270'
3434
elif v < V('2.8.0'): x = 'cu{}{}-torch271'
3535
elif v < V('2.8.9'): x = 'cu{}{}-torch280'
36+
elif v < V('2.9.1'): x = 'cu{}{}-torch290'
3637
else: raise RuntimeError(f"Torch = {v} too new!")
37-
if v > V('2.6.9') and cuda not in ("11.8", "12.6", "12.8"): raise RuntimeError(f"CUDA = {cuda} not supported!")
38+
if v > V('2.6.9') and cuda not in ("11.8", "12.6", "12.8", "13.0"): raise RuntimeError(f"CUDA = {cuda} not supported!")
3839
x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "")
3940
print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"')

Comments (0)