From dbf7542a6f0922022a47008aede9924753456c74 Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Fri, 24 May 2024 14:55:52 -0400
Subject: [PATCH 1/5] Align readme (#1438)

---
 README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e4377d5281..9f233d2074 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,11 @@ Uses the latest state-of-the-art techniques:
-✅ flash attention     ✅ fp4/8/16/32     ✅ LoRA, QLoRA, Adapter (v1, v2)     ✅ FSDP     ✅ 1-1000+ GPUs/TPUs
+
+                     ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter (v1, v2)
+✅ FSDP                  ✅ 1-1000+ GPUs/TPUs    ✅ 20+ LLMs
+
+
 ---

From 1754a2b55b7a834fac139abb75069677a8cc673c Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Fri, 24 May 2024 15:36:28 -0400
Subject: [PATCH 2/5] Pin litdata (#1440)

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 7627a17691..4c0bf6f421 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -37,7 +37,7 @@ all = [
     "sentencepiece>=0.2.0",  # llama-based models
     "tokenizers>=0.15.2",  # pythia, falcon, redpajama
     "requests>=2.31.0",  # litgpt.data
-    "litdata>=0.2.6",  # litgpt.data
+    "litdata==0.2.6",  # litgpt.data
     "litserve>=0.1.0",  # litgpt.deploy
     "zstandard>=0.22.0",  # litgpt.data.prepare_slimpajama.py
     "pandas>=1.9.0",  # litgpt.data.prepare_starcoder.py

From 19a0d7a2d7d4de60622c6fb4376bf474382c3986 Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Fri, 24 May 2024 16:05:52 -0400
Subject: [PATCH 3/5] Fix README.md alignment (#1439)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9f233d2074..97066ac225 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ Uses the latest state-of-the-art techniques:
-                     ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter (v1, v2)
+             ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter
 ✅ FSDP                  ✅ 1-1000+ GPUs/TPUs    ✅ 20+ LLMs
 
From 221b7ef54161272162aa9b036f1ef3674f3160a4 Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Fri, 24 May 2024 16:29:58 -0400
Subject: [PATCH 4/5] Update README.md for one last time (#1442)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 97066ac225..02f89b71f7 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ Uses the latest state-of-the-art techniques:
-             ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter
+            ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter
 ✅ FSDP                  ✅ 1-1000+ GPUs/TPUs    ✅ 20+ LLMs
 
From f6654e8753cf9bc328a8a9b4d1ec86a74cb2a340 Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Tue, 28 May 2024 10:07:22 -0400
Subject: [PATCH 5/5] A more centered look (#1449)

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 02f89b71f7..3122b682a0 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@ Uses the latest state-of-the-art techniques:
-            ✅ flash attention       ✅ fp4/8/16/32          ✅ LoRA, QLoRA, Adapter
-✅ FSDP                  ✅ 1-1000+ GPUs/TPUs    ✅ 20+ LLMs
+✅ flash attention    ✅ fp4/8/16/32        ✅ LoRA, QLoRA, Adapter
+✅ FSDP               ✅ 1-1000+ GPUs/TPUs  ✅ 20+ LLMs