From 49257901f556ae522885f2ce86278417c6887c2a Mon Sep 17 00:00:00 2001
From: casinca <47400729+casinca@users.noreply.github.com>
Date: Sun, 17 Nov 2024 19:57:46 +0100
Subject: [PATCH] typo & comment

- safe -> save
- commenting code: batch_size, seq_len = in_idx.shape
---
 ch05/07_gpt_to_llama/converting-gpt-to-llama2.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ch05/07_gpt_to_llama/converting-gpt-to-llama2.ipynb b/ch05/07_gpt_to_llama/converting-gpt-to-llama2.ipynb
index e75ff4a6..b64a710e 100644
--- a/ch05/07_gpt_to_llama/converting-gpt-to-llama2.ipynb
+++ b/ch05/07_gpt_to_llama/converting-gpt-to-llama2.ipynb
@@ -648,7 +648,7 @@
     "\n",
     "mha(example_batch)\n",
     "\n",
-    "del mha # delete to safe memory"
+    "del mha # delete to save memory"
    ]
   },
   {
@@ -781,7 +781,7 @@
     "        self.out_head = nn.Linear(cfg[\"emb_dim\"], cfg[\"vocab_size\"], bias=False, dtype=cfg[\"dtype\"])\n",
     "\n",
     "    def forward(self, in_idx):\n",
-    "        batch_size, seq_len = in_idx.shape\n",
+    "        # batch_size, seq_len = in_idx.shape\n",
     "        tok_embeds = self.tok_emb(in_idx)\n",
     "        # pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))\n",
     "        x = tok_embeds  # + pos_embeds  # Shape [batch_size, num_tokens, emb_size]\n",
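
Note on the second hunk: `seq_len` was only needed to build the absolute position embeddings (`pos_embeds`), and that line is already commented out in the Llama conversion because rotary position embeddings (RoPE) are applied inside the attention layers instead. With `pos_embeds` gone, the shape unpacking is dead code, so the patch comments it out as well. A minimal, self-contained sketch of the embedding step after this change (the small config values and the `embed` helper below are hypothetical, chosen only to make the sketch runnable):

import torch
import torch.nn as nn

# Hypothetical toy config, not the notebook's real Llama 2 settings.
cfg = {"vocab_size": 100, "emb_dim": 16, "dtype": torch.float32}

tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"], dtype=cfg["dtype"])

def embed(in_idx):
    # No `batch_size, seq_len = in_idx.shape` needed anymore: seq_len was only
    # used for absolute position embeddings, which the Llama conversion drops
    # in favor of RoPE applied inside attention.
    tok_embeds = tok_emb(in_idx)
    return tok_embeds  # Shape [batch_size, num_tokens, emb_size]

example_batch = torch.randint(0, cfg["vocab_size"], (2, 8))  # [batch_size, seq_len]
print(embed(example_batch).shape)  # torch.Size([2, 8, 16])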