RoPE increase (#407)
rasbt authored Oct 22, 2024
1 parent 7513360 commit 534a704
Showing 3 changed files with 15 additions and 15 deletions.
18 changes: 9 additions & 9 deletions ch05/07_gpt_to_llama/converting-llama2-to-llama3.ipynb
@@ -254,12 +254,12 @@
"- Llama 3 uses rotary position embeddings (RoPE) similar to Llama 2 (for a detailed explanation, please see the [RoPE paper](https://arxiv.org/abs/2104.09864))\n",
"- There are some subtle differences in the RoPE settings, though\n",
" - Llama 3 now supports up to 8,192 tokens, twice as many as Llama 2 (4,096)\n",
" - The base value for the so-called RoPE $\\theta$ (see equation below) was increased from 10,000 (Llama 2) to 50,000 (Llama 3) in the following equation (adapted from the [RoPE paper](https://arxiv.org/abs/2104.09864))\n",
" - The base value for the so-called RoPE $\\theta$ (see equation below) was increased from 10,000 (Llama 2) to 500,000 (Llama 3) in the following equation (adapted from the [RoPE paper](https://arxiv.org/abs/2104.09864))\n",
"\n",
"$$\\Theta = \\left\\{\\theta_i = \\text{base}^{\\frac{-2(i-1)}{d}}, i \\in \\left[1, 2, ..., d/2\\right]\\right\\}$$\n",
"\n",
"- These $\\theta$ values are a set of predefined parameters that are used to determine the rotational angles in the rotary matrix, where $d$ is the dimensionality of the embedding space\n",
"- Increasing the base from 10,000 to 50,000 makes the frequencies (or rotation angles) decay more slowly across the dimensions, which means that higher dimensions will be associated with larger angles than before (essentially, it's a decompression of the frequencies)\n",
"- Increasing the base from 10,000 to 500,000 makes the frequencies (or rotation angles) decay more slowly across the dimensions, which means that higher dimensions will be associated with larger angles than before (essentially, it's a decompression of the frequencies)\n",
"- In addition, we introduce a `freq_config` section in the code below that adjusts the frequency; however, we won't be needing it in Llama 3 (only Llama 3.1 and Llama 3.2), so we will revisit this `freq_config` later (it's set to `None` and ignored by default)"
]
},
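Side note on the bullets above (not part of this commit): a minimal sketch comparing the inverse frequencies $\theta_i$ produced by the two base values, with an illustrative `head_dim`; printing both frequency ladders side by side makes the effect of the larger base directly visible.

```python
import torch

head_dim = 16  # illustrative value only

for base in (10_000, 500_000):
    # theta_i = base^(-2(i-1)/d), computed here as inverse frequencies
    inv_freq = 1.0 / base ** (torch.arange(0, head_dim, 2).float() / head_dim)
    print(f"base={base:>7}: {inv_freq.tolist()}")
```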
@@ -274,7 +274,7 @@
"source": [
"import torch\n",
"\n",
"def precompute_rope_params(head_dim, theta_base=10000, context_length=4096, freq_config=None):\n",
"def precompute_rope_params(head_dim, theta_base=10_000, context_length=4096, freq_config=None):\n",
" assert head_dim % 2 == 0, \"Embedding dimension must be even\"\n",
"\n",
" # Compute the inverse frequencies\n",
@@ -347,7 +347,7 @@
"llama_3_context_len = 8192\n",
"\n",
"llama_2_theta_base = 10_000\n",
"llama_3_theta_base = 50_000"
"llama_3_theta_base = 500_000"
]
},
{
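Usage sketch (not part of the diff): the constants above could be passed to `precompute_rope_params`, whose signature appears in the hunk further up; the `head_dim` value is an assumption for illustration, and the `(cos, sin)` return values follow the test code later in this commit.

```python
# Sketch only; signature taken from the function definition shown above
cos, sin = precompute_rope_params(
    head_dim=16,                         # assumed value for illustration
    theta_base=llama_3_theta_base,       # 500_000 after this commit
    context_length=llama_3_context_len,  # 8192
)
print(cos.shape, sin.shape)
```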
@@ -907,7 +907,7 @@
" \"n_layers\": 32, # Number of layers\n",
" \"hidden_dim\": 14_336, # NEW: Larger size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # NEW: Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # NEW: The base in RoPE's \"theta\" was increased to 50_000\n",
" \"rope_base\": 500_000, # NEW: The base in RoPE's \"theta\" was increased to 500_000\n",
" \"rope_freq\": None, # NEW: Additional configuration for adjusting the RoPE frequencies\n",
" \"dtype\": torch.bfloat16 # Lower-precision dtype to save memory\n",
"}"
@@ -2060,7 +2060,7 @@
" \"n_layers\": 32, # Number of layers\n",
" \"hidden_dim\": 14_336, # Size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
" \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
" \"rope_freq\": None, # Additional configuration for adjusting the RoPE frequencies\n",
" \"dtype\": torch.bfloat16 # Lower-precision dtype to save memory\n",
"}\n",
@@ -2073,7 +2073,7 @@
" \"n_layers\": 32, # Number of layers\n",
" \"hidden_dim\": 14_336, # Size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
" \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
" \"dtype\": torch.bfloat16, # Lower-precision dtype to save memory\n",
" \"rope_freq\": { # NEW: RoPE frequency scaling\n",
" \"factor\": 8.0,\n",
@@ -2421,7 +2421,7 @@
" \"n_layers\": 32, # Number of layers\n",
" \"hidden_dim\": 14_336, # Size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
" \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
" \"dtype\": torch.bfloat16, # Lower-precision dtype to save memory\n",
" \"rope_freq\": { # NEW: RoPE frequency scaling\n",
" \"factor\": 8.0,\n",
@@ -2440,7 +2440,7 @@
" \"n_layers\": 16, # NEW: Half the number of layers\n",
" \"hidden_dim\": 8192, # NEW: Almost half the size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
" \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
" \"dtype\": torch.bfloat16, # Lower-precision dtype to save memory\n",
" \"rope_freq\": { # RoPE frequency scaling\n",
" \"factor\": 32.0, # NEW: Adjustment of the rescaling factor\n",
6 changes: 3 additions & 3 deletions ch05/07_gpt_to_llama/standalone-llama32.ipynb
@@ -129,7 +129,7 @@
"metadata": {},
"outputs": [],
"source": [
"def precompute_rope_params(head_dim, theta_base=10000, context_length=4096, freq_config=None):\n",
"def precompute_rope_params(head_dim, theta_base=10_000, context_length=4096, freq_config=None):\n",
" assert head_dim % 2 == 0, \"Embedding dimension must be even\"\n",
"\n",
" # Compute the inverse frequencies\n",
@@ -407,7 +407,7 @@
" \"n_layers\": 16, # Number of layers\n",
" \"hidden_dim\": 8192, # Size of the intermediate dimension in FeedForward\n",
" \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
" \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
" \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
" \"dtype\": torch.bfloat16, # Lower-precision dtype to save memory\n",
" \"rope_freq\": { # RoPE frequency scaling\n",
" \"factor\": 32.0,\n",
@@ -427,7 +427,7 @@
"# \"n_layers\": 28, # Number of layers\n",
"# \"hidden_dim\": 8192, # Size of the intermediate dimension in FeedForward\n",
"# \"n_kv_groups\": 8, # Key-Value groups for grouped-query attention\n",
"# \"rope_base\": 50_000, # The base in RoPE's \"theta\"\n",
"# \"rope_base\": 500_000, # The base in RoPE's \"theta\"\n",
"# \"dtype\": torch.bfloat16, # Lower-precision dtype to save memory\n",
"# \"rope_freq\": { # RoPE frequency scaling\n",
"# \"factor\": 32.0,\n",
6 changes: 3 additions & 3 deletions ch05/07_gpt_to_llama/tests/tests.py
@@ -111,7 +111,7 @@ def test_rope_llama3(notebook):
context_len = 8192
num_heads = 4
head_dim = 16
-theta_base = 50_000
+theta_base = 500_000

# Instantiate RoPE parameters
cos, sin = nb2.precompute_rope_params(
@@ -155,7 +155,7 @@ def test_rope_llama3_12(notebook):
context_len = 8192
num_heads = 4
head_dim = 16
-rope_theta = 50_000
+rope_theta = 500_000

rope_config = {
"factor": 8.0,
@@ -194,7 +194,7 @@ class RoPEConfig:
rope_scaling = hf_rope_params
factor = 1.0
dim: int = head_dim
-rope_theta = 50_000
+rope_theta = 500_000
max_position_embeddings: int = 8192
hidden_size = head_dim * num_heads
num_attention_heads = num_heads
