Update pep8 (#78)
* simplify requirements file

* style

* apply linter
rasbt authored Mar 18, 2024
1 parent 3752346 commit 3f1dfc9
Showing 13 changed files with 82 additions and 120 deletions.
@@ -25,9 +25,10 @@ def get_packages(pkgs):
         except AttributeError:
             try:
                 versions.append(imported.version_info)
-            except:
+            except AttributeError:
                 try:
-                    import importlib, importlib_metadata
+                    import importlib
+                    import importlib_metadata
                     imported = importlib.import_module(p)
                     version = importlib_metadata.version(p)
                     versions.append(version)
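Context for the except: change above: a bare except also catches exceptions like KeyboardInterrupt and SystemExit (flake8 flags it as E722), while except AttributeError keeps the version-lookup fallback chain precise. A minimal illustration with a hypothetical object (not from the repo):

class Dummy:
    pass  # defines neither version_info nor __version__

imported = Dummy()
try:
    version = imported.version_info  # raises AttributeError
except AttributeError:  # narrow: Ctrl-C etc. still propagate
    version = "unknown"
print(version)  # unknown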
16 changes: 8 additions & 8 deletions appendix-A/03_main-chapter-code/DDP-script.py
@@ -91,11 +91,11 @@ def prepare_dataset():
     train_loader = DataLoader(
         dataset=train_ds,
         batch_size=2,
-        shuffle=False, # NEW: False because of DistributedSampler below
+        shuffle=False,  # NEW: False because of DistributedSampler below
         pin_memory=True,
         drop_last=True,
         # NEW: chunk batches across GPUs without overlapping samples:
-        sampler=DistributedSampler(train_ds) # NEW
+        sampler=DistributedSampler(train_ds)  # NEW
     )
     test_loader = DataLoader(
         dataset=test_ds,
@@ -108,30 +108,30 @@ def prepare_dataset():
 # NEW: wrapper
 def main(rank, world_size, num_epochs):
 
-    ddp_setup(rank, world_size) # NEW: initialize process groups
+    ddp_setup(rank, world_size)  # NEW: initialize process groups
 
     train_loader, test_loader = prepare_dataset()
     model = NeuralNetwork(num_inputs=2, num_outputs=2)
     model.to(rank)
     optimizer = torch.optim.SGD(model.parameters(), lr=0.5)
 
-    model = DDP(model, device_ids=[rank]) # NEW: wrap model with DDP
+    model = DDP(model, device_ids=[rank])  # NEW: wrap model with DDP
     # the core model is now accessible as model.module
 
     for epoch in range(num_epochs):
 
         model.train()
         for features, labels in train_loader:
 
-            features, labels = features.to(rank), labels.to(rank) # New: use rank
+            features, labels = features.to(rank), labels.to(rank)  # New: use rank
             logits = model(features)
-            loss = F.cross_entropy(logits, labels) # Loss function
+            loss = F.cross_entropy(logits, labels)  # Loss function
 
             optimizer.zero_grad()
             loss.backward()
             optimizer.step()
 
-            ### LOGGING
+            # LOGGING
             print(f"[GPU{rank}] Epoch: {epoch+1:03d}/{num_epochs:03d}"
                   f" | Batchsize {labels.shape[0]:03d}"
                   f" | Train/Val Loss: {loss:.2f}")
@@ -142,7 +142,7 @@ def main(rank, world_size, num_epochs):
     test_acc = compute_accuracy(model, test_loader, device=rank)
     print(f"[GPU{rank}] Test accuracy", test_acc)
 
-    destroy_process_group() # NEW: cleanly exit distributed mode
+    destroy_process_group()  # NEW: cleanly exit distributed mode
 
 
 def compute_accuracy(model, dataloader, device):
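A side note on the shuffle=False line above: DataLoader's shuffle option is mutually exclusive with a custom sampler, so shuffling is delegated to DistributedSampler, which reshuffles each epoch via set_epoch. A minimal sketch with toy data (num_replicas and rank are passed explicitly here so it runs without a process group; the real script gets them from the launcher):

import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

features = torch.randn(8, 2)        # toy inputs
labels = torch.randint(0, 2, (8,))  # toy class labels
dataset = TensorDataset(features, labels)

# Explicit num_replicas/rank so the sketch runs without init_process_group:
sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
loader = DataLoader(dataset, batch_size=2, shuffle=False, sampler=sampler)

for epoch in range(2):
    sampler.set_epoch(epoch)  # re-seeds the shuffle each epoch
    for x, y in loader:
        pass  # the training step from the script above would go here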
14 changes: 7 additions & 7 deletions appendix-D/01_main-chapter-code/previous_chapters.py
@@ -36,7 +36,7 @@ def __getitem__(self, idx):
         return self.input_ids[idx], self.target_ids[idx]
 
 
-def create_dataloader_v1(txt, batch_size=4, max_length=256, 
+def create_dataloader_v1(txt, batch_size=4, max_length=256,
                          stride=128, shuffle=True, drop_last=True):
     # Initialize the tokenizer
     tokenizer = tiktoken.get_encoding("gpt2")
@@ -80,7 +80,7 @@ def forward(self, x):
 
         # We implicitly split the matrix by adding a `num_heads` dimension
         # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
-        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim) 
+        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
         values = values.view(b, num_tokens, self.num_heads, self.head_dim)
         queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
 
@@ -102,7 +102,7 @@ def forward(self, x):
         attn_weights = self.dropout(attn_weights)
 
         # Shape: (b, num_tokens, num_heads, head_dim)
-        context_vec = (attn_weights @ values).transpose(1, 2) 
+        context_vec = (attn_weights @ values).transpose(1, 2)
 
         # Combine heads, where self.d_out = self.num_heads * self.head_dim
         context_vec = context_vec.reshape(b, num_tokens, self.d_out)
@@ -135,7 +135,7 @@ def __init__(self):
 
     def forward(self, x):
         return 0.5 * x * (1 + torch.tanh(
-            torch.sqrt(torch.tensor(2.0 / torch.pi)) * 
+            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
             (x + 0.044715 * torch.pow(x, 3))
         ))
 
@@ -161,7 +161,7 @@ def __init__(self, cfg):
             d_in=cfg["emb_dim"],
             d_out=cfg["emb_dim"],
             block_size=cfg["ctx_len"],
-            num_heads=cfg["n_heads"], 
+            num_heads=cfg["n_heads"],
             dropout=cfg["drop_rate"],
             qkv_bias=cfg["qkv_bias"])
         self.ff = FeedForward(cfg)
@@ -227,7 +227,7 @@ def generate_text_simple(model, idx, max_new_tokens, context_size):
 
         # Focus only on the last time step
         # (batch, n_token, vocab_size) becomes (batch, vocab_size)
-        logits = logits[:, -1, :] 
+        logits = logits[:, -1, :]
 
         # Get the idx of the vocab entry with the highest logits value
         idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)
@@ -315,4 +315,4 @@ def text_to_token_ids(text, tokenizer):
 
 def token_ids_to_text(token_ids, tokenizer):
     flat = token_ids.squeeze(0)  # remove batch dimension
-    return tokenizer.decode(flat.tolist())
+    return tokenizer.decode(flat.tolist())
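The touched lines in generate_text_simple implement plain greedy decoding: keep only the final time step's logits and append the argmax token id to the running sequence. A standalone toy illustration (shapes and values invented for this sketch):

import torch

torch.manual_seed(123)
logits = torch.randn(1, 3, 5)  # (batch, n_token, vocab_size)
last = logits[:, -1, :]        # (batch, vocab_size)
idx_next = torch.argmax(last, dim=-1, keepdim=True)  # (batch, 1)

idx = torch.tensor([[3, 1, 4]])          # running sequence of token ids
idx = torch.cat((idx, idx_next), dim=1)  # sequence grows by one token
print(idx.shape)                         # torch.Size([1, 4])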
74 changes: 19 additions & 55 deletions ch02/02_bonus_bytepair-encoder/bpe_openai_gpt2.py
@@ -1,46 +1,11 @@
 """
 Byte pair encoding utilities
 Code from https://github.com/openai/gpt-2/blob/master/src/encoder.py
 And modified code (download_vocab) from
 https://github.com/openai/gpt-2/blob/master/download_model.py
-Modified MIT License
-Software Copyright (c) 2019 OpenAI
-We don't claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
-We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-The above copyright notice and this permission notice need not be included
-with content created by the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
-OR OTHER DEALINGS IN THE SOFTWARE.
 """
 
 import os
 import json
 import regex as re
 import requests
 from tqdm import tqdm
 from functools import lru_cache
 
 
 @lru_cache()
 def bytes_to_unicode():
     """
@@ -52,20 +17,21 @@ def bytes_to_unicode():
     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
     And avoids mapping to whitespace/control characters the bpe code barfs on.
     """
-    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
     cs = bs[:]
     n = 0
     for b in range(2**8):
         if b not in bs:
             bs.append(b)
-            cs.append(2**8+n)
+            cs.append(2**8 + n)
             n += 1
     cs = [chr(n) for n in cs]
     return dict(zip(bs, cs))
 
-def get_pairs(word):
-    """Return set of symbol pairs in a word.
-
+
+def get_pairs(word):
+    """
+    Return set of symbol pairs in a word.
     Word is represented as tuple of symbols (symbols being variable-length strings).
     """
     pairs = set()
@@ -75,17 +41,18 @@ def get_pairs(word):
         prev_char = char
     return pairs
 
+
 class Encoder:
     def __init__(self, encoder, bpe_merges, errors='replace'):
         self.encoder = encoder
-        self.decoder = {v:k for k,v in self.encoder.items()}
-        self.errors = errors # how to handle errors in decoding
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.errors = errors  # how to handle errors in decoding
         self.byte_encoder = bytes_to_unicode()
-        self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
         self.cache = {}
 
-        # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
         self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
 
     def bpe(self, token):
@@ -98,7 +65,7 @@ def bpe(self, token):
             return token
 
         while True:
-            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
             if bigram not in self.bpe_ranks:
                 break
             first, second = bigram
@@ -109,12 +76,12 @@ def bpe(self, token):
                     j = word.index(first, i)
                     new_word.extend(word[i:j])
                     i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
 
-                if word[i] == first and i < len(word)-1 and word[i+1] == second:
-                    new_word.append(first+second)
+                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                    new_word.append(first + second)
                     i += 2
                 else:
                     new_word.append(word[i])
@@ -141,28 +108,25 @@ def decode(self, tokens):
         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
         return text
 
+
 def get_encoder(model_name, models_dir):
     with open(os.path.join(models_dir, model_name, 'encoder.json'), 'r') as f:
         encoder = json.load(f)
     with open(os.path.join(models_dir, model_name, 'vocab.bpe'), 'r', encoding="utf-8") as f:
         bpe_data = f.read()
     bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
-    return Encoder(
-        encoder=encoder,
-        bpe_merges=bpe_merges,
-    )
+    return Encoder(encoder=encoder, bpe_merges=bpe_merges)
 
 
 def download_vocab():
-    # Modified code from
     subdir = 'gpt2_model'
     if not os.path.exists(subdir):
         os.makedirs(subdir)
-    subdir = subdir.replace('\\','/') # needed for Windows
+    subdir = subdir.replace('\\', '/')  # needed for Windows
 
     for filename in ['encoder.json', 'vocab.bpe']:
 
-        r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/models/117M" + "/" + filename, stream=True)
+        r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/models/117M/" + filename, stream=True)
 
         with open(os.path.join(subdir, filename), 'wb') as f:
             file_size = int(r.headers["content-length"])
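A usage sketch for the two functions above, assuming this module is importable as bpe_openai_gpt2 and that Encoder's encode method (outside the hunks shown here) mirrors decode; the directory arguments follow download_vocab's hard-coded 'gpt2_model' subdirectory:

from bpe_openai_gpt2 import download_vocab, get_encoder

download_vocab()  # fetches encoder.json and vocab.bpe into ./gpt2_model
tokenizer = get_encoder(model_name="gpt2_model", models_dir=".")

ids = tokenizer.encode("Hello, world!")
print(ids)
print(tokenizer.decode(ids))  # round-trips back to "Hello, world!"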
31 changes: 15 additions & 16 deletions ch03/02_bonus_efficient-multihead-attention/ch03.py
@@ -8,33 +8,33 @@ def __init__(self, d_in, d_out, block_size, dropout, qkv_bias=False):
         super().__init__()
         self.d_out = d_out
         self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
-        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
+        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
         self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
-        self.dropout = nn.Dropout(dropout) # New
-        self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1)) # New
+        self.dropout = nn.Dropout(dropout)  # New
+        self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1))  # New
 
     def forward(self, x):
-        b, num_tokens, d_in = x.shape # New batch dimension b
+        b, num_tokens, d_in = x.shape  # New batch dimension b
         keys = self.W_key(x)
         queries = self.W_query(x)
         values = self.W_value(x)
 
-        attn_scores = queries @ keys.transpose(1, 2) # Changed transpose
+        attn_scores = queries @ keys.transpose(1, 2)  # Changed transpose
         attn_scores.masked_fill_(  # New, _ ops are in-place
-            self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
+            self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
         attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
-        attn_weights = self.dropout(attn_weights) # New
+        attn_weights = self.dropout(attn_weights)  # New
 
         context_vec = attn_weights @ values
         return context_vec
 
 
 class MultiHeadAttentionWrapper(nn.Module):
 
     def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
         super().__init__()
         self.heads = nn.ModuleList(
-            [CausalAttention(d_in, d_out, block_size, dropout, qkv_bias)
+            [CausalAttention(d_in, d_out, block_size, dropout, qkv_bias)
              for _ in range(num_heads)]
         )
         self.out_proj = nn.Linear(d_out*num_heads, d_out*num_heads)
@@ -44,15 +44,14 @@ def forward(self, x):
         return self.out_proj(context_vec)
 
 
-
 class MultiHeadAttention(nn.Module):
     def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
         super().__init__()
         assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
 
         self.d_out = d_out
         self.num_heads = num_heads
-        self.head_dim = d_out // num_heads # Reduce the projection dim to match desired output dim
+        self.head_dim = d_out // num_heads  # Reduce the projection dim to match desired output dim
 
         self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
         self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
@@ -70,7 +69,7 @@ def forward(self, x):
 
         # We implicitly split the matrix by adding a `num_heads` dimension
         # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
-        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
+        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
         values = values.view(b, num_tokens, self.num_heads, self.head_dim)
         queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
 
@@ -92,10 +91,10 @@ def forward(self, x):
         attn_weights = self.dropout(attn_weights)
 
         # Shape: (b, num_tokens, num_heads, head_dim)
-        context_vec = (attn_weights @ values).transpose(1, 2)
+        context_vec = (attn_weights @ values).transpose(1, 2)
 
         # Combine heads, where self.d_out = self.num_heads * self.head_dim
         context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
-        context_vec = self.out_proj(context_vec) # optional projection
+        context_vec = self.out_proj(context_vec)  # optional projection
 
-        return context_vec
+        return context_vec
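A quick shape check for the MultiHeadAttention class above, assuming it is in scope (toy sizes chosen for this sketch; d_out must be divisible by num_heads):

import torch

torch.manual_seed(123)
x = torch.randn(2, 6, 3)  # (batch, num_tokens, d_in)
mha = MultiHeadAttention(d_in=3, d_out=6, block_size=6,
                         dropout=0.0, num_heads=2)
print(mha(x).shape)  # torch.Size([2, 6, 6])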