Save vectors and split into 2 notebooks
camilomarino committed Dec 24, 2020
1 parent 5a73804 commit be289c2
Showing 9 changed files with 1,706 additions and 406 deletions.
579 changes: 179 additions & 400 deletions ProyectoFinal/AMPD_with_pandas.ipynb

Large diffs are not rendered by default.

1,446 changes: 1,446 additions & 0 deletions ProyectoFinal/AMPds_compare_methods.ipynb

Large diffs are not rendered by default.

14 changes: 12 additions & 2 deletions ProyectoFinal/apgd.py
@@ -15,11 +15,15 @@ def adam_pgd(D: np.ndarray,
betas=(0.99, 0.999),
max_iter = 70_000,
A0: np.ndarray = None,
mask: np.ndarray = None,
n_mask: int = None,
verbose: bool = True) -> np.ndarray:

X = torch.tensor(X, device=device, dtype=torch.float32)
D = torch.tensor(D, device=device, dtype=torch.float32)
f = lambda A: torch.norm(X-D@A)**2
if mask is not None: mask = torch.tensor(mask, device=device)

if A0 is None:
A = torch.zeros((D.shape[1], X.shape[1]), device=device,
dtype=torch.float32, requires_grad=True)
@@ -28,16 +32,22 @@ def adam_pgd(D: np.ndarray,
requires_grad=True)

optimizer = optim.Adam([A], lr=lr, betas=betas)

losses = list()
for k in range(max_iter):
optimizer.zero_grad()
loss = f(A)
losses.append(float(loss))
loss.backward()
optimizer.step()
if k%1000==0 and verbose:
print(f'[iter:{k}]\t{float(loss):.2f}')

with torch.no_grad():
A.clip_(0, None)
if mask is not None:
eps = 1e-10
for i in range(n_mask):
A[mask==i] /= (A[mask==i].sum(dim=0) + eps)


return to_np(A), float(loss)
return to_np(A), float(loss), losses
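
For orientation, here is a minimal usage sketch of the updated adam_pgd. It assumes a CUDA device (apgd.py hard-codes device = 'cuda'), that X is 2-D with one column per signal window, and an illustrative grouping of dictionary atoms for the new mask / n_mask projection; none of these specifics come from the diff itself.

import numpy as np
from apgd import adam_pgd

D = np.load('ProyectoFinal/arrays/D.npy')   # dictionary matrix, assumed shape (m, n_atoms)
X = np.load('ProyectoFinal/arrays/X.npy')   # signal matrix, assumed 2-D: (m, n_windows)

# Illustrative grouping: assign each atom to one of 4 groups. After every
# projection step, the coefficients of each group are rescaled so that they
# sum to 1 in every column of A (this is what mask/n_mask add in this commit).
n_mask = 4
mask = np.arange(D.shape[1]) % n_mask

A, final_loss, losses = adam_pgd(D, X, max_iter=10_000,
                                 mask=mask, n_mask=n_mask, verbose=False)
print(final_loss, len(losses))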
Binary file added ProyectoFinal/arrays/A_simple.npy
Binary file not shown.
Binary file added ProyectoFinal/arrays/D.npy
Binary file not shown.
Binary file added ProyectoFinal/arrays/X.npy
Binary file not shown.
Binary file added ProyectoFinal/arrays/df.pickle
Binary file not shown.
Binary file added ProyectoFinal/arrays/losses_simple.npy
Binary file not shown.
73 changes: 69 additions & 4 deletions ProyectoFinal/pgd.py
@@ -6,8 +6,10 @@
@author: camilo
"""
import numpy as np
import torch
from time import time
from tqdm import tqdm
device = 'cuda'

def pgd(D:np.ndarray,
X:np.ndarray,
@@ -16,10 +18,10 @@ def pgd(D:np.ndarray,
weight_decay: float = 0,
Ak: np.ndarray = None,
verbose: bool = True) -> np.ndarray:
X = X.reshape((X.shape[0], 1))
#X = X.reshape((X.shape[0], 1))
ti = time()
if X.shape[1] !=1:
raise AttributeError('X must be a column matrix')
#if X.shape[1] !=1:
#raise AttributeError('X must be a column matrix')
p = D.T@X
Q = D.T@D
lr = 1/np.linalg.norm(Q, ord=2)
@@ -28,15 +30,78 @@
Ak = np.ones(A_shape) / np.prod(A_shape)
else:
Ak = Ak.reshape(A_shape)
losses = list()
for i in tqdm(range(max_iter)):
losses.append(np.linalg.norm(X - D@Ak, ord=2)**2)
A_ant = Ak
Ak = Ak - lr * ( 2*(Q@Ak-p) + weight_decay*np.sign(Ak))
Ak[Ak<0] = 0

# if Ak.sum()>1:
# Ak = Ak / Ak.sum()

if np.linalg.norm(Ak-A_ant)<min_consecutive_diff:
break
cost = np.linalg.norm(X - D@Ak, ord=2)
if verbose: print(f'Number of iterations: {i}\nTime: {time()-ti:.2f}')
return Ak.reshape((Ak.shape[0],)), cost
return Ak, cost, losses
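
# --- Usage sketch added by the editor; illustration only, not part of this diff ---
# After this change pgd accepts a full matrix X (the column-vector reshape and
# check above are commented out) and returns (Ak, cost, losses) instead of a
# flattened vector and cost. The call below assumes that max_iter and
# min_consecutive_diff, hidden in the collapsed part of the signature, keep
# their defaults; the array shapes are hypothetical.
import numpy as np
from pgd import pgd

D = np.load('ProyectoFinal/arrays/D.npy')   # dictionary matrix
X = np.load('ProyectoFinal/arrays/X.npy')   # signal matrix, one column per window
A_hat, cost, losses = pgd(D, X, weight_decay=0.0, verbose=True)
print(cost, len(losses))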


# -*- coding: utf-8 -*-

import numpy as np
import torch
from torch import optim

device = 'cuda'

def to_np(x):
return x.detach().cpu().numpy()

def pgd_torch(D: np.ndarray,
X: np.ndarray,
#lr: float = 1e-2,
#betas=(0.99, 0.999),
max_iter = 70_000,
A0: np.ndarray = None,
mask: np.ndarray = None,
n_mask: int = None,
verbose: bool = True) -> np.ndarray:

X = torch.tensor(X, device=device, dtype=torch.float32)
D = torch.tensor(D, device=device, dtype=torch.float32)

#p = D.T@X
Q = D.T@D
lr = 1/torch.norm(Q)

f = lambda A: torch.norm(X-D@A)**2
if mask is not None: mask = torch.tensor(mask, device=device)

if A0 is None:
A = torch.zeros((D.shape[1], X.shape[1]), device=device,
dtype=torch.float32, requires_grad=True)
else:
A = torch.tensor(A0, device=device, dtype=torch.float32,
requires_grad=True)

optimizer = optim.SGD([A], lr=lr)
losses = list()
for k in range(max_iter):
optimizer.zero_grad()
loss = f(A)
losses.append(float(loss))
loss.backward()
optimizer.step()
if k%1000==0 and verbose:
print(f'[iter:{k}]\t{float(loss):.2f}')

with torch.no_grad():
A.clip_(0, None)
if mask is not None:
eps = 1e-10
for i in range(n_mask):
A[mask==i] /= (A[mask==i].sum(dim=0) + eps)


return to_np(A), float(loss), losses
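
A similar hedged sketch for the new pgd_torch, again assuming a CUDA device (device = 'cuda' is hard-coded) and illustrative shapes; max_iter is the only visible hyper-parameter here, since the learning rate is derived internally from Q = D.T@D.

import numpy as np
from pgd import pgd_torch

D = np.load('ProyectoFinal/arrays/D.npy')
X = np.load('ProyectoFinal/arrays/X.npy')   # assumed 2-D, one column per window

n_mask = 4                                  # illustrative atom grouping, as above
mask = np.arange(D.shape[1]) % n_mask
A_hat, final_loss, losses = pgd_torch(D, X, max_iter=10_000,
                                      mask=mask, n_mask=n_mask, verbose=True)

Design note: torch.norm(Q) is the Frobenius norm, which is never smaller than the spectral norm used in the numpy version (np.linalg.norm(Q, ord=2)), so the derived step size 1/torch.norm(Q) is a conservative choice. Depending on the installed PyTorch version, optim.SGD may also expect a plain float learning rate, in which case wrapping it as float(1/torch.norm(Q)) is a harmless adjustment.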

0 comments on commit be289c2

Please sign in to comment.