import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

##############################
# Download and load CIFAR-10 #
##############################

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=2,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=2,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
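
# A small sanity check (an addition, not part of the original script):
# Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) maps each channel from [0, 1]
# to roughly [-1, 1] via x -> (x - 0.5) / 0.5.
sample_img, sample_label = trainset[0]
print("normalized pixel range: [%.2f, %.2f], label: %s"
      % (sample_img.min().item(), sample_img.max().item(), classes[sample_label]))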


# helper function to show a (normalized) image

def imshow(img):
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.axis('off')
    plt.show()


# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)   # DataLoader iterators no longer expose .next() in recent PyTorch

# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(2)))
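
# For reference (an addition): the loader yields batches in (N, C, H, W) layout,
# here (2, 3, 32, 32) with batch_size=2, together with integer class labels.
print("images:", tuple(images.shape), "labels:", labels.tolist())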




##########################################
# Demonstration of learning on one image #
##########################################


# initialize variables
X = images[0, :, :, :]           # one training image, shape (3, 32, 32)
W = torch.randn_like(X) * 0.01   # weight "image"; try a 100x larger scale for point (2) below
W.requires_grad_()

y = 1          # target label; switch to y = 0 for point (1) below
eps = 1e-20    # numerical safeguard inside the logarithms
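
# The loop below fits a single logistic-regression "neuron" to one image:
# p = sigmoid(sum(W * X)) is the predicted probability that X has label y = 1,
# and the loss is the binary cross-entropy between p and the target y.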

for i in range(10):
    # forward pass: predicted probability that X has label y = 1
    p = torch.sigmoid((W * X).sum())
    print("Epoch", i)
    print("Probability that image X belongs to the class with label y=1: p =", p.item())
    print("Probability that image X belongs to the class with label y=0: 1-p =", 1 - p.item())
    loss = y*(-torch.log(p+eps)) + (1-y)*(-torch.log(1-p+eps))   # cross-entropy loss
    #loss = -torch.log(torch.sigmoid( y*(W*X).sum() )+eps)
    #loss = torch.log(1+torch.exp(-y * (W * X).sum()) + eps)    # logistic loss (assumes y in {-1, +1})
    print("Loss", loss.item())

    # compute gradient (clear the previous one first, so that the gradient
    # from the last epoch remains available for the plots below)
    if W.grad is not None:
        W.grad.zero_()
    loss.backward()

    # gradient-descent update of the weights (learning rate 0.01, see point (3) below)
    with torch.no_grad():
        W -= 0.01 * W.grad
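
# A small sanity check (an addition, not part of the original demo): for this model
# the analytic gradient of the cross-entropy loss is dL/dW = (p - y) * X. Recompute
# the loss at the final W and compare autograd against the closed-form expression;
# torch.autograd.grad is used so that W.grad (plotted below) stays untouched.
p_check = torch.sigmoid((W * X).sum())
loss_check = y * (-torch.log(p_check + eps)) + (1 - y) * (-torch.log(1 - p_check + eps))
(auto_grad,) = torch.autograd.grad(loss_check, W)
manual_grad = (p_check.detach() - y) * X
print("max |autograd - analytic| gradient difference:",
      (auto_grad - manual_grad).abs().max().item())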

# visualize the learned weights (summed over the RGB channels)
plt.figure(0)
plt.clf()
plt.imshow(W.detach().sum(dim=0))
plt.colorbar()

# visualize the negative of the last gradient, i.e. the direction of the update
plt.figure(1)
plt.clf()
plt.imshow(-W.grad.detach().sum(dim=0))
plt.colorbar()

# show the training image X itself
plt.figure(2)
plt.clf()
imshow(X)

# Things to show:
# (1) What happens for y=1 vs. y=0, and what the gradient looks like
#     (it increases the values of W that correspond to positive/negative pixel values)
# (2) What happens when the initialization of W is 100x larger
#     (the loss is large but the gradient is ~0, because the sigmoid saturates)
# (3) What happens when the learning rate is large/small
# (4) Show that cross-entropy is the logistic loss (see the numerical check below)
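
# A numerical check for point (4) (an addition, not part of the original script):
# with the label relabelled as t = 2*y - 1 in {-1, +1} and z = sum(W * X), the
# cross-entropy -y*log(p) - (1-y)*log(1-p) with p = sigmoid(z) equals the
# logistic loss log(1 + exp(-t*z)).
z = (W * X).sum().detach()
p_z = torch.sigmoid(z)
for y_check in (0, 1):
    t = 2 * y_check - 1
    ce = -(y_check * torch.log(p_z) + (1 - y_check) * torch.log(1 - p_z))
    logistic = torch.log(1 + torch.exp(-t * z))
    print("y=%d: cross-entropy = %.6f, logistic loss = %.6f"
          % (y_check, ce.item(), logistic.item()))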

# b = torch.zeros([1, 1], requires_grad=True)   # an (unused) bias term could be added like this