{"id":554486,"date":"2022-02-10T19:58:45","date_gmt":"2022-02-10T11:58:45","guid":{"rendered":"http:\/\/4563.org\/?p=554486"},"modified":"2022-02-10T19:58:45","modified_gmt":"2022-02-10T11:58:45","slug":"%e4%bd%bf%e7%94%a8%e9%a2%84%e8%ae%ad%e7%bb%83%e6%a8%a1%e5%9e%8b%e7%9a%84-alexnet-%e8%bf%9b%e8%a1%8c%e5%9b%be%e7%89%87%e5%88%86%e7%b1%bb%ef%bc%8c%e5%87%86%e7%a1%ae%e7%8e%87%e4%b8%8e%e7%bd%91%e7%bb%9c-5","status":"publish","type":"post","link":"http:\/\/4563.org\/?p=554486","title":{"rendered":"\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u7684 Alexnet \u8fdb\u884c\u56fe\u7247\u5206\u7c7b\uff0c\u51c6\u786e\u7387\u4e0e\u7f51\u7edc\u6570\u636e\u4e0d\u7b26\uff0c\u53ef\u80fd\u662f\u4ec0\u4e48\u539f\u56e0\u5bfc\u81f4\u7684\uff1f"},"content":{"rendered":"<div>\n<div>\n<div>\n<h1> \u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u7684 Alexnet \u8fdb\u884c\u56fe\u7247\u5206\u7c7b\uff0c\u51c6\u786e\u7387\u4e0e\u7f51\u7edc\u6570\u636e\u4e0d\u7b26\uff0c\u53ef\u80fd\u662f\u4ec0\u4e48\u539f\u56e0\u5bfc\u81f4\u7684\uff1f <\/h1>\n<p> <\/p>\n<div>\n<div> <span>\u8cc7\u6df1\u5927\u4f6c : Richard14 <\/span>  <span><i><\/i> 21<\/span> <\/div>\n<div> <\/div>\n<\/p><\/div>\n<\/p><\/div>\n<\/p><\/div>\n<div isfirst=\"1\"> <\/p>\n<p>\u9884\u8bad\u7ec3\u7684\u610f\u601d\u662f\u7528 torchvision \u91cc\u5199\u597d\u7684 alexnet \uff08\u4fee\u6539\u6700\u540e\u4e00\u5c42\uff09\uff0c\u4e0d\u662f\u6307\u5bfc\u5165\u8bad\u7ec3\u597d\u7684\uff0c\u5c1d\u8bd5\u7528 quickstart \u91cc\u7684\u4ee3\u7801\u8bad\u7ec3 cifar10 \uff0c\u4f46\u662f\u7f51\u4e0a\u666e\u904d\u67e5\u5230\u7684\u5b9e\u9a8c\u6570\u636e\uff0c\u51c6\u786e\u7387\u5927\u6982\u5728 80%\uff0c78%\u5de6\u53f3\uff0c\u6211\u8fed\u4ee3\u5230\u6536\u655b\u4e5f\u53ea\u80fd\u5f97\u5230 70%\u7684\u51c6\u786e\u7387\uff0c\u8fd9\u4e2a\u5dee\u5f02\u4ea7\u751f\u7684\u539f\u56e0\u662f\u5565\u5462\uff1f<\/p>\n<p>\u5b8c\u6574\u4ee3\u7801\uff1a<\/p>\n<pre><code>from utils import * from pipeit import * import os,sys,time,pickle,random import matplotlib.pyplot as 
plt import numpy as np  import torch from torch import nn from torchvision import datasets, models from torch.utils.data import Dataset, DataLoader, TensorDataset from torchvision.transforms import ToTensor, Lambda, Resize, Compose, InterpolationMode  device = \"cuda\" if torch.cuda.is_available() else \"cpu\" print(\"Using {} device\".format(device)) torch.backends.cudnn.benchmark=True  # Download training data from open datasets. training_data = datasets.CIFAR10(     root=\".\\data\\cifar10\",     train=True,     download=True,     transform=Compose([         Resize((64, 64), InterpolationMode.BICUBIC),         ToTensor()     ]) )  # Download test data from open datasets. test_data = datasets.CIFAR10(     root=\".\\data\\cifar10\",     train=False,     download=True,     transform=Compose([         Resize((64, 64), InterpolationMode.BICUBIC),         ToTensor()     ]) )  def imshow(training_data):     labels_map = {         0: \"plane\",         1: \"car\",         2: \"bird\",         3: \"cat\",         4: \"deer\",         5: \"dog\",         6: \"frog\",         7: \"horse\",         8: \"ship\",         9: \"truck\",     }     cols, rows = 3, 3     figure = plt.figure(figsize=(8,8))     for i in range(1, cols * rows + 1):         sample_idx = torch.randint(len(training_data), size=(1,)).item()         img, label = training_data[sample_idx]         img = img.swapaxes(0,1)         img = img.swapaxes(1,2)         figure.add_subplot(rows, cols, i)         plt.title(labels_map[label])         plt.axis(\"off\")         plt.imshow(img)     plt.show()  # imshow(training_data)  def train_loop(dataloader, net, loss_fn, optimizer):     size = len(dataloader)     train_loss = 0     for batch_idx, (X, tag) in enumerate(dataloader):         X, tag = X.to(device), tag.to(device)         pred = net(X)         loss = loss_fn(pred, tag)         train_loss += loss.item()          # Back propagation         optimizer.zero_grad()         loss.backward()         optimizer.step()  
   train_loss \/= size      return train_loss  def test_loop(dataloader, model, loss_fn):     size = len(dataloader.dataset)     num_batches = len(dataloader)     test_loss, correct = 0, 0      with torch.no_grad():         for X, y in dataloader:             X, y = X.to(device), y.to(device)             pred = model(X)             test_loss += loss_fn(pred, y).item()             correct += (pred.argmax(1) == y).type(torch.float).sum().item()      test_loss \/= num_batches     correct \/= size     return test_loss, correct  net = models.alexnet().to(device) net.classifier[6] = nn.Linear(4096, 10).to(device)  learning_rate = 0.01 batch_size = 128 weight_decay = 0  train_dataloader = DataLoader(training_data, batch_size = batch_size) test_dataloader = DataLoader(test_data, batch_size = batch_size)  loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(net.parameters(), lr = learning_rate)  epochs = 50 for t in range(epochs):     print(f\"Epoch {t+1}\\n-------------------------------\")     st_time = time.time()     train_loss = train_loop(train_dataloader, net, loss_fn, optimizer)     test_loss, correct = test_loop(test_dataloader, net, loss_fn)     print(f\"Train loss: {train_loss:&gt;8f}, Test loss: {test_loss:&gt;8f}, Accuracy: {(100*correct):&gt;0.1f}%, Epoch time: {time.time() - st_time:.2f}s\\n\") print(\"Done!\") torch.save(net.state_dict(), 'alexnet-pre1.model') <\/code><\/pre>\n<p>\u6700\u540e\u6536\u655b\u65f6\u7684\u6570\u636e\u5728\u8fd9\u6837\uff1a<\/p>\n<pre><code>Epoch 52 ------------------------------- Train loss: 0.399347, Test loss: 0.970927, Accuracy: 70.3%, Epoch time: 17.20s <\/code><\/pre>\n<\/p><\/div>\n<div> <b>\u5927\u4f6c\u6709\u8a71\u8aaa<\/b> (<span>1<\/span>)  <\/div>\n<div> <\/div>\n<\/p><\/div>\n<\/p><\/div>\n<ul>\n<li data-pid=\"7113769\" data-uid=\"2\">\n<div>\n<div>\n<div> <span>\u8cc7\u6df1\u5927\u4f6c : KangolHsu <\/span>  <\/div>\n<div> <i title=\"\u5f15\u7528\"><\/i>  <span>  <\/span> <\/div>\n<\/p><\/div>\n<div> 
\u8f93\u5165\u7684\u56fe\u7247 64*64 \uff1f\u662f\u4e0d\u662f\u6709\u70b9\u5c0f\u554a <\/div>\n<\/p><\/div>\n<\/li>\n<li>\n","protected":false},"excerpt":{"rendered":"<p>\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u7684 Alexnet &hellip;<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"closed","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":[],"categories":[],"tags":[],"_links":{"self":[{"href":"http:\/\/4563.org\/index.php?rest_route=\/wp\/v2\/posts\/554486"}],"collection":[{"href":"http:\/\/4563.org\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/4563.org\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/4563.org\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/4563.org\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=554486"}],"version-history":[{"count":0,"href":"http:\/\/4563.org\/index.php?rest_route=\/wp\/v2\/posts\/554486\/revisions"}],"wp:attachment":[{"href":"http:\/\/4563.org\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=554486"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/4563.org\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=554486"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/4563.org\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=554486"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}