import torch
import torch.utils.data as data_utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms, utils
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)   # 3 input channels (RGB), 10 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 10 input channels, 20 output channels, 5x5 kernel
        self.fc1 = nn.Linear(20 * 30 * 30, 50)         # input: 20 feature maps of 30x30, flattened; output: 50
        self.fc2 = nn.Linear(50, 2)                    # input: 50 features; output: 2 class scores
    def forward(self, x):  # called when data is passed through the model for training or testing
        x = F.relu(F.max_pool2d(self.conv1(x), 2))  # conv layer 1, then 2x2 max pooling and ReLU
        x = F.relu(F.max_pool2d(self.conv2(x), 2))  # conv layer 2, then 2x2 max pooling and ReLU
        # flatten the 3D feature maps into a 1D vector for the fully connected layers:
        # -1 lets PyTorch infer the batch size; each image contributes 20 channels of
        # 30x30 features, i.e. a vector of 20 * 30 * 30 = 18000 elements
        x = x.view(-1, 20 * 30 * 30)
        x = F.relu(self.fc1(x))  # ReLU activation on the first fully connected layer's output
        # final output: raw scores (logits) for the two classes. No softmax is applied here,
        # because nn.CrossEntropyLoss applies log-softmax internally when computing the loss;
        # applying softmax in forward() as well would be redundant. Note that cross entropy
        # works for two or more classes; for binary classification, a single-logit output
        # with nn.BCEWithLogitsLoss is a common alternative.
        output = self.fc2(x)
        return output
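
# A quick sanity check of the flattened size: fc1's 20*30*30 input implies 132x132 inputs
# (132 -> 128 after conv1 -> 64 after 2x2 pooling -> 60 after conv2 -> 30 after pooling),
# giving 20 * 30 * 30 = 18000 features per image. This is a minimal sketch; the 132x132
# input size is an assumption inferred from fc1's shape, not stated in the original.
model = Net()
dummy = torch.randn(1, 3, 132, 132)  # one assumed 132x132 RGB image
scores = model(dummy)
print(scores.shape)  # torch.Size([1, 2]): one score per class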
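
# A minimal single-step training sketch under the same assumptions: nn.CrossEntropyLoss
# pairs with the raw logits returned by forward(); the optimizer, learning rate, and the
# random `images`/`labels` batch below are illustrative stand-ins, not part of the original.
model = Net()
criterion = nn.CrossEntropyLoss()                   # applies log-softmax internally
optimizer = optim.SGD(model.parameters(), lr=0.01)  # illustrative choice of optimizer/lr

images = torch.randn(8, 3, 132, 132)  # hypothetical batch of 8 RGB images
labels = torch.randint(0, 2, (8,))    # hypothetical class indices in {0, 1}

optimizer.zero_grad()              # clear gradients from any previous step
outputs = model(images)            # raw logits, shape (8, 2)
loss = criterion(outputs, labels)  # cross entropy between logits and class indices
loss.backward()                    # backpropagate the loss
optimizer.step()                   # update the weights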