import torch
import torch.nn as nn
import torch.nn.functional as F

# Assumes vocab (the token vocabulary) and device are defined elsewhere.

def makeTheNet():
    class TweetDisasterRNN(nn.Module):
        def __init__(self, no_layers, vocab_size, hidden_dim, embedding_dim, output_dim):
            super().__init__()
            self.no_layers = no_layers
            self.hidden_dim = hidden_dim
            self.output_dim = output_dim

            # Embedding layer
            self.embedding = nn.Embedding(vocab_size, embedding_dim)

            # LSTM layers
            self.lstm = nn.LSTM(
                input_size=embedding_dim,
                hidden_size=hidden_dim,
                num_layers=no_layers,
                batch_first=True,
            )

            # Dropout to reduce overfitting
            self.dropout = nn.Dropout(0.5)

            # Fully Connected Layers with BatchNorm and Dropout
            self.fc1 = nn.Linear(hidden_dim, hidden_dim)  # fc1: hidden_dim -> hidden_dim
            self.bn1 = nn.BatchNorm1d(hidden_dim)         # BatchNorm after fc1
            self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # fc2: hidden_dim -> hidden_dim
            self.bn2 = nn.BatchNorm1d(hidden_dim)         # BatchNorm after fc2
            self.output = nn.Linear(hidden_dim, output_dim)  # Final output layer

        def forward(self, x, hidden):
            # Embedding and LSTM
            x = self.embedding(x)
            x, hidden = self.lstm(x, hidden)
            x = x[:, -1, :]      # Use the last time step's output for classification
            x = self.dropout(x)  # Apply dropout

            # Fully Connected Layers with ReLU, BatchNorm, and Dropout
            x = F.relu(self.fc1(x))
            x = self.bn1(x)                                # Apply batch normalization
            x = F.dropout(x, 0.5, training=self.training)  # Apply dropout (only while training)

            x = F.relu(self.fc2(x))
            x = self.bn2(x)
            x = F.dropout(x, 0.5, training=self.training)  # Additional dropout for regularization

            return self.output(x), hidden  # Output layer (raw logits)

        def init_hidden(self, batch_size):
            # Initialize hidden and cell states with zeros
            h0 = torch.zeros((self.no_layers, batch_size, self.hidden_dim)).to(device)
            c0 = torch.zeros((self.no_layers, batch_size, self.hidden_dim)).to(device)
            return (h0, c0)

    # Create the model
    net = TweetDisasterRNN(
        no_layers=3,
        hidden_dim=256,
        output_dim=1,
        embedding_dim=128,
        vocab_size=len(vocab) + 1,
    )

    # Loss function and optimizer with L2 regularization (weight_decay)
    lossfun = nn.BCEWithLogitsLoss()  # Binary classification on logits
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, weight_decay=0.01)

    return net, lossfun, optimizer
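
For reference, here is a minimal sketch (not part of the original code) of how the returned net, lossfun, and optimizer could be wired together for a single training step. It assumes vocab and device are defined elsewhere, and uses the hypothetical placeholder names x_batch (a LongTensor of token ids with shape (batch_size, seq_len)) and y_batch (binary labels).

net, lossfun, optimizer = makeTheNet()
net.to(device)
net.train()  # BatchNorm and Dropout behave differently in train vs. eval mode

hidden = net.init_hidden(batch_size=x_batch.size(0))  # fresh (h0, c0) for this batch
logits, hidden = net(x_batch.to(device), hidden)       # logits shape: (batch_size, 1)
loss = lossfun(logits.squeeze(1), y_batch.float().to(device))

optimizer.zero_grad()
loss.backward()
optimizer.step()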
In the code above, consider the following two fully connected layer snippets. The first is from __init__:

# Fully Connected Layers with BatchNorm and Dropout
self.fc1 = nn.Linear(hidden_dim, hidden_dim)  # fc1: hidden_dim -> hidden_dim
self.bn1 = nn.BatchNorm1d(hidden_dim)         # BatchNorm after fc1
self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # fc2: hidden_dim -> hidden_dim
self.bn2 = nn.BatchNorm1d(hidden_dim)         # BatchNorm after fc2
self.output = nn.Linear(hidden_dim, output_dim)  # Final output layer
and the second is from forward:

# Fully Connected Layers with ReLU, BatchNorm, and Dropout
x = F.relu(self.fc1(x))
x = self.bn1(x)                                # Apply batch normalization
x = F.dropout(x, 0.5, training=self.training)  # Apply dropout

x = F.relu(self.fc2(x))
x = self.bn2(x)
x = F.dropout(x, 0.5, training=self.training)  # Additional dropout for regularization
What is the relationship between these two snippets?