import torch
import torch.nn as nn
import torch.nn.functional as F


class SentimentAspectCNN(nn.Module):
    def __init__(self, embedding_dim, num_filters, filter_sizes, output_dim, dropout):
        super().__init__()
        # One 2D convolution per filter size; each kernel spans the full embedding width
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1, out_channels=num_filters, kernel_size=(fs, embedding_dim))
            for fs in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * num_filters, output_dim)
        self.dropout = nn.Dropout(dropout)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x shape: (batch_size, max_length, embedding_dim), where embedding_dim already
        # includes the extra aspect-indicator feature
        x = x.unsqueeze(1)  # Add a channel dimension: (batch_size, 1, max_length, embedding_dim)

        # Apply convolution and ReLU activation
        # List of tensors of shape (batch_size, num_filters, max_length - filter_size + 1)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]

        # Max pooling over the remaining time dimension
        # List of tensors of shape (batch_size, num_filters)
        x = [F.max_pool1d(tensor, tensor.size(2)).squeeze(2) for tensor in x]

        # Concatenate the pooled features: (batch_size, len(filter_sizes) * num_filters)
        x = torch.cat(x, dim=1)

        # Apply dropout
        x = self.dropout(x)

        # Fully connected layer: (batch_size, output_dim)
        x = self.fc(x)

        # Sigmoid activation to get a score between 0 and 1
        x = self.sigmoid(x)
        return x


if __name__ == "__main__":
    embedding_dim = 26  # 25 for word embeddings + 1 for the aspect indicator
    num_filters = 100
    filter_sizes = [3, 4, 5]
    output_dim = 1
    dropout = 0.5

    model = SentimentAspectCNN(embedding_dim, num_filters, filter_sizes, output_dim, dropout)
    print(model)
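
    # A minimal smoke test of the forward pass (a sketch, not part of the original script):
    # batch_size and max_length below are assumed values chosen only for illustration.
    # The dummy input stands in for 25-dim word embeddings with a 1-dim aspect indicator
    # appended, matching embedding_dim = 26 above.
    batch_size = 4
    max_length = 50
    dummy_input = torch.rand(batch_size, max_length, embedding_dim)
    with torch.no_grad():
        scores = model(dummy_input)
    print(scores.shape)  # Expected: torch.Size([4, 1]), one sigmoid score per example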