|
|
import torch.nn as nn |
|
|
from transformers import AutoModel |
|
|
from .configs import Config |
|
|
|
|
|
|
|
|
class HateSpeechModel(nn.Module):
    """Transformer-based text classifier for hate speech detection.

    Wraps the pretrained encoder named by ``Config.MODEL_NAME`` with a
    dropout layer and a linear classification head.

    Args:
        n_classes: Number of output classes for the final linear layer.
    """

    def __init__(self, n_classes):
        super(HateSpeechModel, self).__init__()

        # NOTE(review): weights_only=False is forwarded to torch.load and
        # permits arbitrary pickled objects in the checkpoint — only load
        # trusted model files. Kept as-is to preserve loading behavior.
        self.bert = AutoModel.from_pretrained(Config.MODEL_NAME, weights_only=False)

        # Explicitly unfreeze the whole encoder for full fine-tuning.
        # (Freshly loaded parameters already default to requires_grad=True;
        # the loop is kept to make the training intent unmistakable.)
        for param in self.bert.parameters():
            param.requires_grad = True

        self.drop = nn.Dropout(p=0.3)
        # Fix: derive the input width from the loaded encoder instead of a
        # hard-coded 768, so large/small checkpoint variants work too.
        # Behavior is unchanged for base-size (768-dim) models.
        self.fc = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        """Encode a batch and classify its pooled representation.

        Args:
            input_ids: Token-id tensor, shape (batch, seq_len).
            attention_mask: Attention mask tensor, shape (batch, seq_len).

        Returns:
            Logits tensor of shape (batch, n_classes).
        """
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )

        # Fix: use the named field instead of positional outputs[1].
        # ModelOutput indexing skips None entries, so [1] can silently
        # return the wrong tensor for encoders that lack a pooler;
        # .pooler_output is the unambiguous accessor.
        pooled_output = outputs.pooler_output

        output = self.drop(pooled_output)
        return self.fc(output)