import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):
    """Configuration parameters."""

    def __init__(self, dataset, embedding):
        self.model_name = "FastText"
        self.train_path = dataset + "/data/train.txt"  # training set
        self.dev_path = dataset + "/data/dev.txt"  # validation set
        self.test_path = dataset + "/data/test.txt"  # test set
        self.class_list = [
            x.strip()
            for x in open(dataset + "/data/class.txt", encoding="utf-8").readlines()
        ]  # class names, one per line
        self.vocab_path = dataset + "/data/vocab.pkl"  # vocabulary
        self.save_path = dataset + "/saved_dict/" + self.model_name + ".ckpt"  # checkpoint
        self.log_path = dataset + "/log/" + self.model_name
        # Pre-trained embedding matrix, or None to train embeddings from scratch.
        self.embedding_pretrained = (
            torch.tensor(
                np.load(dataset + "/data/" + embedding)["embeddings"].astype("float32")
            )
            if embedding != "random"
            else None
        )
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop early if dev metrics stall for this many batches
        self.num_classes = len(self.class_list)  # number of target classes
        self.n_vocab = 0  # vocabulary size, filled in at runtime
        self.num_epochs = 20
        self.batch_size = 128
        self.pad_size = 32  # every sequence is padded/truncated to this length
        self.learning_rate = 1e-3
        self.embed = (
            self.embedding_pretrained.size(1)
            if self.embedding_pretrained is not None
            else 300
        )  # embedding dimension
        self.hidden_size = 256  # hidden layer size
        self.n_gram_vocab = 250499  # number of hash buckets for n-gram ids
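

# Typical usage (a sketch; the dataset directory and embedding file name below
# are hypothetical placeholders following the train/dev/test layout above):
#
#     config = Config("THUCNews", "embedding_SougouNews.npz")
#     config.n_vocab = len(vocab)  # set after building/loading the vocabulary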


"""Bag of Tricks for Efficient Text Classification"""
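

# Where the bigram/trigram ids consumed by FastText.forward come from: the data
# pipeline hashes token n-grams into config.n_gram_vocab buckets. A minimal
# sketch, modeled on the companion data-loading utilities of this repo family;
# the multiplier constants are assumptions, not values confirmed by this file.
def bigram_hash(sequence, t, buckets):
    """Hash the token pair ending at position t into one of `buckets` ids."""
    t1 = sequence[t - 1] if t - 1 >= 0 else 0
    return (t1 * 14918087) % buckets


def trigram_hash(sequence, t, buckets):
    """Hash the token triple ending at position t into one of `buckets` ids."""
    t1 = sequence[t - 1] if t - 1 >= 0 else 0
    t2 = sequence[t - 2] if t - 2 >= 0 else 0
    return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets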


class FastText(nn.Module):
    def __init__(self, config):
        super(FastText, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(
                config.embedding_pretrained, freeze=False
            )
        else:
            # the last vocabulary index is reserved for the padding token
            self.embedding = nn.Embedding(
                config.n_vocab, config.embed, padding_idx=config.n_vocab - 1
            )
        self.embedding_ngram2 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.embedding_ngram3 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(config.embed * 3, config.hidden_size)
        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        # x is the batch tuple from the data loader: x[0] holds word ids,
        # x[2] bigram ids, x[3] trigram ids (x[1] carries sequence lengths
        # and is unused by this model).
        out_word = self.embedding(x[0])
        out_bigram = self.embedding_ngram2(x[2])
        out_trigram = self.embedding_ngram3(x[3])
        out = torch.cat((out_word, out_bigram, out_trigram), -1)

        out = out.mean(dim=1)  # average over the sequence dimension
        out = self.dropout(out)
        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        return out

    def feature(self, x):
        """
        Extract intermediate feature vectors for visualization.
        Returns the output of fc1 (hidden features after ReLU activation).
        Call model.eval() first so that dropout is a no-op.
        """
        with torch.no_grad():
            out_word = self.embedding(x[0])
            out_bigram = self.embedding_ngram2(x[2])
            out_trigram = self.embedding_ngram3(x[3])
            out = torch.cat((out_word, out_bigram, out_trigram), -1)

            out = out.mean(dim=1)
            out = self.dropout(out)
            features = self.fc1(out)
            features = F.relu(features)
            return features.cpu().numpy()

    def get_prediction(self, x):
        """
        Get the model's final-layer output vectors (logits).
        """
        with torch.no_grad():
            out_word = self.embedding(x[0])
            out_bigram = self.embedding_ngram2(x[2])
            out_trigram = self.embedding_ngram3(x[3])
            out = torch.cat((out_word, out_bigram, out_trigram), -1)

            out = out.mean(dim=1)
            out = self.dropout(out)
            out = self.fc1(out)
            out = F.relu(out)
            predictions = self.fc2(out)
            return predictions.cpu().numpy()

    def prediction(self, features):
        """
        Predict class logits from intermediate feature vectors.
        features: output of feature(), shape [batch_size, hidden_size]
        """
        with torch.no_grad():
            features_tensor = torch.tensor(features, dtype=torch.float32).to(
                next(self.parameters()).device
            )
            predictions = self.fc2(features_tensor)
            return predictions.cpu().numpy()
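

# Minimal smoke test (a sketch, not part of the training pipeline). It assumes
# the batch format used above, x = (word_ids, seq_len, bigram_ids, trigram_ids),
# and uses a hypothetical stand-in config so no dataset files are required.
if __name__ == "__main__":
    class _DummyConfig:
        embedding_pretrained = None
        n_vocab = 5000
        embed = 300
        n_gram_vocab = 250499
        dropout = 0.5
        hidden_size = 256
        num_classes = 10

    model = FastText(_DummyConfig()).eval()  # eval() disables dropout
    batch_size, pad_size = 4, 32
    words = torch.randint(0, 5000, (batch_size, pad_size))
    seq_len = torch.full((batch_size,), pad_size, dtype=torch.long)
    bigrams = torch.randint(0, 250499, (batch_size, pad_size))
    trigrams = torch.randint(0, 250499, (batch_size, pad_size))

    logits = model((words, seq_len, bigrams, trigrams))
    print(logits.shape)  # torch.Size([4, 10])

    feats = model.feature((words, seq_len, bigrams, trigrams))
    print(feats.shape)  # (4, 256)
    # In eval mode, prediction() on feature() output reproduces the logits.
    print(model.prediction(feats).shape)  # (4, 10)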