from transformers import AutoModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import torch.nn as nn
import torch
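
# SciBertClassificationModel: a SciBERT encoder (optionally frozen) with a
# linear head that emits one logit per label for multi-label classification.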


NUM_LABELS = 4

class SciBertClassificationModel(nn.Module):
    def __init__(self, model_path="allenai/scibert_scivocab_uncased", freeze_weights=True):
        super().__init__()
        if model_path == "allenai/scibert_scivocab_uncased":
            # Plain SciBERT checkpoint: load the encoder directly from the Hub.
            self.base_model = AutoModel.from_pretrained(model_path)
        else:
            # Fine-tuned checkpoint on the Hub: download its safetensors file,
            # drop the classifier weights and the "base_model." prefix, and load
            # only the encoder weights into a fresh SciBERT.
            pytorch_model_path = hf_hub_download(
                repo_id=model_path,
                repo_type="model",
                filename="model.safetensors",
            )
            state_dict = load_file(pytorch_model_path)
            filtered_state_dict = {
                k.replace("base_model.", ""): v
                for k, v in state_dict.items()
                if not k.startswith("classifier.")
            }
            self.base_model = AutoModel.from_pretrained(
                "allenai/scibert_scivocab_uncased", state_dict=filtered_state_dict
            )

        self.config = self.base_model.config
        self.freeze_weights = freeze_weights

        # Optionally freeze the encoder so only the classification head is trained.
        if freeze_weights:
            for param in self.base_model.parameters():
                param.requires_grad = False

        # Linear head mapping the pooled encoder output to one logit per label.
        self.classifier = nn.Linear(self.base_model.config.hidden_size, NUM_LABELS)

    def forward(self, input_ids, attention_mask, labels=None):
        # Skip autograd for the encoder when its weights are frozen; run it
        # normally otherwise so the base model can be fine-tuned.
        if self.freeze_weights:
            with torch.no_grad():
                outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        else:
            outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)

        # Pool by summing token embeddings over the sequence dimension.
        # Note: this sum also includes padding positions; masking with
        # attention_mask before summing would exclude them.
        summed_representation = outputs.last_hidden_state.sum(dim=1)

        logits = self.classifier(summed_representation)

        # Multi-label objective: one independent sigmoid/BCE term per label.
        loss = None
        if labels is not None:
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels.float())
        return {"loss": loss, "logits": logits}

    def state_dict(self, *args, **kwargs):
        # Safetensors serialization requires contiguous tensors, so make any
        # non-contiguous tensors contiguous before the state dict is saved.
        state_dict = super().state_dict(*args, **kwargs)
        for key, tensor in state_dict.items():
            if isinstance(tensor, torch.Tensor) and not tensor.is_contiguous():
                state_dict[key] = tensor.contiguous()
        return state_dict
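

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the model definition).
# Assumes the SciBERT tokenizer from the Hub; the example text, labels, and
# output filename below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer
    from safetensors.torch import save_file

    tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
    model = SciBertClassificationModel(freeze_weights=True)
    model.eval()

    enc = tokenizer(
        ["An example abstract about graphene synthesis."],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    # Optional multi-hot labels (batch_size x NUM_LABELS) to also get a loss.
    labels = torch.tensor([[1.0, 0.0, 0.0, 1.0]])

    out = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"], labels=labels)
    probs = torch.sigmoid(out["logits"])  # per-label probabilities
    print(probs, out["loss"])

    # The overridden state_dict() returns contiguous tensors, which is what
    # safetensors' save_file expects.
    save_file(model.state_dict(), "scibert_classifier.safetensors")  # hypothetical path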