from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import json

# Load runtime configuration (holds the base model name used for the tokenizer)
with open('../config/config.json') as f:
    config = json.load(f)

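# For reference, ../config/config.json is assumed to contain at least the name
# of the pretrained checkpoint the tokenizer was built from; hypothetical contents:
# {
#     "model_name": "bert-base-uncased"
# }
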
# Load the fine-tuned model weights and the matching tokenizer
model = AutoModelForSequenceClassification.from_pretrained('../model')
tokenizer = AutoTokenizer.from_pretrained(config['model_name'])

def predict(text):
    # Tokenize the input text into model-ready tensors
    inputs = tokenizer(text, return_tensors="pt", padding="max_length", truncation=True)
    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # The predicted class is the index of the highest logit
    prediction = torch.argmax(logits, dim=-1)
    return prediction.item()

# Run a sample prediction
text = "Example text for prediction"
prediction = predict(text)
print(f"Prediction: {prediction}")
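
# Optional extension (a sketch, not part of the original script): return the
# predicted label name and its probability. torch.softmax turns logits into
# per-class probabilities; model.config.id2label maps the class index to a
# readable label if the fine-tuned model in ../model defines one (an assumption).
def predict_with_probs(text):
    inputs = tokenizer(text, return_tensors="pt", padding="max_length", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1).squeeze(0)
    idx = int(torch.argmax(probs))
    label = model.config.id2label.get(idx, str(idx))
    return label, probs[idx].item()

# Example usage: label, score = predict_with_probs("Example text for prediction")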