Update README.md
Browse files
README.md
CHANGED
@@ -99,6 +99,19 @@ def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=N
|
|
99 |
return (loss, outputs) if return_outputs else loss
|
100 |
```
|
101 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
102 |
# Evaluation and comparison with the Vanilla and GPT-4o models:
|
103 |
|
104 |
| Dataset | Model | F1 | Accuracy |
|
|
|
99 |
return (loss, outputs) if return_outputs else loss
|
100 |
```
|
101 |
|
102 |
+
# Usage
|
103 |
+
|
104 |
+
```python
|
105 |
+
from transformers import pipeline
|
106 |
+
model = pipeline(task='sentiment-analysis', model='alexander-sh/mDeBERTa-v3-multi-sent', device='cuda')
|
107 |
+
model('Keep your face always toward the sunshine—and shadows will fall behind you.')
|
108 |
+
>>> [{'label': 'positive', 'score': 0.6478521227836609}]
|
109 |
+
model('I am not coming with you.')
|
110 |
+
>>> [{'label': 'neutral', 'score': 0.790919840335846}]
|
111 |
+
model("I am hating that my transformer model don't work properly.")
|
112 |
+
>>> [{'label': 'negative', 'score': 0.7474458813667297}]
|
113 |
+
```
|
114 |
+
|
115 |
# Evaluation and comparison with the Vanilla and GPT-4o models:
|
116 |
|
117 |
| Dataset | Model | F1 | Accuracy |
|