anantoj committed
Commit 058a3cd
1 Parent(s): 66fc7d4

Added Model

0_Transformer/config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "_name_or_path": "indobenchmark/indobert-lite-base-p1",
+  "_num_labels": 5,
+  "architectures": [
+    "AlbertModel"
+  ],
+  "attention_probs_dropout_prob": 0,
+  "bos_token_id": 2,
+  "classifier_dropout_prob": 0.1,
+  "down_scale_factor": 1,
+  "embedding_size": 128,
+  "eos_token_id": 3,
+  "gap_size": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4"
+  },
+  "initializer_range": 0.02,
+  "inner_group_num": 1,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "albert",
+  "net_structure_type": 0,
+  "num_attention_heads": 12,
+  "num_hidden_groups": 1,
+  "num_hidden_layers": 12,
+  "num_memory_blocks": 0,
+  "output_past": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "vocab_size": 30000
+}
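
The backbone declared above is an ALBERT-style encoder (`AlbertModel`: 12 layers in a single shared group, hidden size 768, 128-dim factorized embeddings), which is what keeps the checkpoint small. A hedged sketch for inspecting the backbone in isolation (the `subfolder` argument is an assumption about how this repo layout maps onto `from_pretrained`):

```python
from transformers import AutoConfig, AutoModel

# Hypothetical: read only the ALBERT backbone stored under 0_Transformer/.
config = AutoConfig.from_pretrained(
    "LazarusNLP/congen-indobert-lite-base", subfolder="0_Transformer"
)
print(config.model_type, config.hidden_size)  # albert 768

backbone = AutoModel.from_pretrained(
    "LazarusNLP/congen-indobert-lite-base", subfolder="0_Transformer"
)
# Roughly 11.7M parameters, consistent with the ~46.7 MB float32 checkpoint below.
print(sum(p.numel() for p in backbone.parameters()))
```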
0_Transformer/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:694ff51abc4465cd22dd7945e017a061c354d34e547f6feb4558fc5afaa49db2
+size 46747263
0_Transformer/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 32,
+  "do_lower_case": false
+}
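
Note that `max_seq_length: 32` caps inputs at 32 word pieces during encoding; longer texts are truncated. A quick check (assuming the Hub id used in the README below):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("LazarusNLP/congen-indobert-lite-base")
print(model.max_seq_length)  # 32, taken from sentence_bert_config.json
```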
0_Transformer/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
0_Transformer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
0_Transformer/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
0_Transformer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
1_Pooling/config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "word_embedding_dimension": 768,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false,
+  "pooling_mode_weightedmean_tokens": false,
+  "pooling_mode_lasttoken": false
+}
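
Only `pooling_mode_mean_tokens` is enabled above, i.e. token embeddings are averaged over non-padding positions. A minimal sketch of that computation (a hypothetical standalone helper, not code from this repo):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over real (non-padding) tokens."""
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)  # (batch, seq, 1)
    summed = (token_embeddings * mask).sum(dim=1)                   # (batch, hidden)
    counts = mask.sum(dim=1).clamp(min=1e-9)                        # avoid divide-by-zero
    return summed / counts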
2_Dense/config.json ADDED
@@ -0,0 +1 @@
+{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad2363388c666f7378e908cbddaacf912aa19a4a943cb2b22ef7ff64d8744e73
+size 2363583
README.md CHANGED
@@ -1,3 +1,91 @@
 ---
-license: apache-2.0
+pipeline_tag: sentence-similarity
+tags:
+- sentence-transformers
+- feature-extraction
+- sentence-similarity
+datasets:
+- LazarusNLP/wikipedia_id_20230520
 ---
+
+# LazarusNLP/congen-indobert-lite-base
+
+This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+<!--- Describe your model here -->
+
+## Usage (Sentence-Transformers)
+
+Using this model is straightforward once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+```
+pip install -U sentence-transformers
+```
+
+Then you can use the model like this:
+
+```python
+from sentence_transformers import SentenceTransformer
+sentences = ["This is an example sentence", "Each sentence is converted"]
+
+model = SentenceTransformer('LazarusNLP/congen-indobert-lite-base')
+embeddings = model.encode(sentences)
+print(embeddings)
+```
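+
+Since this is a sentence-similarity model, the embeddings can be compared directly; a small follow-up sketch (`util.cos_sim` is available in recent sentence-transformers releases):
+
+```python
+from sentence_transformers import SentenceTransformer, util
+
+model = SentenceTransformer('LazarusNLP/congen-indobert-lite-base')
+embeddings = model.encode(["This is an example sentence", "Each sentence is converted"], convert_to_tensor=True)
+print(util.cos_sim(embeddings[0], embeddings[1]))  # 1x1 tensor of cosine similarity
+```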
+
+
+
+## Evaluation Results
+
+<!--- Describe how your model was evaluated -->
+
+For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=LazarusNLP/congen-indobert-lite-base)
+
+
+## Training
+The model was trained with the following parameters:
+
+**DataLoader**:
+
+`torch.utils.data.dataloader.DataLoader` of length 6524 with parameters:
+```
+{'batch_size': 128, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
+```
+
+**Loss**:
+
+`sentence_transformers_congen.losses.ConGenLoss.ConGenLoss`
+
+Parameters of the fit() method:
+```
+{
+    "epochs": 20,
+    "evaluation_steps": 0,
+    "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
+    "max_grad_norm": 1,
+    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
+    "optimizer_params": {
+        "correct_bias": false,
+        "eps": 1e-06,
+        "lr": 0.0003
+    },
+    "scheduler": "WarmupLinear",
+    "steps_per_epoch": null,
+    "warmup_steps": 13048,
+    "weight_decay": 0.01
+}
+```
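+
+A hedged sketch of the `fit()` call these parameters imply (`train_dataloader`, `train_loss`, and `evaluator` are placeholders; `ConGenLoss` comes from the external `sentence_transformers_congen` package, whose constructor is not shown here):
+
+```python
+from sentence_transformers import SentenceTransformer
+from transformers.optimization import AdamW
+
+def run_training(model: SentenceTransformer, train_dataloader, train_loss, evaluator) -> None:
+    # Mirrors the parameter dump above; train_loss is assumed to be a ConGenLoss instance.
+    model.fit(
+        train_objectives=[(train_dataloader, train_loss)],
+        evaluator=evaluator,
+        epochs=20,
+        evaluation_steps=0,
+        scheduler="WarmupLinear",
+        warmup_steps=13048,
+        optimizer_class=AdamW,
+        optimizer_params={"lr": 3e-4, "eps": 1e-6, "correct_bias": False},
+        weight_decay=0.01,
+        max_grad_norm=1,
+    )
+```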
+
+
+## Full Model Architecture
+```
+SentenceTransformer(
+  (0): Transformer({'max_seq_length': 32, 'do_lower_case': False}) with Transformer model: AlbertModel
+  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
+  (2): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+)
+```
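+
+An equivalent stack could be assembled by hand with `sentence_transformers.models`; a sketch, using the base checkpoint `indobenchmark/indobert-lite-base-p1` as a stand-in for the fine-tuned weights stored in this repo:
+
+```python
+from torch import nn
+from sentence_transformers import SentenceTransformer, models
+
+transformer = models.Transformer('indobenchmark/indobert-lite-base-p1', max_seq_length=32)
+pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode_mean_tokens=True)
+dense = models.Dense(in_features=768, out_features=768, bias=True, activation_function=nn.Tanh())
+model = SentenceTransformer(modules=[transformer, pooling, dense])
+```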
+
+## Citing & Authors
+
+<!--- Describe where people can find more information -->
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+{
+  "__version__": {
+    "sentence_transformers": "1.0.0",
+    "transformers": "4.29.2",
+    "pytorch": "2.0.1+cu117"
+  }
+}
eval/similarity_evaluation_results.csv ADDED
@@ -0,0 +1,17 @@
+epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+0,-1,0.7784628369715322,0.790441310186935,0.7745610761786144,0.7815123112306533,0.7707112402507581,0.7782807840320607,0.5110618110023735,0.5106208643865392
+1,-1,0.7919183217313387,0.8026935090405058,0.7781612660393912,0.7846604995944839,0.7743678045260972,0.7816463913967416,0.49774579884752534,0.5083373628127001
+2,-1,0.8130861078714943,0.8199870662006165,0.7644937862950276,0.7692507983083728,0.7609452213447676,0.7667759406867635,0.5105255947994755,0.5181236204472818
+3,-1,0.8120522841233425,0.8186837188637932,0.7569514053881609,0.7617098701664707,0.7537653246203407,0.759006221670444,0.4860231165617469,0.48471249955484075
+4,-1,0.8096698438029456,0.8176057439253992,0.7539490531181825,0.7601980534739224,0.7500700790213375,0.7570742790234226,0.49307098290290635,0.4970878479418517
+5,-1,0.8154446573476172,0.8221920403048152,0.7607869736755386,0.765150446235885,0.756842552092023,0.7624013379497931,0.5052868003959904,0.5053055818128606
+6,-1,0.8182260791503786,0.8240742902576491,0.7665322890312545,0.7723289417405284,0.7628382123508297,0.7697840965226919,0.5052775802370709,0.5092274902133616
+7,-1,0.8226442739407788,0.8281965374756299,0.7583215259849425,0.7619723412052115,0.7539303731069449,0.7589825319368873,0.5147085467217847,0.5215022704627538
+8,-1,0.8250791879929443,0.8302767883248077,0.767164179868101,0.7722566865545311,0.7637886454196815,0.7697028626849216,0.5251239718953331,0.5280571824538515
+9,-1,0.8175180934127748,0.8236161902306005,0.75219688988271,0.7588720365221943,0.7483920597915993,0.7562650335719203,0.48908222053279615,0.4938552834363178
+10,-1,0.8234490987201571,0.8291432428933043,0.7614318396464613,0.7668242716817875,0.7574953059572305,0.7635380611733849,0.5060344154344832,0.5059956103090336
+11,-1,0.818128634922896,0.824645219560505,0.755939003935069,0.7634814034489885,0.7516300329176759,0.7599805112910322,0.47717241436727775,0.48320670136398697
+12,-1,0.8235165732186087,0.8285989043600063,0.7532804361204786,0.7614422436425015,0.7490167315723992,0.7585644847033318,0.500012271619837,0.5050206843701895
+13,-1,0.8261978317523195,0.8302346388233832,0.7599432436544662,0.765971799548841,0.7560932821503724,0.7629799226669498,0.5138222972304346,0.5201865738264096
+14,-1,0.8265272353853084,0.829494040339557,0.7501531520145648,0.7569895102802171,0.7458370306298866,0.7538350347403957,0.5115806205876149,0.5194403704651349
+15,-1,0.824367971815316,0.8292325703701074,0.7494784306248647,0.7560956491103782,0.7452161064402848,0.7529456611154134,0.5088827882953869,0.5160813717087587
modules.json ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "0_Transformer",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Dense",
+    "type": "sentence_transformers.models.Dense"
+  }
+]