codewithdark committed on
Commit
7191f6d
·
verified ·
1 Parent(s): adf4ac7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -60,10 +60,10 @@ The model can be used for text generation via its integrated `generate()` method
60
  ### Example: Direct Inference
61
 
62
  ```python
63
- from transformers import AutoModelForCausalLM, AutoTokenizer
64
 
65
  # Load the model and tokenizer from the hub
66
- model = LatentRecurrentDepthModel.from_pretrained("codewithdark/latent-recurrent-depth-lm")
67
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
68
 
69
  prompt = "In the realm of language modeling"
@@ -87,7 +87,7 @@ print(generated_text)
87
  from transformers import AutoTokenizer, AutoModelForCausalLM
88
 
89
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
90
- model = LatentRecurrentDepthModel.from_pretrained("codewithdark/latent-recurrent-depth-lm")
91
 
92
  prompt = "In the realm of language modeling"
93
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids
 
60
  ### Example: Direct Inference
61
 
62
  ```python
63
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
64
 
65
  # Load the model and tokenizer from the hub
66
+ model = AutoModelForCausalLM.from_pretrained("codewithdark/latent-recurrent-depth-lm")
67
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
68
 
69
  prompt = "In the realm of language modeling"
 
87
  from transformers import AutoTokenizer, AutoModelForCausalLM
88
 
89
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
90
+ model = AutoModelForCausalLM.from_pretrained("codewithdark/latent-recurrent-depth-lm")
91
 
92
  prompt = "In the realm of language modeling"
93
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids