Update README.md
README.md CHANGED
@@ -323,7 +323,7 @@ model = AutoModelForCausalLM.from_pretrained(
 
 # Question
 question = "What are the movies of Tom Hanks?"
-schema = "(:Actor)-[:ActedIn]->(:Movie)"
+schema = "(:Actor)-[:ActedIn]->(:Movie)"  # Check the NOTE below on creating your own schemas
 new_message = prepare_chat_prompt(question=question, schema=schema)
 prompt = tokenizer.apply_chat_template(new_message, add_generation_prompt=True, tokenize=False)
 inputs = tokenizer(prompt, return_tensors="pt", padding=True)
@@ -347,4 +347,10 @@ with torch.no_grad():
 
 print(outputs)
 > ["MATCH (a:Actor {Name: 'Tom Hanks'})-[:ActedIn]->(m:Movie) RETURN m"]
-```
+```
+
+# NOTE on creating your own schemas:
+* In the dataset we used, the schemas are already provided. They were created either by
+  * directly using the schema that the input data source provided, OR
+  * generating the schema with the neo4j-graphrag package (see the SchemaReader.get_schema(...) function).
+* For your own Neo4j database, you can use the neo4j-graphrag package's SchemaReader.get_schema(...) function.
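
For reference, here is a minimal sketch of deriving a schema string from your own Neo4j database, as the NOTE suggests. The `SchemaReader` wrapper it mentions is part of this repo's dataset tooling; the sketch instead assumes the neo4j-graphrag package's `get_schema` helper (import path assumed), and the connection URI and credentials are placeholders.

```python
import neo4j
from neo4j_graphrag.schema import get_schema  # assumed import path

# Placeholder connection details -- replace with your own database.
driver = neo4j.GraphDatabase.driver(
    "neo4j://localhost:7687", auth=("neo4j", "password")
)

# get_schema returns a text description of the node labels, relationship
# types, and properties in the connected database.
schema = get_schema(driver)
print(schema)

driver.close()
```

However the schema is produced, the resulting string is simply what gets passed as the `schema` argument to `prepare_chat_prompt(question=..., schema=...)` in the example above.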
|