forked from mirjunaid26/large-language-models
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathnode2vec.py
88 lines (69 loc) · 2.83 KB
/
node2vec.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import random

import networkx as nx

from gensim.models import Word2Vec
from gensim.models import KeyedVectors
# Demo knowledge graph (replace with your own data).
_concepts = ['dog', 'cat', 'animal', 'pet']
_relations = [('dog', 'animal'), ('cat', 'animal'), ('dog', 'pet'), ('cat', 'pet')]
knowledge_graph = nx.Graph()
knowledge_graph.add_nodes_from(_concepts)
knowledge_graph.add_edges_from(_relations)
# Node2Vec: generate node embeddings via random walks + Word2Vec
class Node2Vec:
    """Minimal node2vec-style embedder: uniform random walks + Word2Vec.

    The graph only needs to expose ``nodes()`` and ``neighbors(node)``
    (a networkx ``Graph`` works). Walks are plain uniform random walks
    (i.e. p = q = 1 in node2vec terms).
    """

    def __init__(self, graph):
        # Graph whose nodes get embedded; only nodes()/neighbors() are used.
        self.graph = graph

    def train(self, dimensions=64, walk_length=30, num_walks=200, workers=4):
        """Fit the model and save embeddings to 'node2vec_embeddings.bin'.

        Saved with binary=True so the file can be reloaded with
        ``KeyedVectors.load_word2vec_format(..., binary=True)``.
        """
        model = self.fit(window=10, min_count=1, size=dimensions,
                         walk_length=walk_length, num_walks=num_walks,
                         workers=workers)
        # binary=True matches the binary=True used when reloading the file.
        model.wv.save_word2vec_format('node2vec_embeddings.bin', binary=True)

    def fit(self, **kwargs):
        """Generate random walks and train a Word2Vec model on them.

        Walk-related kwargs (``num_walks``, ``walk_length``) are routed to the
        walk generator; everything else goes to ``Word2Vec``. Accepts the
        legacy ``size`` kwarg and maps it to gensim>=4's ``vector_size``.
        """
        # Local import keeps the module importable when gensim is absent.
        from gensim.models import Word2Vec
        # Separate walk parameters from Word2Vec parameters — passing the
        # mixed kwargs to either callee raises TypeError.
        walk_args = {k: kwargs.pop(k)
                     for k in ('num_walks', 'walk_length') if k in kwargs}
        # gensim >= 4 renamed `size` to `vector_size`.
        if 'size' in kwargs:
            kwargs['vector_size'] = kwargs.pop('size')
        walks = self._generate_walks(**walk_args)
        return Word2Vec(walks, **kwargs)

    def _generate_walks(self, num_walks=200, walk_length=30):
        # num_walks passes over the graph, one walk starting from every node.
        walks = []
        for _ in range(num_walks):
            for node in self.graph.nodes():
                walks.append(self._generate_random_walk(node, walk_length))
        return walks

    def _generate_random_walk(self, start_node, walk_length):
        # Uniform random walk; stops early at a node with no neighbors.
        walk = [start_node]
        for _ in range(walk_length - 1):
            neighbors = list(self.graph.neighbors(walk[-1]))
            if not neighbors:
                break
            walk.append(random.choice(neighbors))
        return walk
# Fit the model on the demo graph and persist the embeddings to disk.
embedder = Node2Vec(knowledge_graph)
embedder.train()

# Reload the persisted embeddings for query-time lookups.
word_vectors = KeyedVectors.load_word2vec_format('node2vec_embeddings.bin', binary=True)
def generate_response(query, vectors=None):
    """Suggest knowledge-graph concepts related to *query*.

    Parameters
    ----------
    query : str
        Whitespace-separated tokens; tokens without an embedding are ignored.
    vectors : optional
        A KeyedVectors-like object supporting ``in``, ``[]`` and
        ``similar_by_vector``. Defaults to the module-level ``word_vectors``.

    Returns
    -------
    str
        A suggestion listing up to three similar concepts, or an apology
        when no query token has an embedding.
    """
    if vectors is None:
        vectors = word_vectors
    # `token in vectors` works on both gensim 3 and 4; the `.vocab`
    # attribute used previously was removed in gensim 4.
    tokens = query.split()
    embeddings = [vectors[token] for token in tokens if token in vectors]
    if not embeddings:
        return "I'm sorry, I don't understand your query."
    # Represent the query as the mean of its token embeddings.
    query_embedding = sum(embeddings) / len(embeddings)
    # Suggest the three nearest concepts in embedding space.
    similar_nodes = vectors.similar_by_vector(query_embedding, topn=3)
    return "You may be interested in: " + ", ".join(node for node, _ in similar_nodes)
# Demo: answer a sample query and echo the conversation.
query = "dog and cat"
response = generate_response(query)
for speaker, text in (("User: ", query), ("Chatbot: ", response)):
    print(speaker, text)