@@ -66,6 +66,7 @@ def load_data(dataset: Literal["Stanford", "NotreDame", "BerkStan"]) -> nx.Graph
return G_dataset
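
For orientation, the hunk header above carries the signature of load_data, whose body is not part of this hunk; only the call pattern implied by that signature can be illustrated. A minimal, hypothetical usage sketch (dataset names taken from the Literal annotation, return type from -> nx.Graph):

```python
# Hypothetical call, inferred only from the signature in the hunk header above.
G = load_data("Stanford")            # also "NotreDame" or "BerkStan"
print(G.number_of_nodes(), G.number_of_edges())
```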
def google_matrix(G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None) -> np.matrix:
"""Returns the Google matrix of the graph. NetworkX implementation.
@@ -155,6 +156,7 @@ def google_matrix(G, alpha=0.85, personalization=None, nodelist=None, weight="we
return np.asmatrix(alpha * A + (1 - alpha) * p)
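
The return line above is the standard dense construction alpha * A + (1 - alpha) * p, where A is the row-stochastic adjacency matrix with dangling rows replaced by the teleportation distribution and p is the (personalization) teleportation vector. A condensed sketch of how those pieces are typically assembled in a NetworkX-style dense implementation; the surrounding code is reconstructed for illustration, not copied from this diff:

```python
import numpy as np
import networkx as nx

def google_matrix_sketch(G, alpha=0.85, personalization=None):
    """Dense Google matrix along NetworkX lines (illustrative reconstruction)."""
    nodelist = list(G)
    A = nx.to_numpy_array(G, nodelist=nodelist)      # weighted adjacency matrix
    n = A.shape[0]

    # Teleportation vector p: uniform unless a personalization dict is given.
    if personalization is None:
        p = np.repeat(1.0 / n, n)
    else:
        p = np.array([personalization.get(v, 0.0) for v in nodelist], dtype=float)
        p /= p.sum()

    # Dangling rows (no out-links) are replaced by the teleportation distribution.
    dangling_rows = np.where(A.sum(axis=1) == 0)[0]
    A[dangling_rows] = p

    A /= A.sum(axis=1)[:, np.newaxis]                # make A row-stochastic
    return alpha * A + (1 - alpha) * p               # the return line in the diff
```

The diff wraps the result in np.asmatrix; the sketch returns a plain ndarray, which is the more current NumPy idiom but is otherwise the same matrix.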
def google_matrix_sparse(G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None) -> np.matrix:
""" Revised NetworkX implementation for sparse matrices. Returns the Ptilde matrix of the graph instead of the Google matrix.
@@ -246,6 +248,7 @@ def google_matrix_sparse(G, alpha=0.85, personalization=None, nodelist=None, wei
return A, p
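
Returning the pair (A, p) rather than a dense Google matrix is what keeps the sparse variant sparse: the rank-one teleportation term (1 - alpha) * p never has to be materialized as an n-by-n block. A sketch of how such a pair could drive a power iteration, assuming A is the row-stochastic link matrix with dangling rows already accounted for and p the teleportation vector; those assumptions, and the function name, are not taken from the diff:

```python
import numpy as np

def pagerank_from_sparse_pair(A, p, alpha=0.85, max_iter=10000, tol=1.0e-9):
    """Power iteration x_{k+1} = alpha * x_k A + (1 - alpha) * p (illustrative)."""
    n = A.shape[0]
    x = np.repeat(1.0 / n, n)                        # uniform starting vector
    for _ in range(max_iter):
        x_new = alpha * (x @ A) + (1 - alpha) * p    # one sparse mat-vec per sweep
        if np.abs(x_new - x).sum() < n * tol:        # L1 convergence test
            return x_new
        x = x_new
    raise RuntimeError("power iteration did not converge")
```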
def pagerank_numpy(G, alpha=0.85, personalization=None, weight="weight", dangling=None):
"""Returns the PageRank of the nodes in the graph. NetworkX implementation.
@@ -307,6 +310,7 @@ def pagerank_numpy(G, alpha=0.85, personalization=None, weight="weight", danglin
norm = largest.sum()
return dict(zip(G, map(float, largest / norm)))
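
The two surviving lines above are the tail of the dense eigenvector approach: largest is the principal (left) eigenvector of the Google matrix, and dividing by its sum turns it into a probability distribution keyed by node. The computation that typically precedes them in a NetworkX-style pagerank_numpy looks roughly like this; a reconstruction that reuses the file's google_matrix, shown for context and not copied from the diff:

```python
import numpy as np

def pagerank_numpy_sketch(G, alpha=0.85, personalization=None, weight="weight", dangling=None):
    """Dense PageRank via the principal eigenvector of the Google matrix (illustrative)."""
    if len(G) == 0:
        return {}
    M = google_matrix(G, alpha, personalization=personalization,
                      weight=weight, dangling=dangling)
    # Left eigenvector of M = right eigenvector of M.T; dense eig is O(n^3),
    # which is why this variant only suits small graphs.
    eigenvalues, eigenvectors = np.linalg.eig(M.T)
    ind = np.argmax(eigenvalues)
    # The eigenvector may come back complex and arbitrarily scaled; keep the real
    # part and normalize so the entries sum to 1.
    largest = np.array(eigenvectors[:, ind]).flatten().real
    norm = largest.sum()
    return dict(zip(G, map(float, largest / norm)))
```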
def pagerank(G, alpha=0.85, personalization=None, max_iter=10000, tol=1.0e-9, nstart=None, weight="weight", dangling=None,):
"""
@@ -422,6 +426,7 @@ def pagerank(G, alpha=0.85, personalization=None, max_iter=10000, tol=1.0e-9, ns
# this is a failure to converge
raise nx.PowerIterationFailedConvergence(max_iter)
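
The raise above is the exit path of the power iteration when the L1 change between successive iterates never falls below the tolerance within max_iter sweeps. For context, a self-contained sketch of the loop shape that ends in exactly this exception, in the dict-based style of NetworkX's pure-Python pagerank; it uses uniform teleportation only, whereas the real function also threads personalization, nstart, weight and dangling through:

```python
import networkx as nx

def pagerank_power_sketch(G, alpha=0.85, max_iter=10000, tol=1.0e-9):
    """Pure-Python power iteration with the same failure behaviour (illustrative)."""
    W = nx.stochastic_graph(nx.DiGraph(G), weight="weight")  # out-edge weights sum to 1
    n = W.number_of_nodes()
    x = dict.fromkeys(W, 1.0 / n)                            # starting vector
    p = dict.fromkeys(W, 1.0 / n)                            # uniform teleportation
    dangling_nodes = [v for v in W if W.out_degree(v, weight="weight") == 0.0]
    for _ in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast.keys(), 0.0)
        danglesum = alpha * sum(xlast[v] for v in dangling_nodes)
        for v in x:
            for _, nbr, wt in W.edges(v, data="weight"):
                x[nbr] += alpha * xlast[v] * wt              # push rank mass along out-edges
            x[v] += danglesum * p[v] + (1.0 - alpha) * p[v]  # dangling mass + teleportation
        if sum(abs(x[v] - xlast[v]) for v in x) < n * tol:   # L1 convergence test
            return x
    # Exhausting max_iter without meeting the tolerance lands on the raise shown above.
    raise nx.PowerIterationFailedConvergence(max_iter)
```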
def shifted_pow_pagerank(G, alphas=[0.85, 0.9, 0.95, 0.99], max_iter=10000, tol=1.0e-9):
"""
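
shifted_pow_pagerank takes a list of damping factors instead of a single alpha, which is the signature of the shifted power method: every power iteration started from the teleportation vector expands into the same sequence mu_k = mu_{k-1} * Ptilde, so the single expensive matrix-vector product per sweep can be shared by all damping factors, and each solution only needs the cheap scaled correction alpha_i**k * (mu_k - mu_{k-1}). A self-contained sketch of that idea, not the diff's code; Ptilde (row-stochastic, dangling rows handled), v (teleportation vector summing to 1) and all names below are assumptions:

```python
import numpy as np

def shifted_pow_pagerank_sketch(Ptilde, v, alphas=(0.85, 0.9, 0.95, 0.99),
                                max_iter=10000, tol=1.0e-9):
    """One shared mat-vec per sweep serves every damping factor (illustrative)."""
    n = Ptilde.shape[0]
    alphas = np.asarray(alphas, dtype=float)
    x = np.tile(v, (len(alphas), 1))       # x_i^(0) = v for every alpha_i
    mu_old = np.array(v, dtype=float)      # mu_0 = v
    alpha_pow = np.ones_like(alphas)       # holds alpha_i**k
    done = np.zeros(len(alphas), dtype=bool)
    for _ in range(max_iter):
        mu_new = mu_old @ Ptilde           # the single shared matrix-vector product
        diff = mu_new - mu_old
        alpha_pow *= alphas
        # x_i^(k) = x_i^(k-1) + alpha_i**k * (mu_k - mu_{k-1})
        x[~done] += np.outer(alpha_pow[~done], diff)
        # per-alpha L1 change of the iterate is alpha_i**k * ||mu_k - mu_{k-1}||_1
        done |= alpha_pow * np.abs(diff).sum() < n * tol
        if done.all():
            return dict(zip(alphas.tolist(), x))
        mu_old = mu_new
    raise RuntimeError("shifted power iteration did not converge for all alphas")
```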