GMRES does not work. Refined the code

main
Luca Lombardo 2 years ago
parent 462e21c0f2
commit 13412816a0

@ -1,210 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import networkx as nx\n",
"import time\n",
"import math\n",
"import pandas as pd\n",
"import scipy as sp\n",
"import plotly.express as px\n",
"import plotly.graph_objs as go\n",
"from scipy.sparse import *\n",
"from scipy import linalg\n",
"from scipy.sparse.linalg import norm\n",
"from scipy.optimize import least_squares"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Algorithm 2 testing"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# def Arnoldi(A, v, m): \n",
"# beta = norm(v)\n",
"# v = v/beta # dimension of v is n x 1\n",
"# H = sp.sparse.lil_matrix((m,m)) # dimension of H is m x m \n",
"# V = sp.sparse.lil_matrix((A.shape[0],m))\n",
"# V[:,0] = v # each column of V is a vector v\n",
"\n",
"# for j in range(m):\n",
"# print(\"j = \", j)\n",
"# w = A @ v \n",
"# for i in range(j):\n",
"# tmp = v.T @ w \n",
"# H[i,j] = tmp[0,0]\n",
"# w = w - H[i,j]*v \n",
" \n",
"# H[j,j-1] = norm(w) \n",
"\n",
"# if H[j,j-1] == 0: \n",
"# print(\"Arnoldi breakdown\")\n",
"# m = j\n",
"# v = 0\n",
"# break\n",
"# else:\n",
"# if j < m-1:\n",
"# v = w/H[j,j-1]\n",
"# # for i in range(A.shape[0]):\n",
"# # V[i,j+1] = v[i,0]\n",
"# V[:,j+1] = v\n",
"\n",
"# print(j, \" iterations completed\")\n",
"# print(\"V = \", V.shape)\n",
"# print(\"H = \", H.shape)\n",
"# print(\"v = \", v.shape)\n",
"# print(\"beta = \", beta)\n",
"\n",
"# return V, H, v, beta, j"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Defined as Algorithm 2 in the paper. It's needed since it's called by Algorithm 4"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def Arnoldi(A, v0, m):\n",
"    \"\"\"Arnoldi process (Algorithm 2 in the paper).\n",
"\n",
"    Builds an orthonormal basis V of the Krylov subspace K_m(A, v0) together\n",
"    with the (m+1) x m upper-Hessenberg matrix H.\n",
"\n",
"    A  : (n x n) sparse matrix\n",
"    v0 : starting vector (sparse column or 1-D numpy), not necessarily normalized\n",
"    m  : maximum dimension of the Krylov subspace\n",
"\n",
"    Returns (V, H, beta, j): beta = ||v0|| and j is the index of the last\n",
"    completed iteration.\n",
"    \"\"\"\n",
"    v = v0\n",
"    beta = np.linalg.norm(v)\n",
"    v = v/beta\n",
"    H = sp.sparse.lil_matrix((m+1,m))\n",
"    V = sp.sparse.lil_matrix((A.shape[0],m+1))\n",
"    V[:,0] = v  # each column of V is a basis vector\n",
"    basis = [v]  # basis vectors kept in their native type for the inner products\n",
"\n",
"    for j in range(m):\n",
"        w = A @ v\n",
"        # fix: orthogonalize w against ALL previous basis vectors. The old code\n",
"        # reused the current v for every i and stopped at range(j), so H was\n",
"        # never a proper Hessenberg matrix and V was not orthogonal -- the\n",
"        # likely reason the GMRES built on top of this did not converge.\n",
"        for i in range(j+1):\n",
"            H[i,j] = basis[i].T @ w  # 1x1 inner product, O(1) in memory\n",
"            w = w - H[i,j]*basis[i]\n",
"\n",
"        H[j+1,j] = np.linalg.norm(w)\n",
"\n",
"        if H[j+1,j] == 0:\n",
"            print(\"Arnoldi breakdown\")\n",
"            m = j\n",
"            v = 0\n",
"            break\n",
"        else:\n",
"            if j < m-1:\n",
"                v = w/H[j+1,j]\n",
"                V[:,j+1] = v\n",
"                basis.append(v)\n",
"\n",
"    print(j, \" iterations completed\")\n",
"    print(\"V = \", V.shape)\n",
"    print(\"H = \", H.shape)\n",
"    print(\"v = \", v.shape)\n",
"    print(\"beta = \", beta)\n",
"\n",
"    return V, H, beta, j"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Creating a small test case"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"m = 100  # Krylov subspace dimension for the Arnoldi test\n",
"n = 110  # problem size\n",
"A = sp.sparse.rand(n,n, density=0.1, format='lil')\n",
"# generate a probability vector, with all the entries as 1/n\n",
"v = sp.sparse.lil_matrix((n,1))\n",
"for i in range(n):\n",
" v[i] = 1/n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"99 iterations completed\n",
"V = (110, 101)\n",
"H = (101, 100)\n",
"v = (110, 1)\n",
"beta = 0.09534625892455922\n"
]
},
{
"data": {
"text/plain": [
"(<110x101 sparse matrix of type '<class 'numpy.float64'>'\n",
" \twith 11000 stored elements in List of Lists format>,\n",
" <101x100 sparse matrix of type '<class 'numpy.float64'>'\n",
" \twith 4879 stored elements in List of Lists format>,\n",
" <110x1 sparse matrix of type '<class 'numpy.float64'>'\n",
" \twith 110 stored elements in Compressed Sparse Row format>,\n",
" 0.09534625892455922,\n",
" 99)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# run the Arnoldi process on the small test case above\n",
"Arnoldi(A,v,m)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.6 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6 (main, Nov 2 2022, 18:53:38) [GCC 11.3.0]"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -1,868 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import networkx as nx\n",
"import time\n",
"import math\n",
"import pandas as pd\n",
"import scipy as sp\n",
"import plotly.express as px\n",
"import plotly.graph_objs as go\n",
"from scipy.sparse import *\n",
"from scipy import linalg\n",
"from scipy.sparse.linalg import norm\n",
"from scipy.optimize import least_squares"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Arnoldi \n",
"\n",
"This is a copy of the algorithm defined and tested in the notebook `algo2_testing`. It's an implementation of the Algorithm 2 from the paper. It's needed in this notebook since this function is called by the `algo4` function. It's implemented to return exactly what's needed in the `algo4` function.\n",
"\n",
"Everything will be reorganized in the main.py file once everything is working."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def Arnoldi(A, v0, m):\n",
"    \"\"\"Arnoldi process (Algorithm 2 in the paper), quiet variant used by Algo4.\n",
"\n",
"    Builds an orthonormal basis V of the Krylov subspace K_m(A, v0) together\n",
"    with the (m+1) x m upper-Hessenberg matrix H.\n",
"\n",
"    Returns (V, H, beta, j): beta = ||v0|| and j is the index of the last\n",
"    completed iteration.\n",
"    \"\"\"\n",
"    v = v0\n",
"    beta = np.linalg.norm(v)\n",
"    v = v/beta\n",
"    H = sp.sparse.lil_matrix((m+1,m))\n",
"    V = sp.sparse.lil_matrix((A.shape[0],m+1))\n",
"    V[:,0] = v  # each column of V is a basis vector\n",
"    basis = [v]  # basis vectors kept in their native type for the inner products\n",
"\n",
"    for j in range(m):\n",
"        w = A @ v\n",
"        # fix: orthogonalize w against ALL previous basis vectors. The old code\n",
"        # reused the current v for every i and stopped at range(j), so H was\n",
"        # never a proper Hessenberg matrix and V was not orthogonal.\n",
"        for i in range(j+1):\n",
"            H[i,j] = basis[i].T @ w  # 1x1 inner product, O(1) in memory\n",
"            w = w - H[i,j]*basis[i]\n",
"\n",
"        H[j+1,j] = np.linalg.norm(w)\n",
"\n",
"        if H[j+1,j] == 0:\n",
"            # print(\"Arnoldi breakdown\")\n",
"            m = j\n",
"            v = 0\n",
"            break\n",
"        else:\n",
"            if j < m-1:\n",
"                v = w/H[j+1,j]\n",
"                V[:,j+1] = v\n",
"                basis.append(v)\n",
"\n",
"    return V, H, beta, j"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Algorithm 4 testing\n",
"\n",
"This algorithm is based on the \"Algorithm 4\" of the paper, the pseudocode provided by the authors is the following \n",
"\n",
"![](https://i.imgur.com/H92fru7.png)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def compute_gamma(res, a, k):\n",
"    \"\"\"Shift ratios used by the multi-shift update of Algorithm 4.\n",
"\n",
"    gamma[i] = (res[i]*a[k])/(res[k]*a[i]) for i != k, while gamma[k] = 0.\n",
"    \"\"\"\n",
"    gamma = np.array([(res[i]*a[k])/(res[k]*a[i]) if i != k else 0.0\n",
"                      for i in range(len(a))])\n",
"    return gamma"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Basic test case with random numbers to test the algorithm."
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"def compute_ptilde(G: nx.Graph):\n",
"    \"\"\"\n",
"    Compute the ptilde matrix Pt = P + v d^T and the probability vector v.\n",
"\n",
"    P is the column-normalized link matrix of G and d marks the dangling\n",
"    nodes (rows of the adjacency matrix with no entries).\n",
"\n",
"    :param G: the graph\n",
"    :return: Pt (n x n sparse) and v (1-D numpy vector, all entries 1/n)\n",
"    \"\"\"\n",
"\n",
"    # given the graph G, return its sparse matrix representation\n",
"    A = nx.to_scipy_sparse_array(G, format='lil')\n",
"    n = A.shape[0]\n",
"\n",
"    # dangling indicator: d[0,i] = 1 if the i-th row of A is null, else 0.\n",
"    # fix: the old code assigned 0 for empty rows, so d stayed identically\n",
"    # zero and the dangling-node correction had no effect at all.\n",
"    d = sp.sparse.lil_matrix((1, n))\n",
"    for i in range(n):\n",
"        if sp.sparse.lil_matrix.sum(A[[i],:]) == 0:\n",
"            d[0,i] = 1\n",
"\n",
"    # probability vector v = 1/n\n",
"    v = np.repeat(1/n, n)\n",
"\n",
"    # P(i,j) = 1/(number of non null entries in column j) if A(i,j) != 0, else 0\n",
"    P = sp.sparse.lil_matrix((n, n))\n",
"    for j in range(n):\n",
"        nnz_j = sp.sparse.lil_matrix.sum(A[:,[j]] != 0)  # hoisted out of the inner loop\n",
"        for i in range(n):\n",
"            if A[i,j] != 0:\n",
"                P[i,j] = 1/nnz_j\n",
"\n",
"    # rank-one dangling correction as a true (n x 1) @ (1 x n) outer product.\n",
"    # fix: the old 'v @ d.T' mixed a 1-D array with an (n x 1) sparse matrix\n",
"    # and did not produce the intended (n x n) matrix.\n",
"    Pt = P + sp.sparse.lil_matrix(v.reshape(-1, 1)) @ d\n",
"\n",
"    return Pt, v"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [],
"source": [
"def Algo4(Pt, v, m, a: list, tau, maxit: int, x):\n",
" \"\"\"Multi-shift method (\"Algorithm 4\" of the paper).\n",
"\n",
" Pt : transition matrix ptilde (n x n sparse)\n",
" v : probability vector (right-hand side of the shifted systems)\n",
" m : Krylov subspace dimension handed to Arnoldi\n",
" a : list of damping factors alpha, one shifted system per entry\n",
" tau : tolerance on the residual norms\n",
" maxit : maximum number of outer iterations\n",
" x : initial guess -- NOTE(review): immediately overwritten below, so this\n",
" argument is effectively unused\n",
"\n",
" Returns (x, iter, mv) on convergence, where mv counts matrix-vector\n",
" products; raises Exception when maxit is exhausted.\n",
" \"\"\"\n",
"\n",
" mv, iter = 0, 1 # mv is the number of matrix-vector products, iter is the number of iterations\n",
" \n",
" # initialize x as a random sparse matrix. Each col is the pagerank vector for a different alpha\n",
" x = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
"\n",
" # initialize the identity matrix of size Pt.shape\n",
" I = sp.sparse.eye(Pt.shape[0], Pt.shape[1], format='lil')\n",
"\n",
" # compute the residual vector, it is a matrix of size (n, len(a)). Each col is the residual vector for a different alpha. \n",
" r = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
" res = np.zeros(len(a))\n",
"\n",
" # compute the residual vector and the norm of each col in the vector res\n",
" # NOTE(review): spsolve returns the exact SOLUTION of (I - a[i]*Pt) r = v,\n",
" # not the residual v - (I - a[i]*Pt) @ x of the current guess; the\n",
" # stagnating residuals in the output below suggest this initialization is\n",
" # wrong -- TODO check against the paper\n",
" for i in range(len(a)):\n",
" r[:,[i]] = sp.sparse.linalg.spsolve(I - a[i]*Pt, v)\n",
" col = r[:,[i]].toarray()\n",
" res[i] = np.linalg.norm(col)\n",
"\n",
" # this is a while loop in the paper\n",
" for _ in range(maxit):\n",
" # check if we have converged\n",
" err = np.absolute(np.max(res))\n",
" if err < tau:\n",
" print(\"Computation ended successfully in \", iter, \" iterations and \", mv, \" matrix-vector products.\")\n",
" return x, iter, mv\n",
"\n",
" print(\"\\niter = \", iter)\n",
" print(\"res: \", res)\n",
" print(\"err = \", err)\n",
"\n",
" # find k as the index of the largest residual\n",
" k = int(np.argmax(res))\n",
" print(\"k = \", k)\n",
"\n",
" # compute gamma as defined in the paper\n",
" gamma = compute_gamma(res, a, k)\n",
" \n",
" # Run Arnoldi\n",
" r_k = r[:,[k]].toarray() # r_k is the residual vector for alpha_k\n",
" A_arnoldi = (1/a[k])*I - Pt # A_arnoldi is the matrix used in Arnoldi (NOTE(review): unused -- the call below rebuilds the same expression)\n",
" V, H, beta, j = Arnoldi((1/a[k])*I - Pt, r_k, m) # V is the matrix of vectors v, H is the Hessenberg matrix, beta is the norm of the last vector v, j is the number of iterations of Arnoldi\n",
" H = H[:-1,:] # remove the last row of H\n",
" V = V[:,:-1] # remove the last col of V\n",
" mv = mv + j # update the number of matrix-vector products\n",
"\n",
" H_e1 = np.zeros(H.shape[0]) \n",
" H_e1[0] = 1 # canonical vector e1 of size H.shape[0]\n",
"\n",
" # compute y as the minimizer of || beta*e1 - Hy ||_2 using the least squares method\n",
" y = sp.sparse.lil_matrix((H.shape[1],len(a))) # y is the matrix of vectors y, each col is the vector y for a different alpha\n",
"\n",
" # we only need the k-th col of y in this iteration\n",
" y[:,[k]] = sp.sparse.linalg.lsqr(H, beta*H_e1)[0]\n",
" y_k = y[:,[k]].toarray()\n",
"\n",
" # # Update x\n",
" # NOTE(review): x_new is an alias of x, not a copy, so every write to\n",
" # x_new also mutates x and the final 'x = x_new' is a no-op -- consider\n",
" # x.copy() if the old iterate must be preserved\n",
" x_new = x\n",
" x_new[:,[k]] = x[:,[k]] + V @ y_k\n",
"\n",
" V_e1 = np.zeros(V.shape[0])\n",
" V_e1[0] = 1 # canonical vector e1 of size V.shape[0]\n",
"\n",
" # Update res[k]\n",
" norm_k =np.linalg.norm(beta*V_e1 - V @ y_k) # this returns a scalar\n",
" res[k] = a[k]*norm_k\n",
"\n",
" # multi shift\n",
" for i in range(len(a)):\n",
" # NOTE(review): the inner 'if res[i] >= tau' repeats part of the outer\n",
" # condition; together with the 'else' branch below, one of these checks\n",
" # is redundant (indentation is flattened in this view -- confirm which\n",
" # 'if' the else binds to in the actual notebook)\n",
" if i != k and res[i] >= tau:\n",
" if res[i] >= tau:\n",
" \n",
" # NOTE(review): H is shifted cumulatively -- each pass through this loop\n",
" # adds another diagonal shift on top of the previous one instead of\n",
" # shifting the original Hessenberg matrix; TODO confirm against the paper\n",
" H = H + ((1-a[i])/a[i] - (1-a[k])/a[k])*sp.sparse.eye(H.shape[0], H.shape[1], format='lil')\n",
"\n",
" # Compute z as described in the paper\n",
" z1 = H_e1*beta\n",
" z1 = z1.reshape(z1.shape[0],1)\n",
" # NOTE(review): the hard-coded column index 1 looks like a typo for k\n",
" z2 = H @ y[:,[1]]\n",
" z2 = z2.reshape(z2.shape[0],1)\n",
" z = z1 - z2\n",
"\n",
" # Solve the linear system for A and b\n",
" A = sp.sparse.hstack([H, z])\n",
" b = (beta*H_e1)\n",
"\n",
" # use the least squares method to solve the linear system\n",
" to_split = sp.sparse.linalg.lsqr(A, b.reshape(b.shape[0],1))[0]\n",
" \n",
" # the last element of to_split is the last element of gamma[i], the other elements are the elements of y[:[i]]\n",
" y[:,[i]] = to_split[:-1]\n",
" gamma[i] = to_split[-1]\n",
"\n",
" # update x\n",
" x_new[:,i] = x[:,i] + V @ y[:,[i]]\n",
"\n",
" # update the residual vector\n",
" # print(\"\\tupdating res[\", i, \"]\")\n",
" # print(\"\\tgamma[\", i, \"] = \", gamma[i])\n",
" # print(\"\\tres[\", k, \"] = \", res[k])\n",
" # print(\"\\ta[\", i, \"] = \", a[i])\n",
" # print(\"\\ta[\", k, \"] = \", a[k])\n",
" res[i] = (a[i]/a[k])*gamma[i]*res[k]\n",
" # print(\"\\tupdated res[\", i, \"] = \", res[i])\n",
" # print()\n",
"\n",
" else:\n",
" if res[i] < tau:\n",
" print(\"res[\", i, \"] is smaller than tau = \", tau, \" at iteration \", iter)\n",
"\n",
" iter = iter + 1\n",
" x = x_new\n",
"\n",
" raise Exception('Maximum number of iterations reached')"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {},
"outputs": [],
"source": [
"G = nx.watts_strogatz_graph(1000, 4, 0.1)  # small-world test graph with 1000 nodes\n",
"Pt, v = compute_ptilde(G)\n",
"# a = [0.85, 0.86, 0.87, 0.88, 0.89, 0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]\n",
"a = [0.85, 0.86, 0.87, 0.88]  # damping factors solved for simultaneously\n",
"tau = 1e-6  # convergence tolerance on the residual norms\n",
"maxit = 100  # maximum outer iterations for Algo4\n",
"n = len(G.nodes)\n",
"x = sp.sparse.random(n, len(a), density=0.1, format='lil')  # initial guess (NOTE: Algo4 overwrites it)"
]
},
{
"cell_type": "code",
"execution_count": 87,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py:168: SparseEfficiencyWarning: spsolve requires A be CSC or CSR matrix format\n",
" warn('spsolve requires A be CSC or CSR matrix format',\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"iter = 1\n",
"res: [0.21235414 0.22756312 0.24511308 0.26558937]\n",
"err = 0.26558937251088227\n",
"k = 3\n",
"\n",
"\n",
"iter = 2\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 3\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 4\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 5\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 6\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 7\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 8\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 9\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 10\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 11\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 12\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 13\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 14\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 15\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 16\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 17\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 18\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 19\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 20\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 21\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 22\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 23\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 24\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 25\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 26\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 27\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 28\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 29\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 30\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 31\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 32\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 33\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 34\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 35\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 36\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 37\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 38\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 39\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 40\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 41\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 42\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 43\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 44\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 45\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 46\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 47\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 48\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 49\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 50\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 51\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 52\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 53\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 54\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 55\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 56\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 57\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 58\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 59\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 60\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 61\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 62\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 63\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 64\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 65\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 66\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 67\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 68\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 69\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 70\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 71\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 72\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 73\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 74\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 75\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 76\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 77\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 78\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 79\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 80\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 81\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 82\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 83\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 84\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 85\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n",
"\n",
"\n",
"iter = 86\n",
"res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n",
"err = 10.69433447757171\n",
"k = 3\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[87], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m x, \u001b[38;5;28miter\u001b[39m, mv \u001b[38;5;241m=\u001b[39m \u001b[43mAlgo4\u001b[49m\u001b[43m(\u001b[49m\u001b[43mPt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtau\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmaxit\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[85], line 46\u001b[0m, in \u001b[0;36mAlgo4\u001b[0;34m(Pt, v, m, a, tau, maxit, x)\u001b[0m\n\u001b[1;32m 44\u001b[0m V, H, beta, j \u001b[38;5;241m=\u001b[39m Arnoldi((\u001b[38;5;241m1\u001b[39m\u001b[38;5;241m/\u001b[39ma[k])\u001b[38;5;241m*\u001b[39mI \u001b[38;5;241m-\u001b[39m Pt, r_k, m) \u001b[38;5;66;03m# V is the matrix of vectors v, H is the Hessenberg matrix, beta is the norm of the last vector v, j is the number of iterations of Arnoldi\u001b[39;00m\n\u001b[1;32m 45\u001b[0m H \u001b[38;5;241m=\u001b[39m H[:\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m,:] \u001b[38;5;66;03m# remove the last row of H\u001b[39;00m\n\u001b[0;32m---> 46\u001b[0m V \u001b[38;5;241m=\u001b[39m \u001b[43mV\u001b[49m\u001b[43m[\u001b[49m\u001b[43m:\u001b[49m\u001b[43m,\u001b[49m\u001b[43m:\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m \u001b[38;5;66;03m# remove the last col of V\u001b[39;00m\n\u001b[1;32m 47\u001b[0m mv \u001b[38;5;241m=\u001b[39m mv \u001b[38;5;241m+\u001b[39m j \u001b[38;5;66;03m# update the number of matrix-vector products\u001b[39;00m\n\u001b[1;32m 49\u001b[0m H_e1 \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mzeros(H\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m]) \n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:211\u001b[0m, in \u001b[0;36mlil_matrix.__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 209\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_get_intXint(\u001b[39m*\u001b[39mkey)\n\u001b[1;32m 210\u001b[0m \u001b[39m# Everything else takes the normal path.\u001b[39;00m\n\u001b[0;32m--> 211\u001b[0m \u001b[39mreturn\u001b[39;00m IndexMixin\u001b[39m.\u001b[39;49m\u001b[39m__getitem__\u001b[39;49m(\u001b[39mself\u001b[39;49m, key)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_index.py:69\u001b[0m, in \u001b[0;36mIndexMixin.__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[39mif\u001b[39;00m row \u001b[39m==\u001b[39m \u001b[39mslice\u001b[39m(\u001b[39mNone\u001b[39;00m) \u001b[39mand\u001b[39;00m row \u001b[39m==\u001b[39m col:\n\u001b[1;32m 68\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcopy()\n\u001b[0;32m---> 69\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_get_sliceXslice(row, col)\n\u001b[1;32m 70\u001b[0m \u001b[39melif\u001b[39;00m col\u001b[39m.\u001b[39mndim \u001b[39m==\u001b[39m \u001b[39m1\u001b[39m:\n\u001b[1;32m 71\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_get_sliceXarray(row, col)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:241\u001b[0m, in \u001b[0;36mlil_matrix._get_sliceXslice\u001b[0;34m(self, row, col)\u001b[0m\n\u001b[1;32m 239\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_get_sliceXslice\u001b[39m(\u001b[39mself\u001b[39m, row, col):\n\u001b[1;32m 240\u001b[0m row \u001b[39m=\u001b[39m \u001b[39mrange\u001b[39m(\u001b[39m*\u001b[39mrow\u001b[39m.\u001b[39mindices(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mshape[\u001b[39m0\u001b[39m]))\n\u001b[0;32m--> 241\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_get_row_ranges(row, col)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:290\u001b[0m, in \u001b[0;36mlil_matrix._get_row_ranges\u001b[0;34m(self, rows, col_slice)\u001b[0m\n\u001b[1;32m 287\u001b[0m nj \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(col_range)\n\u001b[1;32m 288\u001b[0m new \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lil_container((\u001b[39mlen\u001b[39m(rows), nj), dtype\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdtype)\n\u001b[0;32m--> 290\u001b[0m _csparsetools\u001b[39m.\u001b[39;49mlil_get_row_ranges(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshape[\u001b[39m0\u001b[39;49m], \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshape[\u001b[39m1\u001b[39;49m],\n\u001b[1;32m 291\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mrows, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdata,\n\u001b[1;32m 292\u001b[0m new\u001b[39m.\u001b[39;49mrows, new\u001b[39m.\u001b[39;49mdata,\n\u001b[1;32m 293\u001b[0m rows,\n\u001b[1;32m 294\u001b[0m j_start, j_stop, j_stride, nj)\n\u001b[1;32m 296\u001b[0m \u001b[39mreturn\u001b[39;00m new\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"# run the multi-shift solver with Krylov dimension m = 100\n",
"x, iter, mv = Algo4(Pt, v, 100, a, tau, maxit, x)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,150 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import scipy as sp"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Arnoldi"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Defined as Algorithm 2 in the paper."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def Arnoldi(A, v0, m):\n",
"    \"\"\"Arnoldi process (Algorithm 2 in the paper).\n",
"\n",
"    Builds an orthonormal basis V of the Krylov subspace K_m(A, v0) together\n",
"    with the (m+1) x m upper-Hessenberg matrix H.\n",
"\n",
"    A  : (n x n) sparse matrix\n",
"    v0 : starting vector (sparse column or 1-D numpy), not necessarily normalized\n",
"    m  : maximum dimension of the Krylov subspace\n",
"\n",
"    Returns (V, H, beta, j): beta = ||v0|| and j is the index of the last\n",
"    completed iteration.\n",
"    \"\"\"\n",
"    v = v0\n",
"    beta = sp.linalg.norm(v)\n",
"    v = v/beta\n",
"    H = sp.sparse.lil_matrix((m+1,m))\n",
"    V = sp.sparse.lil_matrix((A.shape[0],m+1))\n",
"    V[:,0] = v  # each column of V is a basis vector\n",
"    basis = [v]  # basis vectors kept in their native type for the inner products\n",
"\n",
"    for j in range(m):\n",
"        w = A @ v\n",
"        # fix: orthogonalize w against ALL previous basis vectors. The old code\n",
"        # reused the current v for every i and stopped at range(j), so H was\n",
"        # never a proper Hessenberg matrix and V was not orthogonal.\n",
"        for i in range(j+1):\n",
"            H[i,j] = basis[i].T @ w  # 1x1 inner product, O(1) in memory\n",
"            w = w - H[i,j]*basis[i]\n",
"\n",
"        H[j+1,j] = np.linalg.norm(w)\n",
"\n",
"        if H[j+1,j] == 0:\n",
"            print(\"Arnoldi breakdown\")\n",
"            m = j\n",
"            v = 0\n",
"            break\n",
"        else:\n",
"            if j < m-1:\n",
"                v = w/H[j+1,j]\n",
"                V[:,j+1] = v\n",
"                basis.append(v)\n",
"\n",
"    return V, H, beta, j"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Small test case\n",
"\n",
"The final implementation will be using all sparse arrays and matrices, no numpy arrays"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"n = 110  # problem size\n",
"\n",
"# start with a random sparse matrix (10% density)\n",
"A = sp.sparse.rand(n,n, density=0.1, format='lil')\n",
"\n",
"# Starting vector: uniform vector with all entries equal to 1/n\n",
"v = np.repeat(1/n,n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can run the Arnoldi Process"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The number of iterations is: 109\n",
"The matrix H is: (111, 110)\n",
"The matrix V is: (110, 111)\n"
]
}
],
"source": [
"# run the Arnoldi process with m = n = 110 and report the resulting shapes\n",
"V, H, beta, j = Arnoldi(A,v,110)\n",
"print(\"The number of iterations is: \", j)\n",
"print(\"The matrix H is: \", H.shape)\n",
"print(\"The matrix V is: \", V.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As we can see, it returns $H_{m+1}$, an upper-Hessenberg matrix with one extra row."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,829 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"import numpy as np\n",
"import scipy as sp\n",
"import pandas as pd\n",
"import networkx as nx"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Arnoldi \n",
"\n",
"This is a copy of the algorithm defined and tested in the notebook `arnoldi.ipynb`. It's an implementation of the Algorithm 2 from the paper. It's needed during the computation of the shifted GMRES algorithm that we are trying to implement here."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def Arnoldi(A,v0,m):\n",
"    # Arnoldi process (Algorithm 2): builds an orthonormal basis V of the\n",
"    # Krylov subspace K_m(A, v0) and the (m+1) x m upper-Hessenberg matrix H\n",
"    # such that A V_m = V_{m+1} H.\n",
"    v = np.asarray(v0).reshape(-1,1) # work with a column vector regardless of input shape\n",
"    beta = np.linalg.norm(v)\n",
"    v = v/beta\n",
"    H = sp.sparse.lil_matrix((m+1,m))\n",
"    V = sp.sparse.lil_matrix((A.shape[0],m+1))\n",
"    V[:,0] = v # each column of V is a basis vector\n",
"\n",
"    for j in range(m):\n",
"        w = A @ v\n",
"        # BUGFIX: orthogonalize w against ALL previous basis vectors V[:,0..j]\n",
"        # (modified Gram-Schmidt). The original loop used only the current `v`\n",
"        # and range(j), so the basis was never orthogonal and GMRES stagnated.\n",
"        for i in range(j+1):\n",
"            vi = V[:,[i]].toarray()\n",
"            H[i,j] = (vi.T @ w).item()\n",
"            w = w - H[i,j]*vi\n",
"\n",
"        H[j+1,j] = np.linalg.norm(w)\n",
"\n",
"        if H[j+1,j] == 0:\n",
"            # happy breakdown: the Krylov subspace is A-invariant\n",
"            m = j\n",
"            v = 0\n",
"            break\n",
"        else:\n",
"            if j < m-1:\n",
"                v = w/H[j+1,j]\n",
"                V[:,j+1] = v\n",
"\n",
"    return V, H, beta, j "
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Algorithm 4 testing\n",
"\n",
"This algorithm is based on the \"Algorithm 4\" of the paper, the pseudocode provided by the authors is the following \n",
"\n",
"![](https://i.imgur.com/H92fru7.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Auxiliary function"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def compute_gamma(res, a, k):\n",
"    # Scaling factors gamma_i = (res_i * a_k) / (res_k * a_i) used by the\n",
"    # shifted systems (Algorithm 4); the seed index k keeps gamma_k = 1.\n",
"    gamma = np.array([\n",
"        1.0 if i == k else (res[i]*a[k])/(res[k]*a[i])\n",
"        for i in range(len(a))\n",
"    ])\n",
"    return gamma"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Basic test case with random numbers to test the algorithm."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def google_matrix_sparse(G, alpha=0.85, personalization=None, nodelist=None, weight=\"weight\", dangling=None) -> np.matrix:\n",
"    \"\"\"Build the sparse row-stochastic transition matrix of G and the\n",
"    personalization vector.\n",
"\n",
"    Returns (A, p.T) where A is the row-normalized adjacency matrix with\n",
"    dangling rows replaced by `dangling_weights`, and p is the\n",
"    personalization vector.\n",
"\n",
"    NOTE(review): the `-> np.matrix` annotation is inaccurate (a sparse\n",
"    matrix and a vector are returned, except for the empty-graph early exit),\n",
"    and `alpha` is unused here - the damping is applied later by the caller.\n",
"    \"\"\"\n",
"\n",
"    if nodelist is None:\n",
"        nodelist = list(G)\n",
"\n",
"    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"lil\", dtype=int)\n",
"\n",
"    N = len(G)\n",
"    if N == 0:\n",
"        return np.asmatrix(A)\n",
"\n",
"    # Personalization vector\n",
"    if personalization is None:\n",
"        # uniform teleportation, stored sparse\n",
"        p = np.repeat(1.0 / N, N)\n",
"        p = sp.sparse.lil_array(p)\n",
"    else:\n",
"        # NOTE(review): this branch leaves p as a dense numpy array, while the\n",
"        # branch above produces a sparse lil_array - callers see two different\n",
"        # types depending on `personalization`; confirm which one is intended\n",
"        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)\n",
"        if p.sum() == 0:\n",
"            raise ZeroDivisionError\n",
"        p /= p.sum()\n",
"\n",
"    # Dangling nodes\n",
"    if dangling is None:\n",
"        # uniform redistribution from dangling nodes\n",
"        dangling_weights = np.ones(N, dtype=int)\n",
"        dangling_weights = sp.sparse.lil_array(dangling_weights, dtype=int)\n",
"    else:\n",
"        # Convert the dangling dictionary into an array in nodelist order\n",
"        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)\n",
"        dangling_weights /= dangling_weights.sum()\n",
"\n",
"    # replace rows with all zeros with dangling_weights\n",
"    # NOTE(review): the extra brackets make this a fancy index with a single\n",
"    # boolean-array element - verify it selects the intended (dangling) rows\n",
"    A[[A.sum(axis=1)==0],:] = dangling_weights\n",
"\n",
"    # Normalize rows so each row sums to 1\n",
"    row_sums = A.sum(axis=1) # row sums\n",
"    r_inv = np.power(row_sums.astype(float), -1).flatten() # inverse of row sums\n",
"    r_inv[np.isinf(r_inv)] = 0.0 # replace inf with 0 (all-zero rows)\n",
"    R = sp.sparse.diags(r_inv) # create diagonal matrix\n",
"    A = R.dot(A) \n",
"\n",
"    return A, p.T"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def Algo4(Pt, v, m, a: list, tau, maxit: int, x):\n",
"    \"\"\"Shifted GMRES for multi-damping PageRank (Algorithm 4 of the paper).\n",
"\n",
"    Pt: (transposed) transition matrix; v: personalization vector;\n",
"    m: Krylov subspace dimension; a: list of damping factors; tau: tolerance;\n",
"    maxit: maximum number of outer iterations; x: kept for interface\n",
"    compatibility, the iteration always starts from the zero vector.\n",
"    Returns (x, iter, mv) on convergence; raises on maxit exhaustion.\n",
"    \"\"\"\n",
"    mv, iter = 0, 1 # mv = number of matrix-vector products, iter = outer iterations\n",
"\n",
"    # each column of x is the PageRank vector for a different alpha\n",
"    x = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
"\n",
"    I = sp.sparse.eye(Pt.shape[0], Pt.shape[1], format='lil')\n",
"\n",
"    # residual matrix: column i is the residual of the i-th shifted system\n",
"    r = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
"    res = np.zeros(len(a))\n",
"\n",
"    for i in range(len(a)):\n",
"        r[:,[i]] = ((1 - a[i])/a[i])*v - ((1/a[i])*I - Pt).dot(x[:,[i]])\n",
"        res[i] = sp.sparse.linalg.norm(r[:,[i]], ord='fro') # == l2 norm for a single column\n",
"\n",
"    for _ in range(maxit):\n",
"        # BUGFIX: take absolute values BEFORE the max; np.absolute(np.max(res))\n",
"        # ignores large negative entries of res\n",
"        err = np.max(np.absolute(res))\n",
"        if err < tau:\n",
"            print(\"Computation ended successfully in \", iter, \" iterations and \", mv, \" matrix-vector products.\")\n",
"            return x, iter, mv\n",
"\n",
"        print(\"\\niter = \", iter)\n",
"        print(\"res: \", res)\n",
"        print(\"err = \", err)\n",
"\n",
"        # seed system: the shift with the largest residual magnitude\n",
"        k = int(np.argmax(np.absolute(res)))\n",
"        print(\"k = \", k)\n",
"\n",
"        # compute gamma as defined in the paper\n",
"        gamma = compute_gamma(res, a, k)\n",
"\n",
"        # Run Arnoldi on the seed system\n",
"        r_k = r[:,[k]].toarray() # r_k is the residual vector for alpha_k\n",
"        V, H, beta, j = Arnoldi((1/a[k])*I - Pt, r_k, m)\n",
"        H = H[:-1,:] # drop the extra Hessenberg row\n",
"        V = V[:,:-1] # drop the extra basis column\n",
"        mv = mv + j # update the number of matrix-vector products\n",
"\n",
"        H_e1 = np.zeros(H.shape[0])\n",
"        H_e1[0] = 1 # canonical vector e1 of size H.shape[0]\n",
"\n",
"        y = sp.sparse.lil_matrix((H.shape[1],len(a)))\n",
"\n",
"        # y_k is the minimizer of || beta*e1 - H y ||_2 (least squares)\n",
"        y[:,[k]] = sp.sparse.linalg.lsqr(H, beta*H_e1)[0].reshape(-1,1)\n",
"\n",
"        # BUGFIX: update a COPY; `x_new = x` aliased the lil_matrix, so the\n",
"        # previous iterate was silently overwritten\n",
"        x_new = x.copy()\n",
"        x_new[:,[k]] = x[:,[k]] + V @ y[:,[k]]\n",
"\n",
"        V_e1 = np.zeros(V.shape[0])\n",
"        V_e1[0] = 1\n",
"        V_e1 = sp.sparse.lil_matrix(V_e1) # canonical vector e1 of size V.shape[0]\n",
"\n",
"        # update the seed residual norm\n",
"        res[k] = a[k]*(sp.sparse.linalg.norm(beta*V_e1.T - V @ y[:,[k]]))\n",
"\n",
"        # multi-shift: reuse the Krylov basis for the remaining damping factors.\n",
"        # BUGFIX: each shifted Hessenberg matrix must be built from the ORIGINAL\n",
"        # seed H; the old code accumulated the shifts across iterations of i.\n",
"        for i in range(len(a)):\n",
"            if i == k:\n",
"                continue\n",
"            if res[i] >= tau:\n",
"                H_i = H + ((1-a[i])/a[i] - (1-a[k])/a[k])*sp.sparse.eye(H.shape[0], H.shape[1], format='lil')\n",
"\n",
"                # z = beta*e1 - H y_k (BUGFIX: the old code used the hardcoded\n",
"                # column y[:,[1]] instead of y[:,[k]])\n",
"                z1 = (beta*H_e1).reshape(-1,1)\n",
"                z2 = (H @ y[:,[k]]).toarray().reshape(-1,1)\n",
"                z = z1 - z2\n",
"\n",
"                # augmented least-squares system [H_i | z] [y_i; gamma_i] = beta*e1\n",
"                A = sp.sparse.hstack([H_i, z])\n",
"                b = beta*H_e1\n",
"                to_split = sp.sparse.linalg.lsqr(A, b.reshape(b.shape[0],1))[0]\n",
"\n",
"                # the last entry is gamma_i, the rest is y_i\n",
"                y[:,[i]] = to_split[:-1].reshape(-1,1)\n",
"                gamma[i] = to_split[-1]\n",
"\n",
"                # update x and the residual estimate for shift i\n",
"                x_new[:,[i]] = x[:,[i]] + V @ y[:,[i]]\n",
"                res[i] = (a[i]/a[k])*gamma[i]*res[k]\n",
"            else:\n",
"                print(\"res[\", i, \"] is smaller than tau = \", tau, \" at iteration \", iter)\n",
"\n",
"        iter += 1\n",
"        x = x_new\n",
"\n",
"    raise Exception('Maximum number of iterations reached')"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"G = nx.watts_strogatz_graph(1000, 4, 0.1)\n",
"Pt, v = google_matrix_sparse(G)\n",
"# a = [0.85, 0.86, 0.87, 0.88, 0.89, 0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]\n",
"a = [0.85, 0.90, 0.95, 0.99]\n",
"n = len(G.nodes)\n",
"x = sp.sparse.random(n, len(a), density=0.1, format='lil')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"iter = 1\n",
"res: [0.00558049 0.00351364 0.00166436 0.00031942]\n",
"err = 0.005580489988532435\n",
"k = 0\n",
"\n",
"iter = 2\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 3\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 4\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 5\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 6\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 7\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 8\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 9\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 10\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 11\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 12\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 13\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 14\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 15\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 16\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 17\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 18\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 19\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 20\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 21\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 22\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 23\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 24\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 25\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 26\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 27\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 28\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 29\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 30\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 31\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 32\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 33\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 34\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 35\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 36\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 37\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 38\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 39\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 40\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 41\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 42\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 43\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 44\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 45\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 46\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 47\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 48\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 49\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 50\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 51\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 52\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 53\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 54\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 55\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 56\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 57\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 58\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 59\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 60\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 61\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 62\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 63\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 64\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 65\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 66\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 67\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 68\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 69\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 70\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 71\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 72\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 73\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 74\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 75\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 76\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 77\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 78\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 79\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 80\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 81\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 82\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 83\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 84\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 85\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 86\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 87\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 88\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 89\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 90\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 91\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 92\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 93\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 94\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 95\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 96\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 97\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 98\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 99\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 100\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n"
]
},
{
"ename": "Exception",
"evalue": "Maximum number of iterations reached",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m x, \u001b[38;5;28miter\u001b[39m, mv \u001b[38;5;241m=\u001b[39m \u001b[43mAlgo4\u001b[49m\u001b[43m(\u001b[49m\u001b[43mPt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1e-6\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[5], line 101\u001b[0m, in \u001b[0;36mAlgo4\u001b[0;34m(Pt, v, m, a, tau, maxit, x)\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;28miter\u001b[39m \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 99\u001b[0m x \u001b[38;5;241m=\u001b[39m x_new\n\u001b[0;32m--> 101\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mMaximum number of iterations reached\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
"\u001b[0;31mException\u001b[0m: Maximum number of iterations reached"
]
}
],
"source": [
"x, iter, mv = Algo4(Pt, v, 100, a, 1e-6, 100, x)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Loading…
Cancel
Save