{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import networkx as nx\n", "import time\n", "import math\n", "import pandas as pd\n", "import scipy as sp\n", "import plotly.express as px\n", "import plotly.graph_objs as go\n", "from scipy.sparse import *\n", "from scipy import linalg\n", "from scipy.sparse.linalg import norm\n", "from scipy.optimize import least_squares" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Arnoldi \n", "\n", "This is a copy of the algorithm defined and tested in the notebook `algo2_testing`. It's an implementation of the Algorithm 2 from the paper. It's needed in this notebook since this function is called by the `algo4` function. It's implemented to return exactly what's needed in the `algo4` function.\n", "\n", "Everything will be reorganized in the main.py file once everything is working." ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def Arnoldi(A,v0,m):\n", " v = v0\n", " beta = np.linalg.norm(v)\n", " v = v/beta\n", " H = sp.sparse.lil_matrix((m+1,m)) \n", " V = sp.sparse.lil_matrix((A.shape[0],m+1))\n", " V[:,0] = v # each column of V is a vector v\n", "\n", " for j in range(m):\n", " w = A @ v \n", " for i in range(j):\n", " H[i,j] = v.T @ w # tmp is a 1x1 matrix, so it's O(1) in memory\n", " w = w - H[i,j]*v \n", " \n", " H[j+1,j] = np.linalg.norm(w)\n", "\n", " if H[j+1,j] == 0:\n", " # print(\"Arnoldi breakdown\")\n", " m = j\n", " v = 0\n", " break\n", " else:\n", " if j < m-1:\n", " v = w/H[j+1,j]\n", " V[:,j+1] = v\n", "\n", " return V, H, beta, j " ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Algorithm 4 testing\n", "\n", "This algorithm is based on the \"Algorithm 4\" of the paper, the pseudocode provided by the authors is the following \n", "\n", "![](https://i.imgur.com/H92fru7.png)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def compute_gamma(res, a, k): # function to compute gamma\n", " gamma = np.ones(len(a))\n", " for i in range(len(a)):\n", " if i != k:\n", " gamma[i] = (res[i]*a[k])/(res[k]*a[i])\n", " else:\n", " gamma[i] = 0\n", " return gamma" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Basic test case with random numbers to test the algorithm." 
] }, { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [], "source": [
"def compute_ptilde(G: nx.Graph):\n",
"    \"\"\"\n",
"    Compute the ptilde matrix and the probability vector v\n",
"    :param G: the graph\n",
"    :return: the ptilde matrix and the probability vector v\n",
"\n",
"    \"\"\"\n",
"\n",
"    # given the graph G, return its sparse matrix representation\n",
"    A = nx.to_scipy_sparse_array(G, format='lil')\n",
"\n",
"    # dangling-node indicator d (sparse row vector): d[i] = 1 if the i-th row of A is null, else 0\n",
"    d = sp.sparse.lil_matrix((1, A.shape[0]))\n",
"    for i in range(A.shape[0]):\n",
"        # s is the sum of the i-th row of A\n",
"        s = A[[i],:].sum()\n",
"        if s == 0:\n",
"            d[0,i] = 1\n",
"\n",
"    # uniform probability vector v = 1/n\n",
"    v = np.repeat(1/A.shape[0], A.shape[0])\n",
"\n",
"    # initialize the ptilde matrix\n",
"    P = sp.sparse.lil_matrix((A.shape[0], A.shape[1]))\n",
"\n",
"    # P(i,j) = 1/(number of non-null entries in column j) if A(i,j) != 0, else 0\n",
"    for j in range(A.shape[1]):\n",
"        nnz_j = (A[:,[j]] != 0).sum() # number of non-null entries in column j\n",
"        for i in range(A.shape[0]):\n",
"            if A[i,j] != 0:\n",
"                P[i,j] = 1/nnz_j\n",
"\n",
"    # Pt = P + v*d (outer product): the null (dangling) columns of P are replaced by v\n",
"    Pt = P + sp.sparse.lil_matrix(v.reshape(-1,1)) @ d\n",
"\n",
"    return Pt, v " ] },
{ "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [], "source": [
"def Algo4(Pt, v, m, a: list, tau, maxit: int, x):\n",
"\n",
"    mv, iter = 0, 1 # mv counts the matrix-vector products, iter counts the outer iterations\n",
"\n",
"    # initialize x as an all-zero sparse matrix (the x passed as argument is overwritten). Each col is the pagerank approximation for a different alpha\n",
"    x = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
"\n",
"    # initialize the identity matrix of size Pt.shape\n",
"    I = sp.sparse.eye(Pt.shape[0], Pt.shape[1], format='lil')\n",
"\n",
"    # the residual r is a matrix of size (n, len(a)); each col is the residual vector for a different alpha.
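\n",
"\n",
"    # A cheaper initialization (a sketch, assuming the shifted systems are (I - a[i]*Pt) x = (1 - a[i]) v\n",
"    # and the initial guess x = 0) would take each residual column equal to the right-hand side:\n",
"    #     r[:,[i]] = (1 - a[i]) * v.reshape(-1,1)\n",
"    # The loop below instead obtains each column with a direct sparse solve of the shifted system.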
\n", " r = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n", " res = np.zeros(len(a))\n", "\n", " # compute the residual vector and the norm of each col in the vector res\n", " for i in range(len(a)):\n", " r[:,[i]] = sp.sparse.linalg.spsolve(I - a[i]*Pt, v)\n", " col = r[:,[i]].toarray()\n", " res[i] = np.linalg.norm(col)\n", "\n", " # this is a while loop in the paper\n", " for _ in range(maxit):\n", " # check if we have converged\n", " err = np.absolute(np.max(res))\n", " if err < tau:\n", " print(\"Computation ended successfully in \", iter, \" iterations and \", mv, \" matrix-vector products.\")\n", " return x, iter, mv\n", "\n", " print(\"\\niter = \", iter)\n", " print(\"res: \", res)\n", " print(\"err = \", err)\n", "\n", " # find k as the index of the largest residual\n", " k = int(np.argmax(res))\n", " print(\"k = \", k)\n", "\n", " # compute gamma as defined in the paper\n", " gamma = compute_gamma(res, a, k)\n", " \n", " # Run Arnoldi\n", " r_k = r[:,[k]].toarray() # r_k is the residual vector for alpha_k\n", " A_arnoldi = (1/a[k])*I - Pt # A_arnoldi is the matrix used in Arnoldi\n", " V, H, beta, j = Arnoldi((1/a[k])*I - Pt, r_k, m) # V is the matrix of vectors v, H is the Hessenberg matrix, beta is the norm of the last vector v, j is the number of iterations of Arnoldi\n", " H = H[:-1,:] # remove the last row of H\n", " V = V[:,:-1] # remove the last col of V\n", " mv = mv + j # update the number of matrix-vector products\n", "\n", " H_e1 = np.zeros(H.shape[0]) \n", " H_e1[0] = 1 # canonical vector e1 of size H.shape[0]\n", "\n", " # compute y as the minimizer of || beta*e1 - Hy ||_2 using the least squares method\n", " y = sp.sparse.lil_matrix((H.shape[1],len(a))) # y is the matrix of vectors y, each col is the vector y for a different alpha\n", "\n", " # we only need the k-th col of y in this iteration\n", " y[:,[k]] = sp.sparse.linalg.lsqr(H, beta*H_e1)[0]\n", " y_k = y[:,[k]].toarray()\n", "\n", " # # Update x\n", " x_new = x\n", " x_new[:,[k]] = x[:,[k]] + V @ y_k\n", "\n", " V_e1 = np.zeros(V.shape[0])\n", " V_e1[0] = 1 # canonical vector e1 of size V.shape[0]\n", "\n", " # Update res[k]\n", " norm_k =np.linalg.norm(beta*V_e1 - V @ y_k) # this returns a scalar\n", " res[k] = a[k]*norm_k\n", "\n", " # multi shift\n", " for i in range(len(a)):\n", " if i != k and res[i] >= tau:\n", " if res[i] >= tau:\n", " \n", " H = H + ((1-a[i])/a[i] - (1-a[k])/a[k])*sp.sparse.eye(H.shape[0], H.shape[1], format='lil')\n", "\n", " # Compute z as described in the paper\n", " z1 = H_e1*beta\n", " z1 = z1.reshape(z1.shape[0],1)\n", " z2 = H @ y[:,[1]]\n", " z2 = z2.reshape(z2.shape[0],1)\n", " z = z1 - z2\n", "\n", " # Solve the linear system for A and b\n", " A = sp.sparse.hstack([H, z])\n", " b = (beta*H_e1)\n", "\n", " # use the least squares method to solve the linear system\n", " to_split = sp.sparse.linalg.lsqr(A, b.reshape(b.shape[0],1))[0]\n", " \n", " # the last element of to_split is the last element of gamma[i], the other elements are the elements of y[:[i]]\n", " y[:,[i]] = to_split[:-1]\n", " gamma[i] = to_split[-1]\n", "\n", " # update x\n", " x_new[:,i] = x[:,i] + V @ y[:,[i]]\n", "\n", " # update the residual vector\n", " # print(\"\\tupdating res[\", i, \"]\")\n", " # print(\"\\tgamma[\", i, \"] = \", gamma[i])\n", " # print(\"\\tres[\", k, \"] = \", res[k])\n", " # print(\"\\ta[\", i, \"] = \", a[i])\n", " # print(\"\\ta[\", k, \"] = \", a[k])\n", " res[i] = (a[i]/a[k])*gamma[i]*res[k]\n", " # print(\"\\tupdated res[\", i, \"] = \", res[i])\n", " # print()\n", "\n", " 
else:\n", " if res[i] < tau:\n", " print(\"res[\", i, \"] is smaller than tau = \", tau, \" at iteration \", iter)\n", "\n", " iter = iter + 1\n", " x = x_new\n", "\n", " raise Exception('Maximum number of iterations reached')" ] }, { "cell_type": "code", "execution_count": 86, "metadata": {}, "outputs": [], "source": [ "G = nx.watts_strogatz_graph(1000, 4, 0.1)\n", "Pt, v = compute_ptilde(G)\n", "# a = [0.85, 0.86, 0.87, 0.88, 0.89, 0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]\n", "a = [0.85, 0.86, 0.87, 0.88]\n", "tau = 1e-6\n", "maxit = 100\n", "n = len(G.nodes)\n", "x = sp.sparse.random(n, len(a), density=0.1, format='lil')" ] }, { "cell_type": "code", "execution_count": 87, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/usr/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py:168: SparseEfficiencyWarning: spsolve requires A be CSC or CSR matrix format\n", " warn('spsolve requires A be CSC or CSR matrix format',\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "iter = 1\n", "res: [0.21235414 0.22756312 0.24511308 0.26558937]\n", "err = 0.26558937251088227\n", "k = 3\n", "\n", "\n", "iter = 2\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 3\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 4\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 5\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 6\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 7\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 8\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 9\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 10\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 11\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 12\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 13\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 14\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 15\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 16\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 17\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 18\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 19\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 20\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 
21\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 22\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 23\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 24\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 25\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 26\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 27\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 28\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 29\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 30\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 31\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 32\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 33\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 34\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 35\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 36\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 37\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 38\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 39\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 40\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 41\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 42\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 43\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 44\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 45\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 46\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 47\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 48\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 49\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 
10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 50\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 51\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 52\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 53\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 54\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 55\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 56\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 57\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 58\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 59\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 60\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 61\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 62\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 63\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 64\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 65\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 66\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 67\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 68\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 69\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 70\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 71\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 72\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 73\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 74\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 75\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 76\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 77\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 78\n", "res: [ 0.81703891 
0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 79\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 80\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 81\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 82\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 83\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 84\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 85\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n", "\n", "\n", "iter = 86\n", "res: [ 0.81703891 0.8829876 0.10190349 10.69433448]\n", "err = 10.69433447757171\n", "k = 3\n" ] }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[87], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m x, \u001b[38;5;28miter\u001b[39m, mv \u001b[38;5;241m=\u001b[39m \u001b[43mAlgo4\u001b[49m\u001b[43m(\u001b[49m\u001b[43mPt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtau\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmaxit\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n", "Cell \u001b[0;32mIn[85], line 46\u001b[0m, in \u001b[0;36mAlgo4\u001b[0;34m(Pt, v, m, a, tau, maxit, x)\u001b[0m\n\u001b[1;32m 44\u001b[0m V, H, beta, j \u001b[38;5;241m=\u001b[39m Arnoldi((\u001b[38;5;241m1\u001b[39m\u001b[38;5;241m/\u001b[39ma[k])\u001b[38;5;241m*\u001b[39mI \u001b[38;5;241m-\u001b[39m Pt, r_k, m) \u001b[38;5;66;03m# V is the matrix of vectors v, H is the Hessenberg matrix, beta is the norm of the last vector v, j is the number of iterations of Arnoldi\u001b[39;00m\n\u001b[1;32m 45\u001b[0m H \u001b[38;5;241m=\u001b[39m H[:\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m,:] \u001b[38;5;66;03m# remove the last row of H\u001b[39;00m\n\u001b[0;32m---> 46\u001b[0m V \u001b[38;5;241m=\u001b[39m \u001b[43mV\u001b[49m\u001b[43m[\u001b[49m\u001b[43m:\u001b[49m\u001b[43m,\u001b[49m\u001b[43m:\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m \u001b[38;5;66;03m# remove the last col of V\u001b[39;00m\n\u001b[1;32m 47\u001b[0m mv \u001b[38;5;241m=\u001b[39m mv \u001b[38;5;241m+\u001b[39m j \u001b[38;5;66;03m# update the number of matrix-vector products\u001b[39;00m\n\u001b[1;32m 49\u001b[0m H_e1 \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mzeros(H\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m]) \n", "File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:211\u001b[0m, in \u001b[0;36mlil_matrix.__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 209\u001b[0m \u001b[39mreturn\u001b[39;00m 
\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_get_intXint(\u001b[39m*\u001b[39mkey)\n\u001b[1;32m 210\u001b[0m \u001b[39m# Everything else takes the normal path.\u001b[39;00m\n\u001b[0;32m--> 211\u001b[0m \u001b[39mreturn\u001b[39;00m IndexMixin\u001b[39m.\u001b[39;49m\u001b[39m__getitem__\u001b[39;49m(\u001b[39mself\u001b[39;49m, key)\n", "File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_index.py:69\u001b[0m, in \u001b[0;36mIndexMixin.__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[39mif\u001b[39;00m row \u001b[39m==\u001b[39m \u001b[39mslice\u001b[39m(\u001b[39mNone\u001b[39;00m) \u001b[39mand\u001b[39;00m row \u001b[39m==\u001b[39m col:\n\u001b[1;32m 68\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcopy()\n\u001b[0;32m---> 69\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_get_sliceXslice(row, col)\n\u001b[1;32m 70\u001b[0m \u001b[39melif\u001b[39;00m col\u001b[39m.\u001b[39mndim \u001b[39m==\u001b[39m \u001b[39m1\u001b[39m:\n\u001b[1;32m 71\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_get_sliceXarray(row, col)\n", "File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:241\u001b[0m, in \u001b[0;36mlil_matrix._get_sliceXslice\u001b[0;34m(self, row, col)\u001b[0m\n\u001b[1;32m 239\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_get_sliceXslice\u001b[39m(\u001b[39mself\u001b[39m, row, col):\n\u001b[1;32m 240\u001b[0m row \u001b[39m=\u001b[39m \u001b[39mrange\u001b[39m(\u001b[39m*\u001b[39mrow\u001b[39m.\u001b[39mindices(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mshape[\u001b[39m0\u001b[39m]))\n\u001b[0;32m--> 241\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_get_row_ranges(row, col)\n", "File \u001b[0;32m/usr/lib/python3.10/site-packages/scipy/sparse/_lil.py:290\u001b[0m, in \u001b[0;36mlil_matrix._get_row_ranges\u001b[0;34m(self, rows, col_slice)\u001b[0m\n\u001b[1;32m 287\u001b[0m nj \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(col_range)\n\u001b[1;32m 288\u001b[0m new \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lil_container((\u001b[39mlen\u001b[39m(rows), nj), dtype\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdtype)\n\u001b[0;32m--> 290\u001b[0m _csparsetools\u001b[39m.\u001b[39;49mlil_get_row_ranges(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshape[\u001b[39m0\u001b[39;49m], \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshape[\u001b[39m1\u001b[39;49m],\n\u001b[1;32m 291\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mrows, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdata,\n\u001b[1;32m 292\u001b[0m new\u001b[39m.\u001b[39;49mrows, new\u001b[39m.\u001b[39;49mdata,\n\u001b[1;32m 293\u001b[0m rows,\n\u001b[1;32m 294\u001b[0m j_start, j_stop, j_stride, nj)\n\u001b[1;32m 296\u001b[0m \u001b[39mreturn\u001b[39;00m new\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "x, iter, mv = Algo4(Pt, v, 100, a, tau, maxit, x)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3.10.8 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.8" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": 
"e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a" } } }, "nbformat": 4, "nbformat_minor": 2 }