{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"import numpy as np\n",
"import scipy as sp\n",
"import pandas as pd\n",
"import networkx as nx"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Arnoldi \n",
"\n",
"This is a copy of the algorithm defined and tested in the notebook `arnoldi.ipynb`. It's an implementation of the Algorithm 2 from the paper. It's needed during the computation of the shifted GMRES algorithm that we are trying to implement here."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def Arnoldi(A,v0,m):\n",
" v = v0\n",
" beta = np.linalg.norm(v)\n",
" v = v/beta\n",
" H = sp.sparse.lil_matrix((m+1,m)) \n",
" V = sp.sparse.lil_matrix((A.shape[0],m+1))\n",
" V[:,0] = v # each column of V is a vector v\n",
"\n",
" for j in range(m):\n",
" w = A @ v \n",
" for i in range(j):\n",
" H[i,j] = v.T @ w # tmp is a 1x1 matrix, so it's O(1) in memory\n",
" w = w - H[i,j]*v \n",
" \n",
" H[j+1,j] = np.linalg.norm(w)\n",
"\n",
" if H[j+1,j] == 0:\n",
" # print(\"Arnoldi breakdown\")\n",
" m = j\n",
" v = 0\n",
" break\n",
" else:\n",
" if j < m-1:\n",
" v = w/H[j+1,j]\n",
" V[:,j+1] = v\n",
"\n",
" return V, H, beta, j "
]
},
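{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (not part of the paper's algorithms): on a small random sparse matrix we can verify the Arnoldi relation $A V_m = V_{m+1} \\bar H_m$ and the orthonormality of the Krylov basis. The matrix `A_test`, the starting vector `v_test` and the size `m_test` below are arbitrary choices made only for this check."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check for the Arnoldi routine above (A_test, v_test, m_test are arbitrary)\n",
"n_test, m_test = 50, 10\n",
"rng = np.random.default_rng(0)\n",
"A_test = sp.sparse.random(n_test, n_test, density=0.1, format='csr', random_state=0)\n",
"v_test = rng.random((n_test, 1))\n",
"\n",
"V_t, H_t, beta_t, _ = Arnoldi(A_test, v_test, m_test)\n",
"V_t, H_t = V_t.toarray(), H_t.toarray()\n",
"\n",
"# Arnoldi relation A V_m = V_{m+1} H_bar (up to round-off)\n",
"print('relation error:', np.linalg.norm(A_test @ V_t[:, :m_test] - V_t @ H_t))\n",
"# orthonormality of the Krylov basis\n",
"print('orthogonality error:', np.linalg.norm(V_t.T @ V_t - np.eye(m_test + 1)))"
]
},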
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Algorithm 4 testing\n",
"\n",
"This algorithm is based on the \"Algorithm 4\" of the paper, the pseudocode provided by the authors is the following \n",
"\n",
"![](https://i.imgur.com/H92fru7.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Auxiliary function"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def compute_gamma(res, a, k): # function to compute gamma\n",
" gamma = np.ones(len(a))\n",
" for i in range(len(a)):\n",
" if i != k:\n",
" gamma[i] = (res[i]*a[k])/(res[k]*a[i])\n",
"\n",
" return gamma"
]
},
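{
"cell_type": "markdown",
"metadata": {},
"source": [
"A tiny illustrative call (the residual norms and damping factors below are made up): `gamma[k]` stays equal to 1, while the other entries rescale the residual norm of the seed system `k` to the remaining shifts."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative call with made-up residual norms and damping factors\n",
"res_demo = np.array([1e-3, 5e-4, 2e-4])\n",
"a_demo = [0.85, 0.90, 0.99]\n",
"print(compute_gamma(res_demo, a_demo, 0))"
]
},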
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Auxiliary function to compute the $\\tilde P$ matrix described in the reference paper"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def google_matrix_sparse(G, alpha=0.85, personalization=None, nodelist=None, weight=\"weight\", dangling=None) -> np.matrix:\n",
"\n",
" if nodelist is None:\n",
" nodelist = list(G)\n",
"\n",
" A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"lil\", dtype=int)\n",
"\n",
" N = len(G)\n",
" if N == 0:\n",
" return np.asmatrix(A)\n",
"\n",
" # Personalization vector\n",
" if personalization is None:\n",
" p = np.repeat(1.0 / N, N)\n",
" p = sp.sparse.lil_array(p)\n",
" else:\n",
" p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)\n",
" if p.sum() == 0:\n",
" raise ZeroDivisionError\n",
" p /= p.sum()\n",
"\n",
" # Dangling nodes\n",
" if dangling is None:\n",
" dangling_weights = np.ones(N, dtype=int)\n",
" dangling_weights = sp.sparse.lil_array(dangling_weights, dtype=int)\n",
" else:\n",
" # Convert the dangling dictionary into an array in nodelist order\n",
" dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)\n",
" dangling_weights /= dangling_weights.sum()\n",
"\n",
" # replace rows with all zeros with dangling_weights\n",
" A[[A.sum(axis=1)==0],:] = dangling_weights\n",
"\n",
" # Normalize rows\n",
" row_sums = A.sum(axis=1) # row sums\n",
" r_inv = np.power(row_sums.astype(float), -1).flatten() # inverse of row sums\n",
" r_inv[np.isinf(r_inv)] = 0.0 # replace inf with 0\n",
" R = sp.sparse.diags(r_inv) # create diagonal matrix\n",
" A = R.dot(A) \n",
"\n",
" return A, p.T"
]
},
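{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check (not from the paper), the matrix returned by `google_matrix_sparse` should be row-stochastic, and recombining it with a damping factor should reproduce `networkx.google_matrix` on a small test graph. The graph and the value of `alpha` used below are arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Row-stochasticity check on a small test graph (arbitrary choice)\n",
"G_check = nx.karate_club_graph()\n",
"P_check, v_check = google_matrix_sparse(G_check)\n",
"print('max |row sum - 1| =', np.max(np.abs(P_check.sum(axis=1) - 1)))\n",
"\n",
"# networkx builds the full (dense) Google matrix alpha*P + (1-alpha)*ones*v^T;\n",
"# here only P and v are returned, so we recombine them for the comparison\n",
"alpha_check = 0.85\n",
"N_check = len(G_check)\n",
"G_full = alpha_check*P_check.toarray() + (1 - alpha_check)*np.outer(np.ones(N_check), v_check.toarray().flatten())\n",
"print('difference from nx.google_matrix:', np.max(np.abs(G_full - np.asarray(nx.google_matrix(G_check, alpha=alpha_check)))))"
]
},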
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`Algorithm 4` described in the reference paper "
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def Algo4(Pt, v, m, a: list, tau, maxit: int, x):\n",
"\n",
" mv, iter = 0, 1 # mv is the number of matrix-vector products, iter is the number of iterations\n",
" \n",
" # initialize x as a random sparse matrix. Each col is the pagerank vector for a different alpha\n",
" x = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
"\n",
" # initialize the identity matrix of size Pt.shape\n",
" I = sp.sparse.eye(Pt.shape[0], Pt.shape[1], format='lil')\n",
"\n",
" # compute the residual vector, it is a matrix of size (n, len(a)). Each col is the residual vector for a different alpha. \n",
" r = sp.sparse.lil_matrix((Pt.shape[0], len(a)))\n",
" res = np.zeros(len(a))\n",
"\n",
" # compute the residual vector and the norm of each col in the vector res\n",
" for i in range(len(a)):\n",
" r[:,[i]] = ((1 - a[i])/a[i])*v - ((1/a[i])*I - Pt).dot(x[:,[i]])\n",
" res[i] = sp.sparse.linalg.norm(r[:,[i]], ord='fro') # frobenius norm since l2 norm is not defined for sparse matrices in scipy\n",
"\n",
" for _ in range(maxit):\n",
" err = np.absolute(np.max(res)) # check if we have converged\n",
" if err < tau:\n",
" print(\"Computation ended successfully in \", iter, \" iterations and \", mv, \" matrix-vector products.\")\n",
" return x, iter, mv\n",
"\n",
" print(\"\\niter = \", iter)\n",
" print(\"res: \", res)\n",
" print(\"err = \", err)\n",
"\n",
" # find k as the index of the largest residual\n",
" k = int(np.argmax(res))\n",
" print(\"k = \", k)\n",
"\n",
" # compute gamma as defined in the paper\n",
" gamma = compute_gamma(res, a, k)\n",
" \n",
" # Run Arnoldi\n",
" r_k = r[:,[k]].toarray() # r_k is the residual vector for alpha_k\n",
" V, H, beta, j = Arnoldi((1/a[k])*I - Pt, r_k, m) # run Arnoldi\n",
" H = H[:-1,:] # remove the last row of H\n",
" V = V[:,:-1] # remove the last col of V\n",
" mv = mv + j # update the number of matrix-vector products\n",
"\n",
" H_e1 = np.zeros(H.shape[0]) \n",
" H_e1[0] = 1 # canonical vector e1 of size H.shape[0]\n",
"\n",
" # compute y as the minimizer of || beta*e1 - Hy ||_2 using the least squares method\n",
" y = sp.sparse.lil_matrix((H.shape[1],len(a))) \n",
"\n",
" # we only need the k-th col of y in this iteration\n",
" y[:,[k]] = sp.sparse.linalg.lsqr(H, beta*H_e1)[0]\n",
"\n",
" # # Update x\n",
" x_new = x\n",
" x_new[:,[k]] = x[:,[k]] + V @ y[:,[k]]\n",
"\n",
" V_e1 = np.zeros(V.shape[0])\n",
" V_e1[0] = 1 \n",
" V_e1 = sp.sparse.lil_matrix(V_e1) # canonical vector e1 of size V.shape[0]\n",
"\n",
" # Update res[k]\n",
" res[k] = a[k]* (sp.sparse.linalg.norm(beta*V_e1.T - V @ y[:,[k]]))\n",
"\n",
" # multi shift\n",
" for i in range(len(a)):\n",
" if i != k and res[i] >= tau:\n",
" if res[i] >= tau:\n",
" \n",
" H = H + ((1-a[i])/a[i] - (1-a[k])/a[k])*sp.sparse.eye(H.shape[0], H.shape[1], format='lil')\n",
"\n",
" # Compute z as described in the paper\n",
" z1 = H_e1.T*beta\n",
" z1 = z1.reshape(z1.shape[0],1)\n",
" z2 = H @ y[:,[1]]\n",
" z2 = z2.reshape(z2.shape[0],1)\n",
" z = z1 - z2\n",
"\n",
" # Solve the linear system for A and b\n",
" A = sp.sparse.hstack([H, z])\n",
" b = (beta*H_e1)\n",
"\n",
" to_split = sp.sparse.linalg.lsqr(A, b.reshape(b.shape[0],1))[0]\n",
" \n",
" # the last element of to_split is the last element of gamma[i], the other elements are the elements of y[:[i]]\n",
" y[:,[i]] = to_split[:-1]\n",
" gamma[i] = to_split[-1]\n",
"\n",
" # update x\n",
" x_new[:,i] = x[:,i] + V @ y[:,[i]]\n",
"\n",
" # update res[i]\n",
" res[i] = (a[i]/a[k])*gamma[i]*res[k]\n",
"\n",
" else:\n",
" if res[i] < tau:\n",
" print(\"res[\", i, \"] is smaller than tau = \", tau, \" at iteration \", iter)\n",
"\n",
" iter += 1\n",
" x = x_new\n",
"\n",
" raise Exception('Maximum number of iterations reached')"
]
},
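{
"cell_type": "markdown",
"metadata": {},
"source": [
"The residuals in `Algo4` correspond to the shifted linear systems $\\left(\\frac{1}{\\alpha}I - \\tilde P^T\\right)x = \\frac{1-\\alpha}{\\alpha}v$, which are equivalent to the usual PageRank fixed point $x = \\alpha \\tilde P^T x + (1-\\alpha)v$. As a hedged sanity check (not part of Algorithm 4), one such system can be solved directly with `scipy.sparse.linalg.spsolve` on a small graph and compared with `networkx.pagerank`; here $\\tilde P^T$ is obtained by transposing the row-stochastic matrix returned by `google_matrix_sparse`, and the test graph and damping factor are arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Direct solution of one shifted system vs networkx.pagerank (hedged check)\n",
"alpha_chk = 0.85\n",
"G_chk = nx.karate_club_graph() # small test graph, arbitrary choice\n",
"P_chk, v_chk = google_matrix_sparse(G_chk)\n",
"n_chk = P_chk.shape[0]\n",
"\n",
"# (1/alpha) I - P^T and right-hand side ((1-alpha)/alpha) v\n",
"A_chk = sp.sparse.csc_matrix((1/alpha_chk)*sp.sparse.eye(n_chk) - P_chk.T)\n",
"b_chk = ((1 - alpha_chk)/alpha_chk)*v_chk.toarray().flatten()\n",
"\n",
"x_direct = sp.sparse.linalg.spsolve(A_chk, b_chk)\n",
"x_nx = np.array(list(nx.pagerank(G_chk, alpha=alpha_chk).values()))\n",
"print('max |x_direct - nx.pagerank| =', np.max(np.abs(x_direct - x_nx)))"
]
},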
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Random Numerical Test Case on a small-world random graph"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"G = nx.watts_strogatz_graph(1000, 4, 0.1)\n",
"Pt, v = google_matrix_sparse(G)\n",
"# a = [0.85, 0.86, 0.87, 0.88, 0.89, 0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]\n",
"a = [0.85, 0.90, 0.95, 0.99]\n",
"n = len(G.nodes)\n",
"x = sp.sparse.random(n, len(a), density=0.1, format='lil')"
]
},
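{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before running `Algo4`, it can be useful to precompute reference solutions by solving each shifted system directly (a baseline for debugging, not part of Algorithm 4). The systems below are exactly the ones `Algo4` sets up, i.e. `((1/alpha)*I - Pt) x = ((1 - alpha)/alpha) * v` for each `alpha` in `a`, so the columns of `x_ref` are what the shifted iteration should converge to."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Direct (sparse LU) reference solutions for each damping factor in a\n",
"n_ref = Pt.shape[0]\n",
"I_ref = sp.sparse.eye(n_ref, format='csc')\n",
"v_ref = v.toarray().flatten()\n",
"\n",
"x_ref = np.zeros((n_ref, len(a)))\n",
"for idx, alpha in enumerate(a):\n",
"    A_ref = sp.sparse.csc_matrix((1/alpha)*I_ref - Pt)\n",
"    b_ref = ((1 - alpha)/alpha)*v_ref\n",
"    x_ref[:, idx] = sp.sparse.linalg.spsolve(A_ref, b_ref)\n",
"    print('alpha =', alpha, ' ||A x_ref - b|| =', np.linalg.norm(A_ref @ x_ref[:, idx] - b_ref))"
]
},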
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"iter = 1\n",
"res: [0.00558049 0.00351364 0.00166436 0.00031942]\n",
"err = 0.005580489988532435\n",
"k = 0\n",
"\n",
"iter = 2\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 3\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 4\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 5\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 6\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 7\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 8\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 9\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 10\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 11\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 12\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 13\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 14\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 15\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 16\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 17\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 18\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 19\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 20\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 21\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 22\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 23\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 24\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 25\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 26\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 27\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 28\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 29\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 30\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 31\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 32\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 33\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 34\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 35\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 36\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 37\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 38\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 39\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 40\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 41\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 42\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 43\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 44\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 45\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 46\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 47\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 48\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 49\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 50\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 51\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 52\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 53\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 54\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 55\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 56\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 57\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 58\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 59\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 60\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 61\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 62\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 63\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 64\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 65\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 66\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 67\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 68\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 69\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 70\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 71\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 72\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 73\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 74\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 75\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 76\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 77\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 78\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 79\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 80\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 81\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 82\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 83\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 84\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 85\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 86\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 87\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 88\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 89\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 90\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 91\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 92\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 93\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 94\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 95\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 96\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 97\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 98\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 99\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n",
"\n",
"iter = 100\n",
"res: [ 6.30826211e-03 1.27202498e-07 -1.07175457e-08 -1.83681405e-08]\n",
"err = 0.006308262114154177\n",
"k = 0\n"
]
},
{
"ename": "Exception",
"evalue": "Maximum number of iterations reached",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m x, \u001b[38;5;28miter\u001b[39m, mv \u001b[38;5;241m=\u001b[39m \u001b[43mAlgo4\u001b[49m\u001b[43m(\u001b[49m\u001b[43mPt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1e-6\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[5], line 101\u001b[0m, in \u001b[0;36mAlgo4\u001b[0;34m(Pt, v, m, a, tau, maxit, x)\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;28miter\u001b[39m \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 99\u001b[0m x \u001b[38;5;241m=\u001b[39m x_new\n\u001b[0;32m--> 101\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mMaximum number of iterations reached\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
"\u001b[0;31mException\u001b[0m: Maximum number of iterations reached"
]
}
],
"source": [
"x, iter, mv = Algo4(Pt, v, 100, a, 1e-6, 100, x)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}