Compare commits
No commits in common. 'master' and 'dev' have entirely different histories.
@ -1,260 +1,45 @@
|
||||
\documentclass[11pt]{article}
|
||||
\usepackage[margin=1.2in]{geometry}
|
||||
\usepackage[utf8]{inputenc}
|
||||
\usepackage[T1]{fontenc}
|
||||
\usepackage[english]{babel}
|
||||
\usepackage{fourier}
|
||||
\usepackage{amsthm}
|
||||
\usepackage{amssymb}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amsfonts}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{latexsym}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{float}
|
||||
\usepackage{etoolbox}
|
||||
\usepackage{hyperref}
|
||||
\usepackage{tikz}
|
||||
\usepackage{pgfplots}
|
||||
\pgfplotsset{compat=1.18}
|
||||
\usepackage{lipsum}
|
||||
\usepackage{algorithm}
|
||||
\usepackage{algpseudocode}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{nccmath}
|
||||
\usepackage{eucal}
|
||||
\usepackage[most]{tcolorbox}
|
||||
\newtcolorbox[auto counter]{problem}[1][]{%
|
||||
enhanced,
|
||||
breakable,
|
||||
colback=white,
|
||||
colbacktitle=white,
|
||||
coltitle=black,
|
||||
fonttitle=\bfseries,
|
||||
boxrule=.6pt,
|
||||
titlerule=.2pt,
|
||||
toptitle=3pt,
|
||||
bottomtitle=3pt,
|
||||
title=GitHub repository of this project}
|
||||
% !TeX program = pdflatex
|
||||
\documentclass{mathreport} % Uses our custom mathreport.cls
|
||||
|
||||
\RequirePackage[activate={true,nocompatibility},final,tracking=true,kerning=true,spacing=true]{microtype}
|
||||
\SetTracking{encoding={*}, shape=sc}{40} % Spacing for Small Caps
|
||||
% Load bibliography
|
||||
\addbibresource{references.bib}
|
||||
\usepackage{lipsum} % Just for the demo text
|
||||
|
||||
|
||||
\newcommand{\R}{\mathbb{R}}
|
||||
\newcommand{\N}{\mathbb{N}}
|
||||
\newcommand{\Z}{\mathbb{Z}}
|
||||
\newcommand{\Q}{\mathbb{Q}}
|
||||
\newcommand{\C}{\mathbb{C}}
|
||||
\newcommand{\V}{\mathcal{V}}
|
||||
\newcommand{\E}{\mathcal{E}}
|
||||
\newcommand{\G}{\mathcal{G}}
|
||||
\renewcommand{\L}{\mathcal{L}}
|
||||
|
||||
\newcommand{\defeq}{\vcentcolon=}
|
||||
\newcommand{\eqdef}{=\vcentcolon}
|
||||
\newcommand{\norm}[1]{\left\lVert#1\right\rVert}
|
||||
|
||||
\newtheorem{theorem}{Theorem}[section]
|
||||
\newtheorem{lemma}[theorem]{Lemma}
|
||||
\newtheorem{proposition}[theorem]{Proposition}
|
||||
\newtheorem{corollary}[theorem]{Corollary}
|
||||
\theoremstyle{definition}
|
||||
\newtheorem{definition}[theorem]{Definition}
|
||||
\newtheorem{example}[theorem]{Example}
|
||||
\theoremstyle{remark}
|
||||
\newtheorem{remark}[theorem]{Remark}
|
||||
|
||||
\title{%
|
||||
Accelerated Filtering on Graphs using Lanczos method
|
||||
\\ \large Scientific Computing project report}
|
||||
\author{Alberto Defendi}
|
||||
|
||||
\date{}
|
||||
|
||||
\setlength{\parskip}{1em}
|
||||
\setlength{\parindent}{0em}
|
||||
% --- Document Metadata ---
|
||||
\title{\normalfont\scshape\Large The Geometry of Complex Systems}
|
||||
\author{\normalfont\itshape A. N. Other}
|
||||
\date{\small\today}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\maketitle
|
||||
|
||||
\begin{abstract}
|
||||
\noindent The Lanczos algorithm \ldots
|
||||
\noindent \small \textbf{\textit{Abstract.}} \lipsum[1][1-4]
|
||||
\end{abstract}
|
||||
|
||||
{\setlength{\parskip}{0em}
|
||||
\tableofcontents}
|
||||
|
||||
\section{Introduction}
|
||||
This document uses a custom class file (\texttt{mathreport.cls}). This keeps the main file clean. The typography is set to Bringhurst's standards: wide margins, Palatino font, and old-style figures (e.g., 12345).
|
||||
|
||||
We introduce basic graph theory concepts and briefly overview the results used in the project experiments.
|
||||
|
||||
\subsection{Signal processing on graphs}
|
||||
|
||||
We consider a weighted undirected graph $ \G = (\V, \E, \mathcal{W}) $,
|
||||
where $ \V \subset \R^N $ is the set of vertices, $ \E \subset \R^M $ is the set of edges, and $ \mathcal{W}
|
||||
: \V \times \mathcal{V} \to \R$ is a weight function. The weight function can be
|
||||
represented with a $ N \times N $ matrix $ W $ such that
|
||||
$W_{i,j} =
|
||||
\begin{cases}
|
||||
W(v_i, v_j), & \text{if } (v_i, v_j) \in \E \\
|
||||
0, & \text{otherwise}
|
||||
\end{cases}
|
||||
\quad \text{for all } i,j = 1, \dots, |\V|
|
||||
$,
|
||||
and for our needs, we assume $ W $ to be symmetric, that is $ W_{i,j} = W_{j,i} $.
|
||||
|
||||
In the study of signal processing on graphs, we model the idea of sending a signal to a node as
|
||||
assigning a value to a vertex with a function $ s : \V \to \R$, whose values can be represented as a vector
|
||||
$ s = s(\V) = [s_1, \dots, s_N] \in \R^N $ where each entry $ s_i \defeq s(v_i)$ represents
|
||||
the signal sent over a node $ v_i \in \V$. We also keep track of the weight of each vertex
|
||||
with a function $ d(i) \defeq \sum_{j=1}^N W_{i,j} $, that we represent with the matrix $D =
|
||||
\text{diag}(d(1), \dots, d(N))$.
|
||||
|
||||
In this setting, we introduce the graph Laplacian $\L$ defined as
|
||||
$ \L = D - W$, where $D $ is the diagonal degree matrix with entries $D_{ii} = d(i)$. By
|
||||
construction, $\L^T = \mathcal{L}$, thus the Lanczos method can be safely applied to our
|
||||
problem.
|
||||
|
||||
\begin{remark}
|
||||
The Laplacian $\L$ is symmetric and semi-definite (it is diagonally dominant), hence by the
|
||||
spectral theorem it admits the decomposition
|
||||
\begin{equation*}
|
||||
\L = U \Lambda U^{*},
|
||||
\end{equation*}
|
||||
where $ U = \left[ u_0,\dots,u_{N-1} \right] \in O(N)$ is called Fourier basis, and $ \Lambda =
|
||||
\text{diag}\left(\lambda_0, \dots, \lambda_{N-1}\right)$ is the matrix of eigenvalues of $ \L $,
|
||||
without loss of generality we can assume $ 0 =\lambda_0 \leq \lambda_1 \dots \leq \lambda_{N-1} $
|
||||
and the vectors in $ U $ to be in the same order as the eigenvalues.
|
||||
\footnote{this assumption is aligned with the PyGSP $ \text{compute\_fourier\_basis()} $ function
|
||||
implementation, used in this work to compute the matrix $ \Lambda $. Clarifying this assumption is
|
||||
outside our scope.}.
|
||||
\end{remark}
|
||||
|
||||
\begin{definition}[Graph signal]
|
||||
A graph signal is a continuous function $ g : \R^+ \to \R $.
|
||||
\end{definition}
|
||||
Diagonalising the Laplacian would give an easy way to compute the function $ g(\L) $ by
|
||||
evaluating it on the eigenvalues of $ \L $, which means computing $ g(\L) = U g(\Lambda) U^{*} $.
|
||||
In matrix notation, applying a filter to a graph $ \G $ corresponds to the operation
|
||||
\begin{equation*}
|
||||
s^\prime \defeq g(\L)s = U g(\L) U^{*}s.
|
||||
\end{equation*}
|
||||
\begin{remark}[Computational cost]
|
||||
Computing the Fourier basis for $ \L $ is computationally expensive for large graphs, thus the
|
||||
choice of using the Lanczos method for $ g(\L) $, with its computational cost of $ O(M \cdot |\E|) $,
|
||||
it offers an efficient alternative in computing $ g(\L) $. However, storing the basis $ V_M $ costs MN
|
||||
additional memory, that could be avoided using a two-step implementation, that we leave for future
|
||||
work.
|
||||
\end{remark}
|
||||
|
||||
\subsection{The Lanczos method}
|
||||
\section{Mathematical Theory}
|
||||
We define our primary operator in the Hilbert space $\mathcal{H}$.
|
||||
|
||||
\begin{definition}
Given a matrix $ A \in \R^{N \times N} $ and a vector $ b \in \R^N $, the Krylov subspace of order $j$ is defined as the set $ \mathcal{K}_j (A,b) = \{ b, Ab, A^2b, \dots,
A^{j-1}b\} $. We represent the basis of this subspace in a matrix $ V_M = \left[ v_1,\dots,v_M
\right] \in \R^{N \times M}$.
\end{definition}
|
||||
\begin{definition}[Compact Operator]
|
||||
An operator $T: X \to Y$ is compact if $\overline{T(B_X)}$ is compact in $Y$.
|
||||
\end{definition}
|
||||
We now consider the Arnoldi relation
|
||||
\begin{equation*}
|
||||
AV_j = V_jH_j + h_{j+1,j}v_{j+1}e_j^{*}, \\ \hspace{20pt}
|
||||
H_j = V_j^{*}AV_j =
|
||||
\begin{bmatrix}
|
||||
h_{11} & \dots & \dots & h_{1,j} \\
|
||||
h_{21} & h_{22} & & \vdots \\
|
||||
& \ddots & \ddots & \vdots \\
|
||||
& & h_{j,j-1} & h_{j,j}.
|
||||
\end{bmatrix}
|
||||
\end{equation*}
|
||||
Letting
|
||||
\begin{equation*}
|
||||
\alpha_j \defeq h_{j,j},\hspace{20pt}\beta_j \defeq h_{j-1,j},
|
||||
\end{equation*}
|
||||
if $ A $ is symmetric and positive definite, then so is $ H_j $. Because $ H_j $ is both symmetric
|
||||
and upper and lower Hessenberg matrix, then it is tridiagonal, and we refer to it as $ T_j $, hence
|
||||
the Arnoldi relation takes the form:
|
||||
|
||||
\begin{equation*}
|
||||
AV_j = V_j T_j + \beta_j v_{j+1}e_j^{T}, \\ \hspace{20pt}
|
||||
T_j = V_j^{\top} A V_j = \begin{bmatrix}
|
||||
\alpha_1 & \beta_1 \\
|
||||
\beta_1 & \ddots & \ddots \\
|
||||
& \ddots & \ddots & \beta_{j-1} \\
|
||||
& & \beta_{j-1} & \alpha_j
|
||||
\end{bmatrix}.
|
||||
\end{equation*}
|
||||
\begin{theorem}[Spectral Theorem]
|
||||
There exists an orthonormal basis of eigenvectors.
|
||||
\end{theorem}
|
||||
|
||||
HERE PUT ALGORITHM
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
\section{Experiments}
|
||||
|
||||
\subsection{}
|
||||
|
||||
Consider the filter $ g : [0, \lambda_{\text{max}}] \to \R$ and a signal vector $ s \in \R^N $, by a
|
||||
result of Gallopoulos and Saad (see \cite{susnjara2015}) it holds\footnote{This result holds outside the graph
|
||||
signal processing context, that is for any function $f :\Omega \to \C$, where $ \Omega \subset \Lambda(A) $.} that
|
||||
\begin{equation}
g(\L)s \approx \norm{s}_2 V_M g(T_M) e_1 \eqdef g_M \label{eq:1}
\end{equation}
Consider the harmonic series shown in \cref{eq:harmonic}. The styling is handled entirely by the external class file.
|
||||
\begin{equation} \label{eq:harmonic}
|
||||
H_n = \sum_{k=1}^n \frac{1}{k} \approx \ln n + \gamma
|
||||
\end{equation}
|
||||
|
||||
\begin{definition}(Errors)
|
||||
We define the Lanczos iteration error as $\norm{g_{M+j} - g_M}_2 $, where $ j $ is small, and the true
|
||||
error as $ \norm{e_M} = \norm{g(\L)s - g_M} $.
|
||||
\end{definition}
|
||||
The scope of this experiment is verifying numerically equation \eqref{eq:1}
|
||||
|
||||
We study Erdős--Rényi and Sensor graphs. From the plot we can
Figure (caption: Erdős--Rényi and Sensor graphs colored according to the signal (unfiltered, top) and filtered
through the evaluation $g(\L)s$).
|
||||
test
|
||||
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[
|
||||
% Size parameters suitable for standard 2-column IEEE/Springer papers
|
||||
width=8cm,
|
||||
height=5.5cm,
|
||||
axis lines=middle,
|
||||
xlabel={$t$},
|
||||
ylabel={$f(t)$},
|
||||
xmin=-1.2, xmax=1.2,
|
||||
ymin=-0.2, ymax=1.2,
|
||||
% Clean fractional ticks
|
||||
xtick={-1, -0.5, 0, 0.5, 1},
|
||||
xticklabels={$-1$, $-\frac{1}{2}$, $0$, $\frac{1}{2}$, $1$},
|
||||
ytick={0, 1},
|
||||
ticklabel style={font=\footnotesize},
|
||||
xlabel style={anchor=west},
|
||||
ylabel style={anchor=south},
|
||||
enlargelimits=false,
|
||||
% Define the function natively.
|
||||
% Note: pgfplots evaluates trig functions in degrees by default.
|
||||
% 1/2 pi rad = 90 deg; pi*t rad = 180*t deg.
|
||||
declare function={
|
||||
f(\t) = (abs(\t) <= 0.5) * sin(90 * (cos(180*\t))^2);
|
||||
}
|
||||
]
|
||||
|
||||
\addplot [
|
||||
domain=-1.2:1.2,
|
||||
samples=300, % High sample count for smooth paper-quality rendering
|
||||
thick,
|
||||
blue!80!black % Professional dark blue (prints better than pure blue)
|
||||
] {f(x)};
|
||||
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
|
||||
|
||||
\clearpage
|
||||
\bibliographystyle{unsrt}
|
||||
\bibliography{ref}
|
||||
\nocite{*}
|
||||
|
||||
\section{Results and Discussion}
|
||||
\lipsum[2-4]
|
||||
|
||||
\printbibliography
|
||||
|
||||
\end{document}
|
||||
|
||||
@ -1,9 +0,0 @@
|
||||
@article{susnjara2015,
|
||||
title={Accelerated filtering on graphs using Lanczos method},
|
||||
author={Ana Susnjara and Nathanael Perraudin and Daniel Kressner and Pierre Vandergheynst},
|
||||
year={2015},
|
||||
eprint={1509.04537},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={math.NA},
|
||||
url={https://arxiv.org/abs/1509.04537},
|
||||
}
|
||||
@ -0,0 +1,7 @@
|
||||
@book{bringhurst2004,
|
||||
author = {Robert Bringhurst},
|
||||
title = {The Elements of Typographic Style},
|
||||
year = {2004},
|
||||
publisher = {Hartley \& Marks},
|
||||
address = {Vancouver}
|
||||
}
|
||||
@ -1,180 +0,0 @@
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.ticker as ticker
|
||||
import numpy as np
|
||||
import numpy.linalg as LA
|
||||
|
||||
# Requires latex installed
|
||||
import scienceplots # noqa: F401
|
||||
import scipy
|
||||
from pygsp import graphs
|
||||
|
||||
from afgl.util.build_T_matrix import build_T_matrix
|
||||
from afgl.util.lanczos import lanczos
|
||||
from afgl.util.plot import latex_log_formatter
|
||||
|
||||
|
||||
def plot_graphs(G_ER, G_Sensor, s: np.ndarray, N: int, p: float) -> None:
    """Plot the raw and Fourier-filtered signal on two graph types.

    Top row shows the input signal, bottom row the filtered one, with the
    Erdős–Rényi graph on the left and the sensor network on the right.
    The figure is written to ``./out/printed_graphs.pdf``.
    """
    fig, axs = plt.subplots(2, 2, figsize=(6.6, 5))

    # Both graphs need 2D layout coordinates before they can be drawn.
    G_ER.set_coordinates()
    G_Sensor.set_coordinates()

    filtered_ER = filter_signal_with_fourier(G_ER, s)
    filtered_S = filter_signal_with_fourier(G_Sensor, s)

    # (graph, signal, axis, title) for each of the four panels,
    # in the same order the original drew them.
    panels = [
        (G_ER, s, axs[0, 0], rf"Erdős-Rényi Graph $(N = {N}, p = {p})$"),
        (G_ER, filtered_ER, axs[1, 0], ""),
        (G_Sensor, s, axs[0, 1], rf"Sensor Network $(N = {N})$"),
        (G_Sensor, filtered_S, axs[1, 1], ""),
    ]
    for graph, signal, ax, title in panels:
        graph.plot(signal, ax=ax, vertex_size=15, edge_width=0.5, edge_color="gray")
        ax.set_title(title, pad=20)
        ax.set_axis_off()

    # bbox_inches="tight" prevents clipped titles in the exported figure.
    plt.savefig("./out/printed_graphs.pdf", bbox_inches="tight")
|
||||
|
||||
|
||||
def g_extended(t: np.ndarray) -> np.ndarray:
    """Smooth extension of the filter: sin((pi/2) * cos(pi*t)^2)."""
    squared_cosine = np.cos(np.pi * t) ** 2
    return np.sin(1 / 2 * np.pi * squared_cosine)
|
||||
|
||||
|
||||
"""
|
||||
Evaluates the function sin(0.5*pi*cos(pi*t)^2)chi_[-1/2,1/2] where chi_I is the
|
||||
characteristic function of I, as defined in example 1 (see [1]).
|
||||
"""
|
||||
|
||||
|
||||
def g(T: np.ndarray) -> np.ndarray:
    """Filter of Example 1: g_extended on [-1/2, 1/2], zero elsewhere."""
    if scipy.sparse.issparse(T):
        # The elementwise comparisons below need a dense array.
        T = T.toarray()
    # Indicator of the support [-1/2, 1/2].
    inside_support = (T >= -1 / 2) & (T <= 1 / 2)
    return np.where(inside_support, g_extended(T), 0)
|
||||
|
||||
|
||||
def compute_g_M(
    V: np.ndarray, alp: np.ndarray, beta: np.ndarray, s: np.ndarray
) -> np.ndarray:
    """Approximate g(L) s as ||s||_2 * V g(T) e_1 using the Lanczos
    coefficients (see [1])."""
    m = len(alp)
    T = build_T_matrix(alp, beta)

    # Evaluate g on T through its spectral decomposition (T is symmetric).
    lam, Q = LA.eigh(T)
    g_T = Q @ np.diag(g(lam)) @ Q.T

    # First canonical basis vector of R^m.
    e1 = np.zeros(m)
    e1[0] = 1

    y = LA.norm(s) * (g_T @ e1)
    return V @ y
|
||||
|
||||
|
||||
def filter_signal_with_fourier(G, s: np.ndarray) -> np.ndarray:
    """Filter s exactly in the graph Fourier domain: U g(Lambda) U^T s."""
    G.compute_fourier_basis()
    fourier_basis = G.U
    g_of_eigenvalues = np.diag(g(G.e))
    return (fourier_basis @ g_of_eigenvalues @ fourier_basis.T) @ s
|
||||
|
||||
|
||||
def plot_error_comparison(
    l_err_ER: np.ndarray, t_err_ER: np.ndarray, l_err_S: np.ndarray, t_err_S: np.ndarray
) -> None:
    """Plot Lanczos iteration error vs true error for both graph types.

    The figure is written to ``./out/ex1_estimate.pdf``.
    """
    fig, axes = plt.subplots(1, 2, figsize=(6.6, 2.5))

    series = (
        (axes[0], l_err_ER, t_err_ER, "Erdős-Rényi graph"),
        (axes[1], l_err_S, t_err_S, "Sensor graph"),
    )
    for ax, lanczos_err, true_err, title in series:
        ax.plot(lanczos_err, label=r"$\left\lVert g_{M+3} - g_M \right\rVert_2$")
        ax.plot(true_err, label=r"$\left\lVert e_M \right\rVert_2$")
        ax.set_title(title)

        # Identical formatting on both subplots.
        ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
        ax.set_yscale("log")
        ax.yaxis.set_major_formatter(ticker.FuncFormatter(latex_log_formatter))
        ax.legend()

    # Prevents overlapping of labels between the subplots.
    plt.tight_layout()

    plt.savefig("./out/ex1_estimate.pdf", bbox_inches="tight")
|
||||
|
||||
|
||||
def run_comparison_1_for_graph(
    G, s: np.ndarray, M_MAX: int
) -> tuple[np.ndarray, np.ndarray]:
    """Compare the true filtering error with the Lanczos iteration error.

    Args:
        G: Graph
        s: Signal vector
        M_MAX: Largest Lanczos basis size to evaluate

    Returns:
        (lanczos_err, true_err): Vectors of the error norms
        norm(g_{M+3} - g_M) and norm(e_M) as defined in [1].
    """
    G.compute_laplacian("combinatorial")
    laplacian = G.L

    lookahead = 3
    # One Lanczos run with the largest basis; prefixes of (V, alp, beta)
    # give the factorization for every smaller basis size.
    V, alp, beta = lanczos(laplacian, s, M_MAX + lookahead)

    # Reference solution computed with the full Fourier basis.
    exact = filter_signal_with_fourier(G, s)

    lanczos_err = np.zeros(M_MAX)
    true_err = np.zeros(M_MAX)

    for M in range(1, M_MAX + 1):
        g_M = compute_g_M(V[:, :M], alp[:M], beta[: M - 1], s)
        g_ahead = compute_g_M(
            V[:, : M + lookahead], alp[: M + lookahead], beta[: M + lookahead - 1], s
        )

        lanczos_err[M - 1] = LA.norm(g_ahead - g_M)
        true_err[M - 1] = LA.norm(exact - g_M)

    return lanczos_err, true_err
|
||||
|
||||
|
||||
def run() -> None:
    """Reproduce the test of Example 1 of the paper, restricted to the
    Lanczos method (no Chebyshev), using the filter
    g(t) = sin(0.5*pi*cos(pi*t)^2) * chi_{[-0.5, 0.5]}.
    """
    N = 500
    M = 200
    p = 0.04

    # Random positive signal, normalized to unit length as requested.
    signal = np.random.randint(1, 10000, N).astype(float)
    signal /= LA.norm(signal)

    erdos_renyi = graphs.ErdosRenyi(N, p)
    sensor = graphs.Sensor(N)

    l_err_ER, t_err_ER = run_comparison_1_for_graph(erdos_renyi, signal, M)
    l_err_S, t_err_S = run_comparison_1_for_graph(sensor, signal, M)

    plot_error_comparison(l_err_ER, t_err_ER, l_err_S, t_err_S)
    # plot_graphs(erdos_renyi, sensor, signal, N, p)
|
||||
@ -1,36 +0,0 @@
|
||||
import numpy as np
|
||||
import numpy.linalg as LA
|
||||
from pygsp import graphs
|
||||
|
||||
from afgl.util.lanczos import lanczos
|
||||
|
||||
|
||||
def run() -> None:
    """Benchmark the Lanczos method on Erdős–Rényi graphs of growing size.

    Generates Erdős–Rényi graphs of increasing size (250, 500, 1000, 2000,
    4000, 8000) with edge probability p = 0.04 and measures the wall-clock
    time of the Lanczos method with basis size M = 200.

    Args:
        None

    Returns:
        None
    """
    from time import perf_counter  # local import: only this benchmark needs it

    n = 6
    p = 0.04

    M = 200

    N_VALUES = 250 * (2 ** np.arange(n))

    for N in N_VALUES:
        s = np.random.randint(1, 10000, N).astype(float)
        # Normalize s as in request
        s /= LA.norm(s)

        G = graphs.ErdosRenyi(N, p)
        G.compute_laplacian("combinatorial")
        L = G.L

        # BUG FIX: the docstring promised a timing measurement, but the
        # original discarded the call result and measured nothing.
        start = perf_counter()
        lanczos(L, s, M)
        elapsed = perf_counter() - start
        print(f"N = {N}: Lanczos took {elapsed:.3f} s")
|
||||
@ -0,0 +1,40 @@
|
||||
import numpy as np
|
||||
import numpy.linalg as LA
|
||||
|
||||
"""
|
||||
Arguments
|
||||
L Real valued NxN symmetric matrix
|
||||
s vector of size N
|
||||
M natural number indicating basis size
|
||||
|
||||
Returns
|
||||
-------
|
||||
V : ndarray
|
||||
M-dimensional vector with orthonormal columns.
|
||||
alp : ndarray
|
||||
M-dimensional array of scalars.
|
||||
beta : ndarray
|
||||
M-dimensional array of scalars.
|
||||
"""
|
||||
|
||||
|
||||
def lanczos(L, s, M):
    """Lanczos tridiagonalization of the symmetric matrix L.

    Arguments
        L : Real valued NxN symmetric matrix
        s : vector of size N (starting vector)
        M : natural number indicating basis size

    Returns
    -------
    [V, alp, beta] : list
        V is an N x M matrix with orthonormal columns, alp holds the M
        diagonal entries and beta the M-1 off-diagonal entries of the
        tridiagonal matrix T = V^T L V. On breakdown (an invariant
        subspace is found early) the outputs are truncated to the
        vectors actually built, matching the sibling implementation.
    """
    N = len(s)
    alp = np.zeros(M)
    beta = np.zeros(M - 1)
    V = np.zeros((N, M))
    V[:, 0] = s / LA.norm(s)

    for j in range(M):
        w = L @ V[:, j]
        alp[j] = np.dot(V[:, j], w)

        # Three-term recurrence: orthogonalize only against the current
        # and the previous basis vector (no re-orthogonalization).
        v_tilde = w - V[:, j] * alp[j]
        if j > 0:
            v_tilde = v_tilde - V[:, j - 1] * beta[j - 1]

        if j < M - 1:
            beta[j] = LA.norm(v_tilde)
            # BUG FIX: guard against breakdown. The original divided by
            # zero here, filling V with NaNs once an invariant subspace
            # was reached.
            if beta[j] < 1e-14:
                return [V[:, : j + 1], alp[: j + 1], beta[:j]]
            V[:, j + 1] = v_tilde / beta[j]

    return [V, alp, beta]
|
||||
@ -1,15 +0,0 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
def build_T_matrix(alp, beta):
    """Constructs the Lanczos tridiagonal matrix T.

    Args:
        alp: Vector of alphas of size N (main diagonal)
        beta: Vector of betas of size N-1 (sub/super diagonal)

    Returns:
        T : Symmetric tridiagonal matrix T
    """
    n = len(alp)
    T = np.zeros((n, n), dtype=np.result_type(alp, beta))
    np.fill_diagonal(T, alp)
    # Mirror the betas onto the first sub- and super-diagonal.
    off = np.arange(n - 1)
    T[off, off + 1] = beta
    T[off + 1, off] = beta
    return T
|
||||
@ -1,56 +0,0 @@
|
||||
import numpy as np
|
||||
import numpy.linalg as LA
|
||||
|
||||
|
||||
def double_orthogonalization(V, w, j):
    """Orthogonalize w in place against the first j+1 columns of V, twice.

    The second Gram-Schmidt pass removes the rounding error left by the
    first one ("twice is enough").
    """
    # NOTE(review): the original asks why projecting against columns up to
    # j+1 (rather than j-1, as in Demmel) works well -- left unresolved.
    basis = V[:, : j + 1]
    w -= basis @ (basis.T @ w)
    w -= basis @ (basis.T @ w)
    return w
|
||||
|
||||
|
||||
def no_orthogonalization(V, w, alp, beta, j):
    """Plain three-term Lanczos recurrence: subtract the projections onto
    the current and (if any) previous basis vector only."""
    residual = w - alp[j] * V[:, j]
    if j > 0:
        residual = residual - beta[j - 1] * V[:, j - 1]
    return residual
|
||||
|
||||
|
||||
def lanczos(L, s, M):
    """
    Classic Lanczos method (without re-orthogonalization).

    Arguments
        L : Real valued NxN symmetric matrix
        s : vector of size N
        M : natural number indicating basis size

    Returns
    -------
    V : ndarray
        N x M matrix with orthonormal columns.
    alp : ndarray
        M-dimensional array of scalars (diagonal of T).
    beta : ndarray
        (M-1)-dimensional array of scalars (off-diagonal of T).
        On breakdown the three outputs are truncated to the vectors built.
    """
    dim = len(s)
    alp = np.zeros(M)
    beta = np.zeros(M - 1)
    V = np.zeros((dim, M))
    # Start from the normalized input signal.
    V[:, 0] = s / LA.norm(s)

    for j in range(M):
        w = L @ V[:, j]
        alp[j] = np.dot(V[:, j], w)

        w = no_orthogonalization(V, w, alp, beta, j)

        if j == M - 1:
            continue
        beta[j] = LA.norm(w)
        if beta[j] < 1e-14:
            # Invariant subspace reached: report it and return what we have.
            print("BREAKDOWN")
            return V[:, : j + 1], alp[: j + 1], beta[:j]
        V[:, j + 1] = w / beta[j]

    return V, alp, beta
|
||||
@ -1,28 +0,0 @@
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import scienceplots # noqa: F401
|
||||
from pygsp import plotting
|
||||
|
||||
|
||||
def latex_sci(val: float, decimals: int = 2) -> str:
    """Converts a value to LaTeX scientific notation A x 10^{B}."""
    if val == 0:
        # log10 is undefined at zero; render it literally.
        return "0"
    exp = int(np.floor(np.log10(abs(val))))
    mantissa = val / 10**exp
    formatted_mantissa = f"{mantissa:.{decimals}f}"
    return formatted_mantissa + rf" \times 10^{{{exp}}}"
|
||||
|
||||
|
||||
def latex_log_formatter(y: float, pos: int) -> str:
    """Custom matplotlib formatter rendering log-scale ticks as LaTeX 10^{n}."""
    if y > 0:
        # Round to the nearest decade exponent.
        exponent = int(np.round(np.log10(y)))
        return f"$10^{{{exponent}}}$"
    # Non-positive values carry no label on a log axis.
    return ""
|
||||
|
||||
|
||||
def plot_setup() -> None:
    """Configure pygsp and matplotlib for publication-style figures."""
    # TODO match font with document
    plt.style.use(["science"])
    plotting.BACKEND = "matplotlib"
|
||||
Loading…
Reference in New Issue