Compare commits

...

13 Commits
dev ... master

Author SHA1 Message Date
alberto fdbe1b2d55 Major fixes to pipeline.
Error now are close in plot. Still Lanczos orthogonalization selection
must be fixed.
1 month ago
alberto 23a95fe1f2 Minor fix. 1 month ago
alberto ec756aaf8d Minor fix to breakdown. 1 month ago
alberto a57b1a4496 Add theoretical introduction. 1 month ago
alberto 710f3da157 Match order with pseudocode. 1 month ago
alberto dcc0916063 Rename v_tilde to w. 1 month ago
alberto eb2d82115a Refactor and add test 2. 1 month ago
alberto 1cba3734ec Refactor code. 1 month ago
alberto 7948c92c74 Fix plots. 1 month ago
alberto 0b2afa71e6 Try fix ex1 1 month ago
alberto ded4672c25 Example 1 works with bug.
Errors are distant
1 month ago
alberto 57d19802f7 Begin example 1 and some refactoring. 1 month ago
alberto 09b382098c Try graph plotting and add breakdown. 1 month ago

@ -7,6 +7,7 @@ dependencies = [
"matplotlib>=3.10.8",
"numpy>=2.0.0",
"pygsp>=0.6.1",
"scienceplots>=2.2.1",
"scipy>=1.10.0",
]
requires-python = ">=3.10"

@ -1,45 +1,260 @@
% !TeX program = pdflatex
% \documentclass{mathreport} % Disabled: a document may declare only one class.
\documentclass[11pt]{article}
\usepackage[margin=1.2in]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{fourier}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{latexsym}
\usepackage{graphicx}
\usepackage{float}
\usepackage{etoolbox}
\usepackage{hyperref}
\usepackage{tikz}
\usepackage{pgfplots}
\pgfplotsset{compat=1.18}
\usepackage{lipsum}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{mathtools}
\usepackage{nccmath}
\usepackage{eucal}
\usepackage[most]{tcolorbox}
\newtcolorbox[auto counter]{problem}[1][]{%
enhanced,
breakable,
colback=white,
colbacktitle=white,
coltitle=black,
fonttitle=\bfseries,
boxrule=.6pt,
titlerule=.2pt,
toptitle=3pt,
bottomtitle=3pt,
title=GitHub repository of this project}
% Bibliography: produced with BibTeX at the end of the document
% (\bibliographystyle + \bibliography). \addbibresource requires the
% biblatex package, which is not loaded, so it is disabled here.
% \addbibresource{references.bib}
\usepackage{lipsum} % Just for the demo text
\RequirePackage[activate={true,nocompatibility},final,tracking=true,kerning=true,spacing=true]{microtype}
\SetTracking{encoding={*}, shape=sc}{40} % Spacing for Small Caps
% --- Document Metadata ---
\title{\normalfont\scshape\Large The Geometry of Complex Systems}
\author{\normalfont\itshape A. N. Other}
\date{\small\today}
% \begin{document} % removed: duplicate — the real \begin{document} follows the preamble below
\newcommand{\R}{\mathbb{R}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\V}{\mathcal{V}}
\newcommand{\E}{\mathcal{E}}
\newcommand{\G}{\mathcal{G}}
\renewcommand{\L}{\mathcal{L}}
\newcommand{\defeq}{\vcentcolon=}
\newcommand{\eqdef}{=\vcentcolon}
\newcommand{\norm}[1]{\left\lVert#1\right\rVert}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\title{%
Accelerated Filtering on Graphs using Lanczos method
\\ \large Scientific Computing project report}
\author{Alberto Defendi}
\date{}
\setlength{\parskip}{1em}
\setlength{\parindent}{0em}
\begin{document}
\maketitle
\begin{abstract}
\noindent \small \textbf{\textit{Abstract.}} \lipsum[1][1-4]
\noindent The Lanczos algorithm \ldots
\end{abstract}
{\setlength{\parskip}{0em}
\tableofcontents}
\section{Introduction}
This report studies accelerated filtering of signals on graphs by means of the Lanczos method, following the approach of Su{\v s}njara et al.~\cite{susnjara2015}. We first review the required theory and then present numerical experiments.
\section{Mathematical Theory}
We define our primary operator in the Hilbert space $\mathcal{H}$.
We introduce basic graph theory concepts and briefly overview the results used in the project experiments.
\subsection{Signal processing on graphs}
We consider a weighted undirected graph $ \G = (\V, \E, \mathcal{W}) $,
where $ \V \subset \R^N $ is the set of vertices, $ \E \subset \R^M $ is the set of edges, and $ \mathcal{W}
: \V \times \mathcal{V} \to \R$ is a weight function. The weight function can be
represented with a $ N \times N $ matrix $ W $ such that
$W_{i,j} =
\begin{cases}
W(v_i, v_j), & \text{if } (v_i, v_j) \in \E \\
0, & \text{otherwise}
\end{cases}
\quad \text{for all } i,j = 1, \dots, |\V|
$,
and for our needs, we assume $ W $ to be symmetric, that is $ W_{i,j} = W_{j,i} $.
In the study of signal processing on graph, we model the idea of sending a signal to a node as
assigning a value to a vertex with a function $ s : \V \to \R$, whose values can be represented as a vector
$ s = s(\V) = [s_1, \dots, s_N] \in \R^N $ where each entry $ s_i \defeq s(v_i)$ represents
the signal sent over a node $ v_i \in \V$. We also keep track of the weight of each vertex
with a function $ d(i) \defeq \sum_{j=1}^N W_{i,j} $, that we represent with the matrix $D =
\text{diag}(d(1), \dots, d(N))$.
In this setting, we introduce the graph Laplacian $\L$ defined as
$ \L = D - W$, where $D $ is the diagonal degree matrix with entries $D_{ii} = d(i)$. By
construction, $\L^T = \mathcal{L}$, thus the Lanczos method can be safely applied to our
problem.
\begin{remark}
The Laplacian $\L$ is symmetric and positive semi-definite (it is diagonally dominant with non-negative diagonal), hence by the
spectral theorem it admits the decomposition
\begin{equation*}
\L = U \Lambda U^{*},
\end{equation*}
where $ U = \left[ u_0,\dots,u_{N-1} \right] \in O(N)$ is called Fourier basis, and $ \Lambda =
\text{diag}\left(\lambda_0, \dots, \lambda_{N-1}\right)$ is the matrix of eigenvalues of $ \L $,
without loss of generality we can assume $ 0 =\lambda_0 \leq \lambda_1 \leq \dots \leq \lambda_{N-1} $
and the columns of $ U $ to be ordered consistently with the eigenvalues
\footnote{this assumption is aligned with the PyGSP $ \text{compute\_fourier\_basis()} $ function
implementation, used in this work to compute the matrix $ \Lambda $. Clarifying this assumption is
outside our scope.}.
\end{remark}
\begin{definition}[Graph filter]
A graph filter is a continuous function $ g : \R^+ \to \R $, evaluated on the spectrum of the Laplacian.
\end{definition}
Diagonalising the Laplacian would give an easy way to compute the function $ g(\L) $ by
evaluating it on the eigenvalues of $ \L $, which means computing $ g(\L) = U g(\Lambda) U^{*} $.
In matrix notation, applying a filter to a graph $ \G $ corresponds to the operation
\begin{equation*}
s^\prime \defeq g(\L)s = U g(\L) U^{*}s.
\end{equation*}
\begin{remark}[Computational cost]
Computing the Fourier basis of $ \L $ is computationally expensive for large graphs; the
Lanczos method, with its computational cost of $ O(M \cdot |\E|) $,
offers an efficient alternative for computing $ g(\L)s $. However, storing the basis $ V_M $ costs $MN$
additional memory, that could be avoided using a two-step implementation, that we leave for future
work.
\end{remark}
\subsection{The Lanczos method}
\begin{definition}[Compact Operator]
An operator $T: X \to Y$ is compact if $\overline{T(B_X)}$ is compact in $Y$.
\end{definition}
\begin{definition}
Given a matrix $ A \in \R^{N \times N} $ and a vector $ b \in \R^N $, the Krylov subspace of order $j$ is defined as $ \mathcal{K}_j (A,b) = \operatorname{span}\{ b, Ab, A^2b, \dots,
A^{j-1}b\} $. We collect an orthonormal basis of this subspace in a matrix $ V_M = \left[ v_1,\dots,v_M
\right] \in \R^{N \times M}$.
\end{definition}
We now consider the Arnoldi relation
\begin{equation*}
AV_j = V_jH_j + h_{j+1,j}v_{j+1}e_j^{*}, \\ \hspace{20pt}
H_j = V_j^{*}AV_j =
\begin{bmatrix}
h_{11} & \dots & \dots & h_{1,j} \\
h_{21} & h_{22} & & \vdots \\
& \ddots & \ddots & \vdots \\
& & h_{j,j-1} & h_{j,j}.
\end{bmatrix}
\end{equation*}
Letting
\begin{equation*}
\alpha_j \defeq h_{j,j},\hspace{20pt}\beta_j \defeq h_{j-1,j},
\end{equation*}
if $ A $ is symmetric and positive definite, then so is $ H_j $. Because $ H_j $ is both symmetric
and upper Hessenberg, it is tridiagonal, and we refer to it as $ T_j $; hence
the Arnoldi relation takes the form:
% The following template-demo theorem interrupted the sentence above and is
% disabled; the spectral decomposition is already stated in the earlier remark.
% \begin{theorem}[Spectral Theorem]
% There exists an orthonormal basis of eigenvectors.
% \end{theorem}
\begin{equation*}
AV_j = V_j T_j + \beta_j v_{j+1}e_j^{T}, \\ \hspace{20pt}
T_j = V_j^{\top} A V_j = \begin{bmatrix}
\alpha_1 & \beta_1 \\
\beta_1 & \ddots & \ddots \\
& \ddots & \ddots & \beta_{j-1} \\
& & \beta_{j-1} & \alpha_j
\end{bmatrix}.
\end{equation*}
% Template-demo text removed here: it referenced an undefined \cref and left
% an unterminated equation environment.
% TODO: insert the Lanczos algorithm pseudocode (algorithm/algpseudocode are
% already loaded in the preamble).
\section{Experiments}
\subsection{Example 1: accuracy of the Lanczos approximation}
Consider the filter $ g : [0, \lambda_{\text{max}}] \to \R$ and a signal vector $ s \in \R^N $. By
a result of Gallopoulos and Saad (see~\cite{susnjara2015}) it holds\footnote{This result holds outside the graph
signal processing context, that is for any function $f :\Omega \to \C$, where $ \Omega \subset \Lambda(A) $.} that
\begin{equation}
g(\L)s \approx \norm{s}_2 V_M g(T_M) e_1 \eqdef g_M \label{eq:1}
\end{equation}
\section{Results and Discussion}
\lipsum[2-4]
\begin{definition}[Errors]
We define the Lanczos iteration error as $\norm{g_{M+j} - g_M}_2 $, where $ j $ is small, and the true
error as $ \norm{e_M} = \norm{g(\L)s - g_M} $.
\end{definition}
The scope of this experiment is verifying numerically equation \eqref{eq:1}.
We study Erd\H{o}s--R\'enyi and Sensor-type graphs.
% TODO: complete the discussion of the plots.
% Draft caption: ER and Sensor graphs colored according to the signal
% (unfiltered, top) and filtered through the evaluation $g(\L)s$ (bottom).
\begin{tikzpicture}
\begin{axis}[
% Size parameters suitable for standard 2-column IEEE/Springer papers
width=8cm,
height=5.5cm,
axis lines=middle,
xlabel={$t$},
ylabel={$f(t)$},
xmin=-1.2, xmax=1.2,
ymin=-0.2, ymax=1.2,
% Clean fractional ticks
xtick={-1, -0.5, 0, 0.5, 1},
xticklabels={$-1$, $-\frac{1}{2}$, $0$, $\frac{1}{2}$, $1$},
ytick={0, 1},
ticklabel style={font=\footnotesize},
xlabel style={anchor=west},
ylabel style={anchor=south},
enlargelimits=false,
% Define the function natively.
% Note: pgfplots evaluates trig functions in degrees by default.
% 1/2 pi rad = 90 deg; pi*t rad = 180*t deg.
declare function={
f(\t) = (abs(\t) <= 0.5) * sin(90 * (cos(180*\t))^2);
}
]
\addplot [
domain=-1.2:1.2,
samples=300, % High sample count for smooth paper-quality rendering
thick,
blue!80!black % Professional dark blue (prints better than pure blue)
] {f(x)};
\end{axis}
\end{tikzpicture}
\clearpage
\bibliographystyle{unsrt}
\bibliography{references} % match the BibTeX database name (was "ref")
\nocite{*}
% \printbibliography requires biblatex, which is not loaded; BibTeX is used above.
\end{document}

@ -0,0 +1,9 @@
@article{susnjara2015,
title={Accelerated filtering on graphs using Lanczos method},
author={Ana Susnjara and Nathanael Perraudin and Daniel Kressner and Pierre Vandergheynst},
year={2015},
eprint={1509.04537},
archivePrefix={arXiv},
primaryClass={math.NA},
url={https://arxiv.org/abs/1509.04537},
}

@ -1,7 +0,0 @@
@book{bringhurst2004,
author = {Robert Bringhurst},
title = {The Elements of Typographic Style},
year = {2004},
publisher = {Hartley \& Marks},
address = {Vancouver}
}

@ -0,0 +1,180 @@
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import numpy.linalg as LA
# Requires latex installed
import scienceplots # noqa: F401
import scipy
from pygsp import graphs
from afgl.util.build_T_matrix import build_T_matrix
from afgl.util.lanczos import lanczos
from afgl.util.plot import latex_log_formatter
def plot_graphs(G_ER, G_Sensor, s: np.ndarray, N: int, p: float) -> None:
    """Visualize a signal before and after filtering on two graph types.

    Draws a 2x2 grid: the top row shows the raw signal ``s`` on an
    Erdős–Rényi graph (left) and a Sensor graph (right); the bottom row
    shows the same signal filtered through g(L) via the exact Fourier
    evaluation (``filter_signal_with_fourier``).

    Args:
        G_ER: pygsp Erdős–Rényi graph.
        G_Sensor: pygsp Sensor graph.
        s: Signal vector of length N.
        N: Number of vertices (used only in the plot titles).
        p: ER edge probability (used only in the plot title).

    Side effects:
        Writes the figure to ./out/printed_graphs.pdf.
    """
    fig, axs = plt.subplots(2, 2, figsize=(6.6, 5))

    # Layout coordinates must exist before plotting.
    G_ER.set_coordinates()
    G_Sensor.set_coordinates()

    signal_ER = filter_signal_with_fourier(G_ER, s)
    signal_S = filter_signal_with_fourier(G_Sensor, s)

    # TOP LEFT: raw signal on the ER graph.
    G_ER.plot(s, ax=axs[0, 0], vertex_size=15, edge_width=0.5, edge_color="gray")
    axs[0, 0].set_title(rf"Erdős-Rényi Graph $(N = {N}, p = {p})$", pad=20)
    axs[0, 0].set_axis_off()

    # BOTTOM LEFT: filtered signal on the ER graph.
    G_ER.plot(
        signal_ER, ax=axs[1, 0], vertex_size=15, edge_width=0.5, edge_color="gray"
    )
    axs[1, 0].set_title("", pad=20)
    axs[1, 0].set_axis_off()

    # TOP RIGHT: raw signal on the Sensor graph.
    G_Sensor.plot(s, ax=axs[0, 1], vertex_size=15, edge_width=0.5, edge_color="gray")
    axs[0, 1].set_title(rf"Sensor Network $(N = {N})$", pad=20)
    axs[0, 1].set_axis_off()

    # BOTTOM RIGHT: filtered signal on the Sensor graph.
    G_Sensor.plot(
        signal_S, ax=axs[1, 1], vertex_size=15, edge_width=0.5, edge_color="gray"
    )
    axs[1, 1].set_title("", pad=20)
    axs[1, 1].set_axis_off()

    # Prevent label/title overlap: the original comment announced this but
    # never applied it; matches plot_error_comparison.
    fig.tight_layout()
    plt.savefig("./out/printed_graphs.pdf", bbox_inches="tight")
def g_extended(t: np.ndarray) -> np.ndarray:
    """Evaluate sin(0.5*pi*cos(pi*t)^2) elementwise.

    This is the smooth part of the filter of example 1 (see [1]); the
    restriction to [-1/2, 1/2] (the indicator chi) is applied by ``g``,
    not here.

    Args:
        t: Scalar or array of evaluation points.

    Returns:
        Array of the same shape as ``t``.
    """
    # The original placed its documentation *after* the return statement,
    # where it was unreachable; it is now a proper docstring.
    return np.sin(1 / 2 * np.pi * (np.cos(np.pi * t) ** 2))
def g(T: np.ndarray) -> np.ndarray:
    """Evaluate the example-1 filter elementwise.

    Applies ``g_extended`` where the entry lies in [-1/2, 1/2] and
    returns 0 elsewhere (the indicator chi_[-1/2,1/2]).

    Args:
        T: Dense ndarray or scipy sparse matrix of evaluation points.

    Returns:
        Dense ndarray of the filtered values.
    """
    if scipy.sparse.issparse(T):
        # The elementwise & below does not support sparse matrices.
        T = T.toarray()
    # Indicator of the support [-1/2, 1/2].
    inside = ((T >= -1 / 2) & (T <= 1 / 2)).astype(int)
    return np.where(inside, g_extended(T), 0)
def compute_g_M(
    V: np.ndarray, alp: np.ndarray, beta: np.ndarray, s: np.ndarray
) -> np.ndarray:
    """Compute the Lanczos approximation g_M = ||s|| V g(T_M) e_1 (see [1]).

    Args:
        V: (N, M) matrix of Lanczos basis vectors.
        alp: Length-M diagonal coefficients of T_M.
        beta: Length-(M-1) off-diagonal coefficients of T_M.
        s: Signal vector; only its 2-norm enters the formula.

    Returns:
        The approximation of g(L) @ s as a vector in R^N.
    """
    order = len(alp)
    # First canonical basis vector of R^M.
    e_1 = np.zeros(order)
    e_1[0] = 1
    # g(T_M) is evaluated spectrally; cheap because T_M is small and
    # tridiagonal.
    T = build_T_matrix(alp, beta)
    eigvals, eigvecs = LA.eigh(T)
    g_of_T = eigvecs @ np.diag(g(eigvals)) @ eigvecs.T
    coefficients = LA.norm(s) * (g_of_T @ e_1)
    return V @ coefficients
def filter_signal_with_fourier(G, s: np.ndarray) -> np.ndarray:
    """Filter ``s`` exactly via the graph Fourier basis: U g(Lambda) U^T s.

    Requires a full eigendecomposition of the Laplacian (expensive for
    large graphs); used as the ground truth for the Lanczos comparison.
    """
    G.compute_fourier_basis()
    fourier_basis = G.U
    spectral_filter = np.diag(g(G.e))
    return (fourier_basis @ spectral_filter @ fourier_basis.T) @ s
def plot_error_comparison(
    l_err_ER: np.ndarray, t_err_ER: np.ndarray, l_err_S: np.ndarray, t_err_S: np.ndarray
) -> None:
    """Plot the Lanczos iteration estimate vs. the true error per graph type.

    Left panel: Erdős–Rényi graph; right panel: Sensor graph. Both use a
    log-scaled y axis with LaTeX power-of-ten tick labels.

    Side effects:
        Writes the figure to ./out/ex1_estimate.pdf.
    """
    _, (ax_er, ax_sensor) = plt.subplots(1, 2, figsize=(6.6, 2.5))

    panels = (
        (ax_er, l_err_ER, t_err_ER, "Erdős-Rényi graph"),
        (ax_sensor, l_err_S, t_err_S, "Sensor graph"),
    )
    for ax, lanczos_err, true_err, title in panels:
        ax.plot(lanczos_err, label=r"$\left\lVert g_{M+3} - g_M \right\rVert_2$")
        ax.plot(true_err, label=r"$\left\lVert e_M \right\rVert_2$")
        ax.set_title(title)
        ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
        ax.set_yscale("log")
        ax.yaxis.set_major_formatter(ticker.FuncFormatter(latex_log_formatter))
        ax.legend()

    # Prevents overlapping of labels between the subplots.
    plt.tight_layout()
    plt.savefig("./out/ex1_estimate.pdf", bbox_inches="tight")
def run_comparison_1_for_graph(
    G, s: np.ndarray, M_MAX: int
) -> tuple[np.ndarray, np.ndarray]:
    """Compare the Lanczos stopping estimate with the true filtering error.

    For M = 1, ..., M_MAX, computes the Lanczos approximation g_M of
    g(L) s and records both the look-ahead estimate ||g_{M+3} - g_M||_2
    and the true error ||g(L)s - g_M||_2 (see [1]).

    Args:
        G: pygsp graph.
        s: Signal vector.
        M_MAX: Largest Krylov subspace size considered.

    Returns:
        Tuple (lanczos_err, true_err) of length-M_MAX arrays.
    """
    G.compute_laplacian("combinatorial")
    laplacian = G.L

    # Look-ahead distance of the stopping estimate.
    j = 3
    # A single factorization of size M_MAX + j serves every truncation below.
    V, alp, beta = lanczos(laplacian, s, M_MAX + j)

    lanczos_err = np.zeros(M_MAX)
    true_err = np.zeros(M_MAX)
    # Ground truth, computed once through the full Fourier basis.
    exact = filter_signal_with_fourier(G, s)

    for M in range(1, M_MAX + 1):
        g_M = compute_g_M(V[:, :M], alp[:M], beta[: M - 1], s)
        g_Mj = compute_g_M(V[:, : M + j], alp[: M + j], beta[: M + j - 1], s)
        lanczos_err[M - 1] = LA.norm(g_Mj - g_M)
        true_err[M - 1] = LA.norm(exact - g_M)
    return lanczos_err, true_err
def run() -> None:
    """Reproduce Example 1 of [1], restricted to the Lanczos method.

    Uses the filter g(t) = sin(0.5*pi*cos(pi*t)^2) * chi_[-1/2, 1/2]
    (no Chebyshev comparison) and plots the error estimates for an
    Erdős–Rényi graph and a Sensor graph.
    """
    N = 500   # number of vertices
    M = 200   # largest Krylov subspace size
    p = 0.04  # ER edge probability

    # Random integer signal, normalized as in the assignment.
    s = np.random.randint(1, 10000, N).astype(float)
    s /= LA.norm(s)

    G_ER = graphs.ErdosRenyi(N, p)
    G_S = graphs.Sensor(N)

    l_err_ER, t_err_ER = run_comparison_1_for_graph(G_ER, s, M)
    l_err_S, t_err_S = run_comparison_1_for_graph(G_S, s, M)
    plot_error_comparison(l_err_ER, t_err_ER, l_err_S, t_err_S)
    # plot_graphs(G_ER, G_S, s, N, p)

@ -0,0 +1,36 @@
import time

import numpy as np
import numpy.linalg as LA
from pygsp import graphs

from afgl.util.lanczos import lanczos
def run() -> None:
    """Measure the runtime of the Lanczos method on growing Erdős–Rényi graphs.

    Generates Erdős–Rényi graphs of increasing size (250, 500, 1000,
    2000, 4000, 8000) with edge probability p = 0.04 and reports the
    wall-clock time spent in the Lanczos iteration for each size.
    The original version stated this purpose (in Italian) but never
    actually timed anything.

    NOTE(review): the original docstring also mentioned a stopping
    threshold epsilon = 1e-2, which lanczos() does not expose — confirm
    whether that criterion should be implemented.

    Returns:
        None. Timings are printed to stdout.
    """
    n = 6
    p = 0.04
    M = 200  # Krylov basis size

    N_VALUES = 250 * (2 ** np.arange(n))
    for N in N_VALUES:
        # Random integer signal, normalized as in the assignment.
        s = np.random.randint(1, 10000, N).astype(float)
        s /= LA.norm(s)

        G = graphs.ErdosRenyi(N, p)
        G.compute_laplacian("combinatorial")
        L = G.L

        start = time.perf_counter()
        lanczos(L, s, M)
        elapsed = time.perf_counter() - start
        print(f"N = {N}: Lanczos took {elapsed:.3f} s")

@ -1,40 +0,0 @@
import numpy as np
import numpy.linalg as LA
"""
Arguments
L Real valued NxN symmetric matrix
s vector of size N
M natural number indicating basis size
Returns
-------
V : ndarray
M-dimensional vector with orthonormal columns.
alp : ndarray
M-dimensional array of scalars.
beta : ndarray
M-dimensional array of scalars.
"""
def lanczos(L, s, M):
    """Classic Lanczos three-term recurrence (no re-orthogonalization).

    Args:
        L: Real symmetric N x N matrix.
        s: Starting vector of size N.
        M: Number of basis vectors to build.

    Returns:
        [V, alp, beta] where V is the (N, M) matrix of Lanczos vectors,
        alp the M diagonal coefficients, and beta the M-1 off-diagonal
        coefficients of the tridiagonal projection T_M.
    """
    dim = len(s)
    alp = np.zeros(M)
    beta = np.zeros(M - 1)
    V = np.zeros((dim, M))
    # The first Lanczos vector is the normalized starting vector.
    V[:, 0] = s / LA.norm(s)
    for j in range(M):
        w = L @ V[:, j]
        alp[j] = np.dot(V[:, j], w)
        # Three-term recurrence: orthogonalize against the two most
        # recent basis vectors only.
        residual = w - alp[j] * V[:, j]
        if j > 0:
            residual -= beta[j - 1] * V[:, j - 1]
        if j < M - 1:
            beta[j] = LA.norm(residual)
            # NOTE(review): a vanishing beta[j] (breakdown) divides by
            # zero here; the original had no guard either.
            V[:, j + 1] = residual / beta[j]
    return [V, alp, beta]

@ -1,10 +1,13 @@
# src/afgl/main.py
import sys
import afgl.ex_1 as ex_1
from afgl.util.plot import plot_setup
def run() -> None:
    """Main entry point: configure plotting, then run experiment 1.

    The file previously contained two ``def run()`` definitions; the
    second silently shadowed the first, making its startup message dead
    code. They are merged into a single function here.
    """
    print("Starting the Accelerated Filtering Graphs Lanczos (AFGL) process...")
    plot_setup()
    ex_1.run()
    # t_2.run()
if __name__ == "__main__":

@ -0,0 +1,15 @@
import numpy as np
def build_T_matrix(alp, beta):
    """Assemble the symmetric tridiagonal Lanczos matrix T.

    Args:
        alp: Length-N vector of diagonal coefficients.
        beta: Length-(N-1) vector of off-diagonal coefficients.

    Returns:
        The (N, N) matrix with alp on the main diagonal and beta on the
        first sub- and super-diagonals.
    """
    diagonal = np.diag(alp)
    lower = np.diag(beta, -1)
    upper = np.diag(beta, 1)
    return diagonal + lower + upper

@ -0,0 +1,56 @@
import numpy as np
import numpy.linalg as LA
def double_orthogonalization(V, w, j):
    """Orthogonalize ``w`` in place against the first j+1 columns of V, twice.

    Classical Gram–Schmidt projection applied twice ("twice is enough")
    to limit the loss of orthogonality. Mutates and returns ``w``.

    NOTE(review): the original asked why projecting against columns
    0..j works well when the reference (Demmel) suggests 0..j-1 —
    still to be confirmed.
    """
    basis = V[:, : j + 1]
    # First pass removes most of the components along the basis...
    w -= basis @ (basis.T @ w)
    # ...and the second pass cleans up the rounding left by the first.
    w -= basis @ (basis.T @ w)
    return w
def no_orthogonalization(V, w, alp, beta, j):
    """Plain three-term Lanczos update (no extra re-orthogonalization).

    Subtracts from ``w`` its component along the current basis vector
    V[:, j] (weight alp[j]) and, when it exists, along the previous one
    V[:, j-1] (weight beta[j-1]). Returns a new array; the input ``w``
    is left unmodified.
    """
    updated = w - alp[j] * V[:, j]
    if j > 0:
        updated = updated - beta[j - 1] * V[:, j - 1]
    return updated
def lanczos(L, s, M):
    """Classic Lanczos method (without re-orthogonalization).

    Builds an orthonormal basis of the Krylov subspace K_M(L, s) and the
    coefficients of the tridiagonal projection T_M.

    Arguments
    ---------
    L : real symmetric N x N matrix.
    s : starting vector of size N.
    M : number of basis vectors requested.

    Returns
    -------
    V : (N, M) ndarray with orthonormal columns.
    alp : length-M array of diagonal coefficients.
    beta : length-(M-1) array of off-diagonal coefficients.

    On breakdown (a vanishing beta) the factorization built so far is
    returned, truncated to the iterations actually completed.
    """
    dim = len(s)
    alp = np.zeros(M)
    beta = np.zeros(M - 1)
    V = np.zeros((dim, M))
    # The first Lanczos vector is the normalized starting vector.
    V[:, 0] = s / LA.norm(s)

    for j in range(M):
        w = L @ V[:, j]
        alp[j] = np.dot(V[:, j], w)
        # Three-term recurrence against the two most recent vectors.
        w = no_orthogonalization(V, w, alp, beta, j)
        if j == M - 1:
            # Last iteration: no further basis vector is produced.
            break
        beta[j] = LA.norm(w)
        if beta[j] < 1e-14:
            # An (almost) zero residual means the Krylov subspace is
            # invariant: stop and return the truncated factorization.
            print("BREAKDOWN")
            return V[:, : j + 1], alp[: j + 1], beta[:j]
        V[:, j + 1] = w / beta[j]
    return V, alp, beta

@ -0,0 +1,28 @@
import matplotlib.pyplot as plt
import numpy as np
import scienceplots # noqa: F401
from pygsp import plotting
def latex_sci(val: float, decimals: int = 2) -> str:
    r"""Format a number as LaTeX scientific notation ``A \times 10^{B}``.

    Args:
        val: Value to format; 0 is rendered as the plain string "0".
        decimals: Digits after the decimal point of the mantissa.

    Returns:
        LaTeX source (without surrounding math delimiters).
    """
    if val == 0:
        return "0"
    power = int(np.floor(np.log10(abs(val))))
    coefficient = val / 10**power
    return rf"{coefficient:.{decimals}f} \times 10^{{{power}}}"
def latex_log_formatter(y: float, pos: int) -> str:
    """Matplotlib tick formatter rendering log-scale ticks as ``$10^{n}$``.

    Args:
        y: Tick value (expected to be a power of ten on a log axis).
        pos: Tick position; required by the FuncFormatter API, unused.

    Returns:
        The LaTeX label, or an empty string for non-positive values.
    """
    if y <= 0:
        return ""
    # Round to the nearest integer exponent.
    exponent = int(np.round(np.log10(y)))
    return f"$10^{{{exponent}}}$"
def plot_setup() -> None:
    """Configure global plotting state: pygsp backend and scienceplots style.

    Must be called once before any plotting (invoked from main.run()).
    Mutates module-level state of pygsp.plotting and matplotlib.
    """
    plotting.BACKEND = "matplotlib"
    plt.style.use(["science"])
    # TODO match font with document

@ -1,16 +1,31 @@
import numpy as np
import numpy.linalg as LA
from afgl.lanczos import lanczos
from afgl.ex_1 import compute_g_M, filter_signal_with_fourier, g
from afgl.util.build_T_matrix import build_T_matrix
from afgl.util.lanczos import lanczos
from pygsp import graphs
"""
Todo: better test case
"""
def test_g_evaluation_should_respect_chi():
    """g must apply g_extended inside [-1/2, 1/2] and return 0 outside.

    Renamed with the ``test_`` prefix: the original name was never
    collected (and therefore never run) by pytest.
    """
    A = np.array([[1, 0, 1], [0, 0, 0], [1, 0, 0]])
    # g_extended(0) == 1, while entries equal to 1 fall outside the support.
    expected_gA = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 1]])
    assert (expected_gA == g(A).astype(int)).all()


# Backward-compatible alias for any caller using the old name.
g_evaluation_should_respect_chi = test_g_evaluation_should_respect_chi
def test_g_evaluation_should_return_matrix_of_zeros():
    """g of a matrix of 1/2 entries truncates to the zero matrix.

    Renamed with the ``test_`` prefix: the original name was never
    collected (and therefore never run) by pytest.
    """
    A = 1 / 2 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    assert (g(A).astype(int) == np.zeros((3, 3))).all()


# Backward-compatible alias for any caller using the old name.
g_evaluation_should_return_matrix_of_zeros = test_g_evaluation_should_return_matrix_of_zeros
def test_lanczos_return_correct_solution():
def test_lanczos_return_correct_solution_with_dense():
"""Tests correctness of solution of Lx=s comparing Lanczos projected
solution with numpy solve function.
"""
N = 1000
M = 999
# Generate a good conditioned matrix
eigvals = np.random.uniform(10000, 100000, N)
Q, _ = LA.qr(np.random.randn(N, N))
L = Q @ np.diag(eigvals) @ Q.T
@ -18,7 +33,7 @@ def test_lanczos_return_correct_solution():
s = np.random.randint(1, 10, N)
[V, alp, beta] = lanczos(L, s, M)
T = np.diag(alp) + np.diag(beta, -1) + np.diag(beta, 1)
T = build_T_matrix(alp, beta)
x = LA.solve(L, s)
e_1 = np.zeros(M)
@ -27,3 +42,35 @@ def test_lanczos_return_correct_solution():
x_lanczos = V @ y
assert LA.norm(x - x_lanczos) < 1e-10
def test_function_g_with_graph_laplacian():
    """End-to-end check of the Lanczos filtering error on real graphs.

    For an Erdős–Rényi and a Sensor graph, the look-ahead estimate
    ||g_{M+j} - g_M|| must track the true error ||g(L)s - g_M|| within
    1e-2 at M = 200.
    """
    N = 1000
    p = 0.04
    j = 3
    # The original computed a geometric schedule 25 * 2**arange(5) and
    # immediately overwrote it; that dead code is removed and only the
    # final configuration kept.
    M_VALS = [200]
    for M in M_VALS:
        # Random integer signal, normalized as in the assignment.
        s = np.random.randint(1, 10000, N).astype(float)
        s /= LA.norm(s)
        GRAPHS = [graphs.ErdosRenyi(N, p), graphs.Sensor(N)]
        for G in GRAPHS:
            G.compute_laplacian()
            L = G.L
            V, alp, beta = lanczos(L, s, M + j)
            GLs = filter_signal_with_fourier(G, s)
            g_M = compute_g_M(V[:, 0:M], alp[0:M], beta[0 : M - 1], s)
            g_Mj = compute_g_M(V[:, 0 : M + j], alp[0 : M + j], beta[0 : M + j - 1], s)
            diff = LA.norm(g_Mj - g_M)
            e_M = LA.norm(GLs - g_M)
            # NOTE(review): this asserts the *estimate* is close to the
            # true error, not that either is small — confirm intent.
            assert abs(diff - e_M) < 1e-2

@ -15,6 +15,7 @@ dependencies = [
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
{ name = "pygsp" },
{ name = "scienceplots" },
{ name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "scipy", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
]
@ -24,6 +25,7 @@ requires-dist = [
{ name = "matplotlib", specifier = ">=3.10.8" },
{ name = "numpy", specifier = ">=2.0.0" },
{ name = "pygsp", specifier = ">=0.6.1" },
{ name = "scienceplots", specifier = ">=2.2.1" },
{ name = "scipy", specifier = ">=1.10.0" },
]
@ -737,6 +739,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
]
[[package]]
name = "scienceplots"
version = "2.2.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "matplotlib" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ae/a5/5f858668ca1a513033a7f0d55cd12b0940a12e822f9f61f317ce344e07c6/scienceplots-2.2.1.tar.gz", hash = "sha256:51ad98c420e499d3284d07b6447a4b3aedd8ec122aca39ba91c58205226c408a", size = 17966, upload-time = "2026-02-25T01:26:54.603Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cc/22/14e7b20f7d11f3e9ea9b5ae6acf9ea695c423eee785906cd5c5914a841dc/scienceplots-2.2.1-py3-none-any.whl", hash = "sha256:a1a9f670cbf5b59d92cdd3250be85b079ee4cf08bcaf2ace5ef57995be0b6c42", size = 30237, upload-time = "2026-02-25T01:26:53.298Z" },
]
[[package]]
name = "scipy"
version = "1.15.3"

Loading…
Cancel
Save