def vis(STdata):
    """Plot each column (node) of a (T, N) spatio-temporal array, one subplot per node.

    Parameters
    ----------
    STdata : array-like of shape (T, N)
        T observations for each of N nodes.

    Returns
    -------
    matplotlib.figure.Figure
    """
    N = STdata.shape[1]
    # Bug fixes vs. the draft: it plotted the global `fiveVTS[:, 0]` instead of
    # the argument's n-th column, hard-coded 5 subplot rows, passed (15, 5)
    # positionally as `sharex`, and called the non-existent `fig.set_height`.
    fig, ax = plt.subplots(N, 1, figsize=(15, 5))
    for n in range(N):
        ax[n].plot(STdata[:, n])
    # fig.set_width()
    fig.set_figheight(N * 5)
    return fig
First ST-GCN example: dividing train and test
ST-GCN
Try to divide the data into train and test sets (GNAR fiveNet)
# Imports, grouped stdlib / third-party (the original had a dangling bare
# `import` statement, which is a syntax error — removed).
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rpy2
import rpy2.robjects as ro
import torch
import torch.nn.functional as F
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
from scipy.interpolate import interp1d
from torch_geometric_temporal.nn.recurrent import GConvGRU
from tqdm import tqdm
class RecurrentGCN(torch.nn.Module):
    """GConvGRU recurrent graph convolution followed by a linear read-out to one value per node."""

    def __init__(self, node_features, filters):
        """node_features: input feature size per node; filters: GConvGRU hidden size."""
        super(RecurrentGCN, self).__init__()
        # Chebyshev filter order K = 2; read-out maps hidden state to a scalar.
        self.recurrent = GConvGRU(node_features, filters, 2)
        self.linear = torch.nn.Linear(filters, 1)

    def forward(self, x, edge_index, edge_weight):
        """One recurrent graph-convolution step, ReLU, then the linear read-out."""
        hidden = self.recurrent(x, edge_index, edge_weight)
        hidden = F.relu(hidden)
        return self.linear(hidden)
R
%load_ext rpy2.ipython
%%R
library(GNAR)
library(igraph)
R[write to console]: Loading required package: igraph
R[write to console]:
Attaching package: ‘igraph’
R[write to console]: The following objects are masked from ‘package:stats’:
decompose, spectrum
R[write to console]: The following object is masked from ‘package:base’:
union
R[write to console]: Loading required package: wordcloud
R[write to console]: Loading required package: RColorBrewer
Data
%%R
edges <- as.matrix(fiveNet)
data("fiveNode")
%R -o fiveVTS
%R -o edges
- nodes: 5
- time points: 200
functions
def vis(spatiotemporaldata):
    """Plot every node's series of a (T, N) array as stacked subplots, one per node.

    Parameters
    ----------
    spatiotemporaldata : array-like of shape (T, N)

    Returns
    -------
    matplotlib.figure.Figure
    """
    N = spatiotemporaldata.shape[1]
    # squeeze=False keeps `ax` 2-D so a single-node input (N == 1) still works;
    # without it plt.subplots returns a bare Axes and ax[n] raises TypeError.
    fig, ax = plt.subplots(N, 1, squeeze=False)
    for n in range(N):
        ax[n, 0].plot(spatiotemporaldata[:, n])
        ax[n, 0].set_title('node=' + str(n))
    # fig.set_width()
    fig.set_figheight(N * 2)
    fig.tight_layout()
    return fig
def vis2(spatiotemporaldata1, spatiotemporaldata2):
    """Overlay two (T, N) spatio-temporal arrays node-by-node for visual comparison.

    Parameters
    ----------
    spatiotemporaldata1, spatiotemporaldata2 : array-like of shape (T, N)
        Plotted as 'data1' and 'data2' respectively; N is taken from the first.

    Returns
    -------
    matplotlib.figure.Figure
    """
    N = spatiotemporaldata1.shape[1]
    # squeeze=False keeps `ax` 2-D so a single-node input (N == 1) still works
    # (same fix as vis): plt.subplots would otherwise return a bare Axes.
    fig, ax = plt.subplots(N, 1, squeeze=False)
    for n in range(N):
        ax[n, 0].plot(spatiotemporaldata1[:, n], label='data1')
        ax[n, 0].plot(spatiotemporaldata2[:, n], label='data2')
        ax[n, 0].set_title('node=' + str(n))
        ax[n, 0].legend()
    # fig.set_width()
    fig.set_figheight(N * 2)
    fig.tight_layout()
    return fig
# Build the edge list and graph constants from the R `fiveNet` adjacency matrix.
edges_tensor = torch.tensor(edges)
nonzero_indices = edges_tensor.nonzero()
fiveNet_edge = np.array(nonzero_indices).T

T = 200
N = 5  # number of Nodes
E = fiveNet_edge
V = np.array([1, 2, 3, 4, 5])
t = np.arange(0, T)
node_features = 1

edge_index = torch.tensor(E)
# All ten directed edges carry unit weight.
edge_attr = torch.tensor(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), dtype=torch.float32)

vis(fiveVTS);

# Chronological 80/20 train/test split of the (200, 5) series.
split = int(len(fiveVTS) * 0.8)
fiveVTS_train = fiveVTS[:split]
fiveVTS_test = fiveVTS[split:]
vis(fiveVTS_train);
vis(fiveVTS_test);
# Random Missing Values: knock out 16 rows per column (seeds 1,3,5,7,9) so each
# node has its own reproducible missing pattern.
_seeds = [1, 3, 5, 7, 9]
_missing = []
for _s in _seeds:
    np.random.seed(_s)
    _missing.append(np.random.choice(159, 16, replace=False))
seed_number1, seed_number2, seed_number3, seed_number4, seed_number5 = _missing

# Keep an untouched copy of the training data before inserting NaNs.
fiveVTS_train_backup = fiveVTS_train.copy()
for _col, _rows in enumerate(_missing):
    fiveVTS_train[_rows, _col] = float('nan')
vis(fiveVTS_train);
# 1. Mean imputation: fill each column's missing cells with that column's
# NaN-ignoring mean.
fiveVTS_train_mean = fiveVTS_train.copy()
for _col, _rows in enumerate([seed_number1, seed_number2, seed_number3,
                              seed_number4, seed_number5]):
    fiveVTS_train_mean[_rows, _col] = np.nanmean(fiveVTS_train_mean[:, _col])
vis(fiveVTS_train_mean);
# 1) ST-GCN on the mean-imputed series. One-step-ahead self-supervision:
# X is the series at t, y at t+1, over the (160, 5) training block.
mean_f_fiveVTS_train = torch.tensor(fiveVTS_train_mean).reshape(160, 5, 1).float()
mean_X_fiveVTS = mean_f_fiveVTS_train[:159, :, :]
mean_y_fiveVTS = mean_f_fiveVTS_train[1:, :, :]

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(mean_X_fiveVTS, mean_y_fiveVTS):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()

# In-sample fit and a 39-step test prediction driven by the true test inputs.
mean_fhat_fiveVTS = torch.stack([model(xt, edge_index, edge_attr) for xt in mean_X_fiveVTS]).detach().numpy()
xt_test = torch.tensor(fiveVTS_test.reshape(40, 5, 1)[:-1, :, :]).float()
mean_fhat_fiveVTS_forecast = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], mean_fhat_fiveVTS_forecast);
vis2(fiveVTS_train_mean, mean_fhat_fiveVTS);
# 2) Fourier transform: graph Fourier basis of a path graph over the 159*N
# stacked samples (w[i, j] = 1 iff |i - j| == 1).
_M = 159 * N
# Performance fix: the original filled w with an O(_M^2) Python double loop
# (~630k iterations); the two off-diagonals are set directly instead.
# The resulting matrix is identical.
w = np.zeros((_M, _M))
_i = np.arange(_M - 1)
w[_i, _i + 1] = 1
w[_i + 1, _i] = 1
d = np.array(w.sum(axis=1))
D = np.diag(d)
# Symmetric normalized graph Laplacian D^{-1/2} (D - w) D^{-1/2}.
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
# Forward GFT of the flattened round-1 fit.
fhatbar = Psi.T @ mean_fhat_fiveVTS.reshape(_M, 1)
# 3) Ebayes: empirical-Bayes thresholding of the graph Fourier coefficients
# (R package EbayesThresh via rpy2).
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
plt.plot(fhatbar)
plt.plot(fhatbar_threshed)
# 4) Inverse Fourier transform back to the (time, node) grid.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_mean_spatio_temporal = fhatbarhat.reshape(159, N, 1)
vis2(mean_fhat_fiveVTS, fhatbarhat_mean_spatio_temporal.reshape(159, 5));
# 5) ST-GCN retrained after writing the Ebayes-smoothed reconstruction back
# into the previously-missing cells.
for _col, _rows in enumerate([seed_number1, seed_number2, seed_number3,
                              seed_number4, seed_number5]):
    fiveVTS_train_mean[_rows, _col] = fhatbarhat_mean_spatio_temporal[_rows, _col, 0]
vis(fiveVTS_train_mean);

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(mean_X_fiveVTS, mean_y_fiveVTS):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
# NOTE(review): mean_X_fiveVTS / mean_y_fiveVTS were built before the
# re-imputation above, so training still sees the old values — confirm whether
# they should be rebuilt from the updated fiveVTS_train_mean.
mean_fhat_spatio_temporal = torch.stack([model(xt, edge_index, edge_attr) for xt in mean_X_fiveVTS]).detach().numpy()
mean_fhat_spatio_temporal_test = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], mean_fhat_spatio_temporal_test);
vis2(fhatbarhat_mean_spatio_temporal, mean_fhat_spatio_temporal);
# Iterative refinement: 50 rounds of GFT -> Ebayes threshold -> inverse GFT ->
# re-impute missing cells -> one further training epoch.
for i in tqdm(range(50)):
    ## GFT
    # NOTE(review): mean_fhat_fiveVTS is never recomputed inside the loop, so
    # the thresholded reconstruction is the same every round; only the model
    # keeps training — confirm intent.
    fhatbar = Psi.T @ mean_fhat_fiveVTS.reshape(159 * N, 1)
    ## Ebayes
    ebayesthresh = importr('EbayesThresh').ebayesthresh
    fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
    # plt.plot(fhatbar)
    # plt.plot(fhatbar_threshed)
    ## inverse GFT
    fhatbarhat = Psi @ fhatbar_threshed
    fhatbarhat_mean_spatio_temporal = fhatbarhat.reshape(159, N, 1)
    ## STGCN: write the smoothed values back into the missing cells.
    # Bug fix: the original indexed the RHS with seed_number1 for EVERY column,
    # re-imputing column c from rows selected for column 0. Every other
    # occurrence of this pattern in the file (e.g. the 5) and 9) sections)
    # pairs seed_number_k with column k-1, so that pairing is restored here.
    for col, rows in enumerate([seed_number1, seed_number2, seed_number3,
                                seed_number4, seed_number5]):
        fiveVTS_train_mean[rows, col] = fhatbarhat_mean_spatio_temporal[rows, col, 0]
    # vis(fiveVTS_train_mean);
    # model = RecurrentGCN(node_features=1, filters=4)
    # optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    # model.train()
    for epoch in range(1):
        for xt, yt in zip(mean_X_fiveVTS, mean_y_fiveVTS):
            y_hat = model(xt, edge_index, edge_attr)
            cost = torch.mean((y_hat - yt) ** 2)
            cost.backward()
            optimizer.step()
            optimizer.zero_grad()
    mean_fhat_spatio_temporal = torch.stack([model(xt, edge_index, edge_attr) for xt in mean_X_fiveVTS]).detach().numpy()
    mean_fhat_spatio_temporal_test = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
    # vis2(fiveVTS_test[1:], mean_fhat_spatio_temporal_test);
    # vis2(fiveVTS_train_backup, mean_fhat_spatio_temporal);
vis2(fiveVTS_train_backup, mean_fhat_spatio_temporal);
vis2(fiveVTS_train_backup, mean_fhat_spatio_temporal);
# 6) Fourier transform of the round-2 fit (same path-graph Laplacian as 2)).
w = np.zeros((159 * N, 159 * N))
for i in range(159 * N):
    for j in range(159 * N):
        # Path-graph adjacency: 1 on the two off-diagonals, 0 elsewhere.
        w[i, j] = 1.0 if abs(i - j) == 1 else 0.0
d = np.array(w.sum(axis=1))
D = np.diag(d)
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
fhatbar = Psi.T @ mean_fhat_spatio_temporal.reshape(159 * N, 1)
power = fhatbar ** 2
# 7) Ebayes thresholding.
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
plt.plot(fhatbar)
plt.plot(fhatbar)
plt.plot(fhatbar_threshed)
# 8) Inverse Fourier transform.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_mean_spatio_temporal2 = fhatbarhat.reshape(159, N, 1)
vis2(mean_fhat_spatio_temporal, fhatbarhat_mean_spatio_temporal2.reshape(159, 5));
# 9) ST-GCN, third round of the mean pipeline.
# NOTE(review): the RHS reads fhatbarhat_mean_spatio_temporal (the round-1
# reconstruction), not fhatbarhat_mean_spatio_temporal2 — confirm whether the
# round-2 result was intended. Preserved as written.
for _col, _rows in enumerate([seed_number1, seed_number2, seed_number3,
                              seed_number4, seed_number5]):
    fiveVTS_train_mean[_rows, _col] = fhatbarhat_mean_spatio_temporal[_rows, _col, 0]
vis(fiveVTS_train_mean);

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(mean_X_fiveVTS, mean_y_fiveVTS):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
mean_fhat_spatio_temporal2 = torch.stack([model(xt, edge_index, edge_attr) for xt in mean_X_fiveVTS]).detach().numpy()
mean_fhat_spatio_temporal_test2 = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], mean_fhat_spatio_temporal_test2);
vis2(fhatbarhat_mean_spatio_temporal2, mean_fhat_spatio_temporal2);
# 10) Fourier transform, round 4 of the mean pipeline.
w = np.zeros((159 * N, 159 * N))
for i in range(159 * N):
    for j in range(159 * N):
        w[i, j] = 1.0 if abs(i - j) == 1 else 0.0
d = np.array(w.sum(axis=1))
D = np.diag(d)
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
# NOTE(review): this transforms mean_fhat_spatio_temporal (round-2 fit), not
# mean_fhat_spatio_temporal2 — confirm intent. Preserved as written.
fhatbar = Psi.T @ mean_fhat_spatio_temporal.reshape(159 * N, 1)
power = fhatbar ** 2
# 11) Ebayes thresholding.
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
# 12) Inverse Fourier transform.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_mean_spatio_temporal3 = fhatbarhat.reshape(159, N, 1)
# 13) ST-GCN retrain.
# NOTE(review): imputation again reads the round-1 reconstruction, not ...3.
for _col, _rows in enumerate([seed_number1, seed_number2, seed_number3,
                              seed_number4, seed_number5]):
    fiveVTS_train_mean[_rows, _col] = fhatbarhat_mean_spatio_temporal[_rows, _col, 0]
vis(fiveVTS_train_mean);

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(mean_X_fiveVTS, mean_y_fiveVTS):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
mean_fhat_spatio_temporal3 = torch.stack([model(xt, edge_index, edge_attr) for xt in mean_X_fiveVTS]).detach().numpy()
mean_fhat_spatio_temporal_test3 = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], mean_fhat_spatio_temporal_test3);
vis2(fhatbarhat_mean_spatio_temporal3, mean_fhat_spatio_temporal3);
# Per-node test-set errors for the four mean-pipeline rounds.
# NOTE(review): np.mean of the signed differences is a bias, not an MSE —
# confirm whether squaring was intended (the GNAR section squares).
one = [np.mean(fiveVTS_test[1:, i] - mean_fhat_fiveVTS_forecast.reshape(39, 5)[:, i]) for i in range(N)]
two = [np.mean(fiveVTS_test[1:, i] - mean_fhat_spatio_temporal_test.reshape(39, 5)[:, i]) for i in range(N)]
three = [np.mean(fiveVTS_test[1:, i] - mean_fhat_spatio_temporal_test2.reshape(39, 5)[:, i]) for i in range(N)]
four = [np.mean(fiveVTS_test[1:, i] - mean_fhat_spatio_temporal_test3.reshape(39, 5)[:, i]) for i in range(N)]
pd.DataFrame({'one': one, 'two': two, 'three': three, 'four': four})
one | two | three | four | |
---|---|---|---|---|
0 | -0.196310 | -0.189000 | -0.173563 | -0.200559 |
1 | -0.161632 | -0.135003 | -0.142250 | -0.159892 |
2 | 0.079347 | 0.106893 | 0.108179 | 0.079011 |
3 | -0.267653 | -0.244438 | -0.248220 | -0.269292 |
4 | -0.162464 | -0.135709 | -0.130221 | -0.167336 |
# 2. Linear Interpolation: fill the NaN cells column-wise by linear
# interpolation, then zero out anything still missing (leading NaNs).
_df = pd.DataFrame(fiveVTS_train).interpolate(method='linear').fillna(0)
linear_fiveVTS_train = np.array(_df).reshape(160, 5)
# 1) ST-GCN on the linearly-interpolated series, one-step-ahead targets.
linear_f_fiveVTS_train = torch.tensor(linear_fiveVTS_train).reshape(160, 5, 1).float()
linear_X_fiveVTS = linear_f_fiveVTS_train[:159, :, :]
linear_y_fiveVTS = linear_f_fiveVTS_train[1:, :, :]

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(linear_X_fiveVTS, linear_y_fiveVTS):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
linear_fhat_fiveVTS = torch.stack([model(xt, edge_index, edge_attr) for xt in linear_X_fiveVTS]).detach().numpy()
xt_test = torch.tensor(fiveVTS_test.reshape(40, 5, 1)[:-1, :, :]).float()
linear_fhat_fiveVTS_forecast = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], linear_fhat_fiveVTS_forecast);
# NOTE(review): compares the input with itself as a tensor, not with the fit
# linear_fhat_fiveVTS — confirm intent. Preserved as written.
vis2(linear_fiveVTS_train, linear_f_fiveVTS_train);
# 2) Fourier transform of the linear-pipeline fit (path-graph Laplacian, 159*N).
w = np.zeros((159 * N, 159 * N))
for i in range(159 * N):
    for j in range(159 * N):
        w[i, j] = 1.0 if abs(i - j) == 1 else 0.0
d = np.array(w.sum(axis=1))
D = np.diag(d)
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
fhatbar = Psi.T @ linear_fhat_fiveVTS.reshape(159 * N, 1)
power = fhatbar ** 2
# 3) Ebayes thresholding.
plt.plot(fhatbar.reshape(159, 5)[:, 0] ** 2)
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
plt.plot(fhatbar)
plt.plot(fhatbar_threshed)
# 4) Inverse Fourier transform.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_linear_spatio_temporal = fhatbarhat.reshape(159, N, 1)
vis2(linear_fhat_fiveVTS, fhatbarhat_linear_spatio_temporal.reshape(159, 5));
# 5) ST-GCN retrained directly on the Ebayes-smoothed reconstruction
# (note: the linear pipeline trains on the smoothed series itself, unlike the
# mean pipeline which re-imputes only the missing cells).
linear_spatio_temporal = torch.tensor(fhatbarhat_linear_spatio_temporal).reshape(159, 5, 1).float()
linear_X_spatio_temporal = linear_spatio_temporal[:158, :, :]
linear_y_spatio_temporal = linear_spatio_temporal[1:, :, :]

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(linear_X_spatio_temporal, linear_y_spatio_temporal):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
linear_fhat_spatio_temporal = torch.stack([model(xt, edge_index, edge_attr) for xt in linear_X_spatio_temporal]).detach().numpy()
linear_fhat_spatio_temporal_test = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], linear_fhat_spatio_temporal_test);
vis2(fhatbarhat_linear_spatio_temporal, linear_fhat_spatio_temporal);
# 6) Fourier transform, round 2 of the linear pipeline (length shrinks to
# 158 because each ST-GCN round drops one time step).
w = np.zeros((158 * N, 158 * N))
for i in range(158 * N):
    for j in range(158 * N):
        w[i, j] = 1.0 if abs(i - j) == 1 else 0.0
d = np.array(w.sum(axis=1))
D = np.diag(d)
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
fhatbar = Psi.T @ linear_fhat_spatio_temporal.reshape(158 * N, 1)
power = fhatbar ** 2
# 7) Ebayes thresholding.
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
plt.plot(fhatbar)
plt.plot(fhatbar_threshed)
# 8) Inverse Fourier transform.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_linear_spatio_temporal2 = fhatbarhat.reshape(158, N, 1)
vis2(linear_fhat_spatio_temporal, fhatbarhat_linear_spatio_temporal2.reshape(158, 5));
# 9) ST-GCN, round 3 of the linear pipeline (157 usable steps).
linear_spatio_temporal2 = torch.tensor(fhatbarhat_linear_spatio_temporal2).reshape(158, 5, 1).float()
linear_X_spatio_temporal2 = linear_spatio_temporal2[:157, :, :]
linear_y_spatio_temporal2 = linear_spatio_temporal2[1:, :, :]

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(linear_X_spatio_temporal2, linear_y_spatio_temporal2):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
linear_fhat_spatio_temporal2 = torch.stack([model(xt, edge_index, edge_attr) for xt in linear_X_spatio_temporal2]).detach().numpy()
linear_fhat_spatio_temporal_test2 = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], linear_fhat_spatio_temporal_test2);
vis2(fhatbarhat_linear_spatio_temporal2, linear_fhat_spatio_temporal2);
# Per-node test errors for the first three linear-pipeline rounds.
# NOTE(review): mean of signed differences (bias), not MSE — confirm intent.
one = [np.mean(fiveVTS_test[1:, i] - linear_fhat_fiveVTS_forecast.reshape(39, 5)[:, i]) for i in range(N)]
two = [np.mean(fiveVTS_test[1:, i] - linear_fhat_spatio_temporal_test.reshape(39, 5)[:, i]) for i in range(N)]
three = [np.mean(fiveVTS_test[1:, i] - linear_fhat_spatio_temporal_test2.reshape(39, 5)[:, i]) for i in range(N)]
pd.DataFrame({'one': one, 'two': two, 'three': three})
# 10) Fourier transform, round 3 of the linear pipeline (157 steps).
w = np.zeros((157 * N, 157 * N))
for i in range(157 * N):
    for j in range(157 * N):
        w[i, j] = 1.0 if abs(i - j) == 1 else 0.0
d = np.array(w.sum(axis=1))
D = np.diag(d)
L = np.array(np.diag(1 / np.sqrt(d)) @ (D - w) @ np.diag(1 / np.sqrt(d)))
lamb, Psi = np.linalg.eigh(L)
Lamb = np.diag(lamb)
fhatbar = Psi.T @ linear_fhat_spatio_temporal2.reshape(157 * N, 1)
# 11) Ebayes thresholding.
ebayesthresh = importr('EbayesThresh').ebayesthresh
fhatbar_threshed = ebayesthresh(FloatVector(fhatbar))
plt.plot(fhatbar)
plt.plot(fhatbar_threshed)
# 12) Inverse Fourier transform.
fhatbarhat = Psi @ fhatbar_threshed
fhatbarhat_linear_spatio_temporal3 = fhatbarhat.reshape(157, N, 1)
vis2(linear_fhat_spatio_temporal2, fhatbarhat_linear_spatio_temporal3.reshape(157, 5));
# 13) ST-GCN, round 4 of the linear pipeline (156 usable steps).
linear_spatio_temporal3 = torch.tensor(fhatbarhat_linear_spatio_temporal3).reshape(157, 5, 1).float()
linear_X_spatio_temporal3 = linear_spatio_temporal3[:156, :, :]
linear_y_spatio_temporal3 = linear_spatio_temporal3[1:, :, :]

model = RecurrentGCN(node_features=1, filters=4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
model.train()
for epoch in tqdm(range(50)):
    for xt, yt in zip(linear_X_spatio_temporal3, linear_y_spatio_temporal3):
        y_hat = model(xt, edge_index, edge_attr)
        cost = torch.mean((y_hat - yt) ** 2)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
linear_fhat_spatio_temporal3 = torch.stack([model(xt, edge_index, edge_attr) for xt in linear_X_spatio_temporal3]).detach().numpy()
linear_fhat_spatio_temporal_test3 = torch.stack([model(xt, edge_index, edge_attr) for xt in xt_test]).detach().numpy()
vis2(fiveVTS_test[1:], linear_fhat_spatio_temporal_test3);
vis2(fhatbarhat_linear_spatio_temporal3, linear_fhat_spatio_temporal3);
# Per-node test errors for all four linear-pipeline rounds.
# NOTE(review): mean of signed differences (bias), not MSE — confirm intent.
one = [np.mean(fiveVTS_test[1:, i] - linear_fhat_fiveVTS_forecast.reshape(39, 5)[:, i]) for i in range(N)]
two = [np.mean(fiveVTS_test[1:, i] - linear_fhat_spatio_temporal_test.reshape(39, 5)[:, i]) for i in range(N)]
three = [np.mean(fiveVTS_test[1:, i] - linear_fhat_spatio_temporal_test2.reshape(39, 5)[:, i]) for i in range(N)]
four = [np.mean(fiveVTS_test[1:, i] - linear_fhat_spatio_temporal_test3.reshape(39, 5)[:, i]) for i in range(N)]
pd.DataFrame({'one': one, 'two': two, 'three': three, 'four': four})
3. GNAR
mean
%R -i fiveVTS_train_mean
%%R
<- predict(GNARfit(vts = fiveVTS_train_mean[1:160,], net = fiveNet, alphaOrder = 2, betaOrder = c(1, 1)),n.ahead=40) meanprediction
%R -o meanprediction
# Per-step GNAR forecast errors over the 40-step test horizon, then the
# mean squared error per node.
a = [meanprediction.reshape(40, 5)[i] - fiveVTS_test[i] for i in range(40)]
((pd.DataFrame(a)) ** 2).mean(axis=0)
0 1.256051
1 1.083987
2 1.117363
3 0.952564
4 1.943335
dtype: float64
**2).mean(axis=0).mean() ((pd.DataFrame(a))
1.270659819100067