import warnings
import scipy
warnings.filterwarnings('ignore')
import os
import math
import numpy as np, pandas as pd, math
import matplotlib.pyplot as plt
import geopandas as gp
import copy
import seaborn as sns
###
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn.utils.prune as prune
from torch.optim import LBFGS
###
from pysal.lib import weights
import pysal as ps
import esda
import spreg
###
from scipy import sparse
from scipy.sparse import coo_matrix
from scipy.sparse import diags
from scipy.sparse import csr_matrix
from scipy.spatial import distance
from scipy.sparse import eye
from scipy import sparse
from scipy import stats
from scipy.sparse import csgraph
from scipy.stats import norm
import scipy.io as sio
###
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import median_absolute_error as mae
from sklearn.neighbors import NearestNeighbors
from numpy.linalg import *
from scipy.optimize import minimize
from sklearn import metrics
import 数据生成
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
def evaluate_MAPE(model, input_x, input_y, idx, target_tensor): ## idx: use ids in the validation set
    '''
    Monitor the Mean Absolute Percentage Error (MAPE) of the validation set
    during training.

    Parameters
    ----------
    model : nn.Module whose forward accepts (input_x, input_y).
    input_x, input_y : model inputs (presumably feature tensor and spatial
        input — TODO confirm against the caller; `step` below forwards only
        input_x, so the two call conventions differ).
    idx : indices selecting the validation rows of output/target.
    target_tensor : ground-truth tensor aligned with the model output.

    Returns
    -------
    float : mean over idx of |prediction - target| / |target|.
    '''
    model.eval()
    # Evaluation only — skip autograd bookkeeping.
    with torch.no_grad():
        output = model(input_x, input_y)
        # BUG FIX: `reduce=False` is a legacy, long-deprecated flag; the
        # supported spelling for an element-wise loss is reduction='none'.
        MAPE = F.l1_loss(output[idx], target_tensor[idx], reduction='none') / abs(target_tensor[idx])
        return MAPE.mean().item()
def step(model, optimizer, input_x, input_y, idx, target_tensor):
    '''
    Perform one optimisation step on the training rows selected by `idx`.

    Computes the MSE between model(input_x) and target_tensor on those rows,
    backpropagates, and applies the optimizer update. Note that `input_y` is
    accepted but not forwarded to the model here.

    Returns
    -------
    float : the scalar MSE loss value for this step.
    '''
    predictions = model(input_x)
    loss = F.mse_loss(predictions[idx], target_tensor[idx])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.mean().item()
# Model
class HNET(nn.Module):
    '''
    Simple MLP: a stack of Linear+activation hidden layers followed by a
    final linear output layer.

    Parameters
    ----------
    f_in : input feature dimension.
    n_classes : output dimension.
    hidden : list of hidden-layer widths; [] means a single linear map.
    use_bias : whether hidden Linear layers carry a bias.
    activation : activation module inserted after each hidden Linear.

    NOTE(review): preserved quirks of the original — with hidden == [] the
    output layer uses the default bias=True regardless of `use_bias`, while
    with hidden layers the output layer always has bias=False. Confirm this
    asymmetry is intentional before changing it.
    '''
    def __init__(self, f_in, n_classes, hidden=[16], use_bias=True, activation=nn.ReLU()):
        # FIX: original called super().__init__() separately in each branch
        # and, in the hidden == [] branch, left self.layers as a plain Python
        # list (not a registered nn module) and never set f_in/n_classes/
        # use_bias attributes. One init path now covers both cases.
        super().__init__()
        self.f_in = f_in
        self.n_classes = n_classes
        self.use_bias = use_bias
        self.dropouts = []
        if not hidden:
            # No hidden layers: the model is a single linear map.
            self.layers = nn.Sequential()
            self.out_layer = nn.Linear(f_in, n_classes)
        else:
            layers = []
            # Pair each layer's input width with its output width:
            # f_in -> hidden[0] -> hidden[1] -> ...
            for d_in, d_out in zip([f_in] + hidden[:-1], hidden):
                layers.append(nn.Linear(d_in, d_out, bias=use_bias))
                layers.append(activation)
            self.layers = nn.Sequential(*layers)
            self.out_layer = nn.Linear(hidden[-1], n_classes, bias=False)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return self.out_layer(x)
class MNET(nn.Module):
    '''
    Frozen masked linear map.

    Wraps a bias-free Linear whose weight is fixed to the given matrix M,
    with a binary pruning mask applied on top (entries where mask == 0 are
    zeroed). No parameter of this layer is trainable.
    '''
    def __init__(self, f_in, n_classes, M, mask):
        super().__init__()
        self.f_in = f_in
        self.n_classes = n_classes
        linear = nn.Linear(f_in, n_classes, bias=False)
        linear.weight = Parameter(M)
        # This layer is a fixed structural operator — freeze everything.
        for param in linear.parameters():
            param.requires_grad = False
        # Keep the sparsity pattern enforced via the pruning mechanism.
        self.out_layer = prune.custom_from_mask(linear, name='weight', mask=mask)

    def forward(self, x):
        # Ensure x has shape (batch_size, f_in) whatever its leading dims.
        flat = x.view(-1, self.f_in)
        return self.out_layer(flat)
class bNET(nn.Module):
    '''
    Linear layer with frozen all-ones weights: output_j = sum(x) + bias_j.

    Only the bias is trainable; the weight matrix is fixed to ones and
    excluded from gradient updates.
    '''
    def __init__(self, f_in, n_classes):
        super().__init__()
        self.layers = []
        self.dropouts = []
        self.out_layer = nn.Linear(f_in, n_classes)
        # BUG FIX: original used torch.ones(f_in).unsqueeze(0), i.e. shape
        # (1, f_in), which only matches the Linear's weight when
        # n_classes == 1. Shape (n_classes, f_in) is identical for
        # n_classes == 1 and correct for any n_classes.
        self.out_layer.weight = Parameter(torch.ones(n_classes, f_in))
        self.out_layer.weight.requires_grad = False  # weights frozen; only bias trains

    def forward(self, x):
        return self.out_layer(x)
class SARNET(nn.Module):
def __init__(self, d_x, n_y, W1, W2, mask1, mask2, M, xhidden=[16], yhidden=[16]):
super().__init__()
self.d = d_x
self.n = n_y
self.W1 = W1
self.W2 = W2
self.xhidden = xhidden
self.yhidden = yhidden
self.HX1 = HNET(2, 1, xhidden, activation=nn.ReLU())
self.HX2 = HNET(2, 1, xhidden, activation=nn.ReLU())
self.MX1 = MNET(n_y, n_y, M, mask1
# --- NOTE(review): the lines below are non-code residue from the web page
# --- this file was copied from (site footer / reposting notice), and the
# --- preceding SARNET.__init__ is truncated mid-statement at the copy
# --- boundary. Commented out so the residue no longer reads as code;
# --- original text preserved verbatim:
# 本站资源均来自互联网,仅供研究学习,禁止违法使用和商用,产生法律纠纷本站概不负责!如果侵犯了您的权益请与我们联系!
# 转载请注明出处: 免费源码网-免费的源码资源网站 » python实现了一个基于神经网络的模型训练和评估流程,涉及到多种自定义神经网络架构的定义以及利用这些模型对生成的数据进行处理
# 发表评论 取消回复