
Hyperparameter optimization for the Informer time-series forecasting model?

Asked on 2023-03-28 11:17:18
0 answers · 0 followers · 389 views

I am using sklearn's RandomizedSearchCV() for randomized hyperparameter search, but the subsequent fit() call raises TypeError: fit() takes from 2 to 3 positional arguments but 5 were given. The inputs I pass to fit() are the same ones that the _process_one_batch function of the Exp_Informer class in exp_informer.py feeds to the model, so I am not sure whether the problem is still in the data input. Any advice would be greatly appreciated.
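For reference, a minimal sketch of the mismatch (the fit() signature shown is sklearn's documented search-CV interface; batch_x and the other tensors come from the script below):

# sklearn's search wrappers follow the standard estimator interface,
# roughly RandomizedSearchCV.fit(X, y=None, *, groups=None, **fit_params),
# i.e. at most two positional data arguments besides self.
rand_model.fit(X, y)                                           # accepted
rand_model.fit(batch_x, batch_x_mark, dec_inp, batch_y_mark)   # 5 positionals counting self -> TypeError

The full script is below: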

import os
import numpy as np
import pandas as pd

import torch
from torch.utils.data import Dataset, DataLoader
# utils.tools ships the Informer repo's own StandardScaler; importing it after
# sklearn's would silently shadow the sklearn version, so only one is kept.
from utils.tools import StandardScaler
from utils.timefeatures import time_features
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from scipy.stats import uniform
from models import model

import warnings
warnings.filterwarnings('ignore')


class Dataset_Custom(Dataset):
    def __init__(self, root_path='./data/TEC_Dataset/', flag='train', size=None,
                 features='M', data_path='3_years_data.csv',
                 target='F324', scale=True, inverse=False, timeenc=0, freq='h', cols=None,
                 ):

        # size = [seq_len, label_len, pred_len]:
        # encoder input length, decoder start-token length, forecast horizon
        if size is None:
            self.seq_len = 96
            self.label_len = 48
            self.pred_len = 24
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train':0, 'val':1, 'test':2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols = cols
        self.root_path = root_path
        self.data_path = data_path
        self.use_gpu = False
        self.use_multi_gpu = False
        self.devices = '0,1,2,3'
        self.gpu = 0
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        '''
        df_raw.columns: ['date', ...(other features), target feature]
        '''
        # cols = list(df_raw.columns)
        if self.cols:
            cols = self.cols.copy()
            cols.remove(self.target)
        else:
            cols = list(df_raw.columns)
            cols.remove(self.target)
            cols.remove('date')
        df_raw = df_raw[['date'] + cols + [self.target]]

        # Chronological 70/10/20 train/val/test split; the val and test windows
        # start seq_len earlier so their first sample has a full encoder input
        num_train = int(len(df_raw) * 0.7)
        num_test = int(len(df_raw) * 0.2)
        num_vali = len(df_raw) - num_train - num_test
        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
        border2s = [num_train, num_train + num_vali, len(df_raw)]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features=='M' or self.features=='MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features=='S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
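        # time_features converts the date column into model inputs: timeenc=0
        # yields discrete month/day/weekday/hour fields, timeenc=1 yields
        # normalized 'timeF' features for the given freq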
        data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)

        self.data_x = data[border1:border2]
        if self.inverse:
            # keep targets in original units so predictions can be inverse-compared
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
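        # Encoder window is [s_begin, s_end); decoder window [r_begin, r_end) =
        # label_len known steps overlapping the encoder tail plus pred_len future steps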

        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatenate([self.data_x[r_begin:r_begin + self.label_len], self.data_y[r_begin + self.label_len:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        # number of complete (seq_len + pred_len) sliding windows in the split
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)

    def _get_data(self, flag):

        embed = 'timeF'
        batch_size = 32
        freq = 'h'
        seq_len = 96
        label_len = 48
        pred_len = 24
        num_workers = 0
        data = 'TEC3y'

        data_dict = {
            'WTH': Dataset_Custom,
            'ECL': Dataset_Custom,
            'Solar': Dataset_Custom,
            'TEC3y': Dataset_Custom,
            'TEC5y': Dataset_Custom,
            'TEC8y': Dataset_Custom,
            'TEC10Y': Dataset_Custom,
            'custom': Dataset_Custom,
        }
        Data = data_dict[data]
        timeenc = 0 if embed!='timeF' else 1

        if flag == 'test':
            shuffle_flag = False
        else:
            shuffle_flag = True
        drop_last = True
        data_set = Data(
            root_path=self.root_path,
            data_path=self.data_path,
            flag=flag,
            size=[seq_len, label_len, pred_len],
            features=self.features,
            target=self.target,
            inverse=self.inverse,
            timeenc=timeenc,
            freq=freq,
            cols=self.cols
        )
        print(flag, len(data_set))
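        # drop_last=True keeps every batch the same size; the test split also
        # disables shuffling so evaluation stays in chronological order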
        data_loader = DataLoader(
            data_set,
            batch_size=batch_size,
            shuffle=shuffle_flag,
            num_workers=num_workers,
            drop_last=drop_last)

        return data_set, data_loader

    def _acquire_device(self):
        if self.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu) if not self.use_multi_gpu else self.devices
            device = torch.device('cuda:{}'.format(self.gpu))
            print('Use GPU: cuda:{}'.format(self.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

print('Data loading complete')


train_set, train_loader = Dataset_Custom()._get_data(flag='train')
print(train_set)
print(train_loader)

'''
a = Dataset_Custom().data_x
b, c, d, e = Dataset_Custom().__getitem__(0)
print(a.shape)
print(b.shape)
print(b)
print(c.shape)
print(c)
print(d.shape)
print(d)
print(e.shape)
print(e)

'''
print('Starting hyperparameter search')
rand = model.Informer(enc_in=324, dec_in=324, c_out=324, out_len=24, factor=5, d_model=512, n_heads=8,
                      e_layers=3, d_layers=2, d_ff=512, attn='prob', embed='fixed', freq='h', activation='gelu',
                      output_attention=False, distil=True, mix=True, device=torch.device('cuda:0'), seq_len=96,
                      label_len=48)

param_dict = dict(
    dropout=uniform(loc=0, scale=0.5),        # dropout search space; must lie in [0, 1)
                                              # (original scale=4 would sample invalid rates > 1)
    early_stopping_patience=range(3, 15, 1),  # early-stopping patience search space
    lr=np.linspace(0.0001, 0.000001, 50),     # learning-rate search space
    epoch=range(100, 400, 10),                # epoch-count search space
)
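# Note: RandomizedSearchCV can only sample parameters that the estimator exposes
# via sklearn's get_params()/set_params() interface, so these keys must match
# constructor arguments of a sklearn-compatible estimator.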

rand_model = RandomizedSearchCV(rand,
                                param_distributions=param_dict,
                                n_iter=300,
                                scoring='neg_mean_squared_error',
                                verbose=3,
                                n_jobs=-1,
                                refit=True,
                                cv=5,
                                pre_dispatch=-2,
                                random_state=0,
                                return_train_score=False,
                                error_score=np.nan
                                )
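# Note: cv=5 here means plain (unshuffled) KFold; for ordered time-series data
# sklearn's TimeSeriesSplit is the usual choice so no fold trains on future samples.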

# Acquire the device once, outside the batch loop
device = Dataset_Custom()._acquire_device()

for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
    batch_x = batch_x.float().to(device)
    batch_y = batch_y.float()

    batch_x_mark = batch_x_mark.float().to(device)
    batch_y_mark = batch_y_mark.float().to(device)
    # Decoder input: the label_len (48) known steps followed by pred_len (24) zeros
    inp = torch.zeros([batch_y.shape[0], 24, batch_y.shape[-1]]).float()
    dec_inp = torch.cat([batch_y[:, :48, :], inp], dim=1).float().to(device)

    rand_model.fit(batch_x, batch_x_mark, dec_inp, batch_y_mark)  # <-- TypeError raised here
    best_estimator = rand_model.best_estimator_
    print(best_estimator)

    print(rand_model.best_score_)
