Kaggle Jane Street Market Prediction

# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" 
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import datatable as dt
from tqdm import tqdm
from random import choices
import gc

import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch_lightning.callbacks.early_stopping import EarlyStopping

Preprocessing

%%time
data = dt.fread('/kaggle/input/jane-street-market-prediction/train.csv')
train = data.to_pandas()
del data
gc.collect()

train = train.query('date > 85').reset_index(drop = True) 
train = train[train['weight'] != 0]
torch.manual_seed(2021)
features = [c for c in train.columns if 'feature' in c]
# fill missing feature values with the training-set column mean
for col in features:
    col_mean = train[col].mean()
    train[col] = train[col].fillna(col_mean)


train['action'] = ((train['resp'].values) > 0).astype(int)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
X_train = train.loc[:, train.columns.str.contains('feature')].values
y_train = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T
X_train.shape
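The targets are multi-label: one 0/1 column per entry in resp_cols, so a single network predicts all five response horizons at once. A quick shape check (a minimal sketch, nothing below depends on it):

# X_train should be (n_samples, 130) and y_train (n_samples, 5) after the filtering above
print(X_train.shape, y_train.shape)
assert y_train.shape[1] == len(resp_cols)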
class MLP_Dataset:
    def __init__(self, dataset, targets):
        self.dataset = dataset
        self.targets = targets

    def __len__(self):
        return self.dataset.shape[0]

    def __getitem__(self, item):
        return {
            'x': torch.tensor(self.dataset[item, :], dtype=torch.float),
            'y': torch.tensor(self.targets[item], dtype=torch.float)
        }

PurgedGroupTimeSeriesSplit

https://www.kaggle.com/gogo827jz/jane-street-ffill-xgboost-purgedtimeseriescv

from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args

# modified code for group gaps; source
# https://github.com/getgaurav2/scikit-learn/blob/d4a3af5cc9da3a76f0266932644b884c99724c57/sklearn/model_selection/_split.py#L2243
class PurgedGroupTimeSeriesSplit(_BaseKFold):
    """Time Series cross-validator variant with non-overlapping groups.
    Allows for a gap in groups to avoid potentially leaking info from
    train into test if the model has windowed or lag features.
    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals according to a
    third-party provided group.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.
    The same group will not appear in two different folds (the number of
    distinct groups has to be at least equal to the number of folds).
    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    max_train_group_size : int, default=Inf
        Maximum group size for a single training set.
    group_gap : int, default=None
        Number of groups to leave out between the train and test sets.
    max_test_group_size : int, default=Inf
        Maximum group size for a single test set.
    """

    @_deprecate_positional_args
    def __init__(self,
                 n_splits=5,
                 *,
                 max_train_group_size=np.inf,
                 max_test_group_size=np.inf,
                 group_gap=None,
                 verbose=False
                 ):
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.max_train_group_size = max_train_group_size
        self.group_gap = group_gap
        self.max_test_group_size = max_test_group_size
        self.verbose = verbose

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        if groups is None:
            raise ValueError(
                "The 'groups' parameter should not be None")
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        group_gap = self.group_gap
        max_test_group_size = self.max_test_group_size
        max_train_group_size = self.max_train_group_size
        n_folds = n_splits + 1
        group_dict = {}
        u, ind = np.unique(groups, return_index=True)
        unique_groups = u[np.argsort(ind)]
        n_samples = _num_samples(X)
        n_groups = _num_samples(unique_groups)
        for idx in np.arange(n_samples):
            if (groups[idx] in group_dict):
                group_dict[groups[idx]].append(idx)
            else:
                group_dict[groups[idx]] = [idx]
        if n_folds > n_groups:
            raise ValueError(
                ("Cannot have number of folds={0} greater than"
                 " the number of groups={1}").format(n_folds,
                                                     n_groups))

        group_test_size = min(n_groups // n_folds, max_test_group_size)
        group_test_starts = range(n_groups - n_splits * group_test_size,
                                  n_groups, group_test_size)
        for group_test_start in group_test_starts:
            train_array = []
            test_array = []

            group_st = max(0, group_test_start - group_gap - max_train_group_size)
            for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
                train_array_tmp = group_dict[train_group_idx]
                
                train_array = np.sort(np.unique(
                                      np.concatenate((train_array,
                                                      train_array_tmp)),
                                      axis=None), axis=None)

            train_end = train_array.size
            for test_group_idx in unique_groups[group_test_start:
                                                group_test_start +
                                                group_test_size]:
                test_array_tmp = group_dict[test_group_idx]
                test_array = np.sort(np.unique(
                                              np.concatenate((test_array,
                                                              test_array_tmp)),
                                     axis=None), axis=None)

            test_array = test_array[group_gap:]

            yield [int(i) for i in train_array], [int(i) for i in test_array]
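
To see what the purge does, here is a minimal sketch on toy data (the group values and sizes are made up for illustration): the group_gap groups immediately before each test block are dropped from the training indices, and the first group_gap rows of each test block are trimmed as well.

# Toy example: 30 rows in 10 sequential date-like groups, 3 splits, a gap of 1 group
toy_X = np.zeros((30, 2))
toy_groups = np.repeat(np.arange(10), 3)
toy_cv = PurgedGroupTimeSeriesSplit(n_splits=3, group_gap=1)
for tr_idx, te_idx in toy_cv.split(toy_X, groups=toy_groups):
    print('train groups:', np.unique(toy_groups[tr_idx]),
          '| test groups:', np.unique(toy_groups[te_idx]))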
class MLP(nn.Module):
    def __init__(self,config):
        super(MLP,self).__init__()
        self.criterion = nn.BCELoss()
        self.lr = config["lr"]
        input_shape = 130
        layers = []
        drop_out = [v for key, v in config.items() if 'dropout' in key]
        hidden_size = [v for key, v in config.items() if 'layer' in key]
        for i in range(len(hidden_size)):
            out_shape = hidden_size[i]
            
            layers.append(nn.Dropout(drop_out[i]))
            layers.append(nn.Linear(input_shape, out_shape))
            layers.append(nn.BatchNorm1d(out_shape))
            layers.append(nn.SiLU()) # SiLU aka swish
                          
            input_shape = out_shape
        
        
        # final layer: one sigmoid output per resp column
        layers.append(nn.Dropout(drop_out[-1]))
        layers.append(nn.Linear(input_shape, 5))
        layers.append(nn.Sigmoid())
        
        self.model = torch.nn.Sequential(*layers)
    def forward(self,x):
        x = self.model(x)
        return x
config = {
    "layer_1_size": 128,
    "layer_2_size": 256,
    "layer_3_size": 64,
    "layer_4_size": 32,

    "dropout_1":0.1,
    "dropout_2":0.2,
    "dropout_3":0.2,
    "dropout_4":0.3,
    "dropout_output":0.4,
    "lr": 1e-3
}
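With this config the MLP stacks four Dropout -> Linear -> BatchNorm1d -> SiLU blocks (130 -> 128 -> 256 -> 64 -> 32) followed by a Dropout -> Linear -> Sigmoid head with 5 outputs, one per resp column. A quick way to inspect the generated layer stack (purely illustrative):

# build one instance just to print the nn.Sequential produced from the config
print(MLP(config).model)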
class MLP_DataModule(pl.LightningDataModule):
    def __init__(self, data, targets, BATCH_SIZE, fold = None):
        super().__init__()
        self.BATCH_SIZE = BATCH_SIZE
        self.data = data
        self.targets = targets
        self.fold = fold
        
    def prepare_data(self):
        pass
    
    def setup(self, stage=None):
        pass
        
    def train_dataloader(self):
        
        dataset = MLP_Dataset(dataset = self.data, targets = self.targets)
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.BATCH_SIZE)
        return train_loader
    
    def valid_dataloader(self):
        dataset = MLP_Dataset(dataset = self.data, targets = self.targets)
        valid_loader = torch.utils.data.DataLoader(dataset,batch_size=self.BATCH_SIZE)
        return valid_loader

    
    def test_dataloader(self):
        return None
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
FOLDS=5
gkf = PurgedGroupTimeSeriesSplit(n_splits = FOLDS, group_gap=20)
splits = list(gkf.split(X_train, groups=train['date'].values))
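A small sanity check (optional, nothing later depends on it): print each fold's date ranges to confirm the 20-day purge sits between training and validation.

dates = train['date'].values
for fold, (tr_idx, va_idx) in enumerate(splits):
    print(f'fold {fold}: train dates {dates[tr_idx].min()}-{dates[tr_idx].max()} | '
          f'valid dates {dates[va_idx].min()}-{dates[va_idx].max()}')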
def Train(params,models,num_epochs=15, batch_size=4096, patience=6):
    loss_fn = nn.BCELoss().to(device)
    config = {**params}
    Val_Loss = 0
    N_Samples = 0
    loss_dict = {}
    for fold, (train_idx, valid_idx) in tqdm(enumerate(splits)):
        print('Fold : {}'.format(fold))
        loss_dict[fold] = []
        tr_x, tr_y = X_train[train_idx], y_train[train_idx]
        train_loader = MLP_DataModule(data=tr_x, targets=tr_y, BATCH_SIZE=batch_size).train_dataloader()
        
        val_x, val_y = X_train[valid_idx], y_train[valid_idx]
        val_loader = MLP_DataModule(data=val_x,targets = val_y, BATCH_SIZE = batch_size).valid_dataloader()
        
        model = MLP(config).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
        
        
        training = []
        validation = []
        ## define control variables
        the_last_loss = 100
        trigger_times=0
#         Training
        for epoch in range(num_epochs):
            model.train()  # switch back to train mode (validation below calls model.eval())
            running_loss = 0.0
            for batch in train_loader:
                inputs, labels = batch['x'].to(device), batch['y'].to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(True):
                    outputs = model(inputs)
                    loss = loss_fn(outputs, labels)
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item() * inputs.size(0)
            
            epoch_loss = running_loss / len(train_loader.dataset)
            training.append(epoch_loss)
            print(' Training: Epoch({}) - Loss: {:.4f}'.format(epoch+1, epoch_loss))
        
            # validation
            model.eval()
            vrunning_loss = 0.0
            num_samples = 0
        
            for batch in val_loader:
                data, labels = batch['x'].to(device), batch['y'].to(device)
                with torch.no_grad():
                    outputs = model(data)
                    loss = loss_fn(outputs, labels)
            
                vrunning_loss += loss.item() * data.size(0)
                num_samples += labels.size(0)
            # update epoch loss
            vepoch_loss = vrunning_loss/num_samples
            validation.append(vepoch_loss)
            print(' Validation({}) - Loss: {:.4f}'.format(epoch+1, vepoch_loss))
            
        
        # Check if Early Stopping
            if vepoch_loss > the_last_loss:
                trigger_times += 1
                if trigger_times >= patience:
                    print('Meet Early stopping!')
                    break
            else:
                trigger_times = 0
                the_last_loss = vepoch_loss
                torch.save(model.state_dict(), f'model_{fold}.pkl')
        ## Update global loss
        Val_Loss += vepoch_loss * num_samples
        N_Samples += num_samples
        models.append(model.to("cpu"))
        # save the final-epoch weights (this overwrites the best checkpoint written during early stopping)
        torch.save(model.state_dict(), f'model_{fold}.pkl')
        loss_dict[fold].extend([training,validation])
    return Val_Loss/N_Samples,loss_dict
train_model = True
%%time
if train_model:
    models = []
    model_loss,loss_dict = Train(config,models)
    print(model_loss)
    
else:
    models = []
    for i in range(len(splits)):
        mlp = MLP(config)
        mlp.load_state_dict(torch.load(f'./model_{i}.pkl',map_location=torch.device('cpu')))
        models.append(mlp)

Prediction

def predict(model, x):
    x = torch.as_tensor(x, dtype=torch.float)
    model.eval()
    with torch.no_grad():
        pred = model(x)
    return pred.numpy()
X_train[1,:]
pred_vector = np.mean([predict(model, X_train[0:1, :]) for model in models], axis=0)
pred_vector
import janestreet
env = janestreet.make_env()
env_iter = env.iter_test()
opt_th = 0.5
# training-set column means used to impute missing test features (feature_0 is excluded and never imputed)
f_mean = np.mean(train[features[1:]].values, axis=0)
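
Each model outputs five probabilities, one per resp target; the loop below reduces them to a single 0/1 action by taking their median and comparing it against opt_th. Applied to the sample prediction computed above, the same decision rule reads:

# same rule as in the submission loop: median over the 5 targets, then threshold
print(int(np.median(pred_vector) > opt_th))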

for (test_df, pred_df) in env_iter:
        
        if test_df['weight'].item() > 0:
            test_df = test_df.loc[:, features].values
            if np.isnan(test_df[:, 1:].sum()):
                # vectorised fillna: replace each NaN with the corresponding training-set column mean
                test_df[:, 1:] = np.nan_to_num(test_df[:, 1:]) + np.isnan(test_df[:, 1:]) * f_mean

            pred_vector = np.mean([predict(model, test_df) for model in models], axis=0)
            pred = np.median(pred_vector)
            pred_df.action = (pred > opt_th).astype(int) 
            

        else:
            pred_df.action = 0
        env.predict(pred_df)