FATE 2.4.3: Sentiment Classification with a Frozen-Parameter BERT

In this example, we build a text classifier on top of a frozen-parameter BERT and train it on the IMDB sentiment classification dataset.

Dataset: IMDB sentiment

This is a binary classification dataset. You can download our processed version here:

Place it in the examples/data folder (or any directory of your choosing; the path is referenced in the code below).

The organized data comes from:

Inspect the dataset

import pandas as pd
df = pd.read_csv('/mnt/hgfs/examples/data/IMDB.csv')  # adjust to your own file location
df
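
A quick sanity check on the class balance (a minimal sketch, assuming the processed CSV has a 0/1 column named label, which the TokenizerDataset below also expects):

# count positive and negative reviews; the IMDB split should be roughly balanced
print(df['label'].value_counts())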

from federatedml.nn.dataset.nlp_tokenizer import TokenizerDataset
ds = TokenizerDataset(tokenizer_name_or_path="bert-base-uncased")
ds.load('/mnt/hgfs/examples/data/IMDB.csv')  # adjust to your own file location
from torch.utils.data import DataLoader

dl = DataLoader(ds, batch_size=16)
for i in dl:
    break
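
Peeking at the first batch pulled by the loop above confirms the tokenization worked (a minimal sketch; the exact batch structure depends on the TokenizerDataset version, here assumed to yield (token_ids, label) pairs):

# i still holds the first batch from the loop above
print(type(i))
# if the dataset yields (token_ids, label) pairs, i[0] is a [16, seq_len] tensor of token ids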

Build the BERT classifier

from pipeline.component.nn import save_to_fate

The %%save_to_fate cell magic below writes the cell's code into FATE's model_zoo as bert_.py, so the pipeline can later load the model by module and class name.

%%save_to_fate model bert_.py

import torch as t
from federatedml.nn.model_zoo.pretrained_bert import PretrainedBert


class BertClassifier(t.nn.Module):
    
    def __init__(self, ):
        super(BertClassifier, self).__init__()
        # load the pretrained BERT with frozen weights; only the head below is trained
        self.bert = PretrainedBert(pretrained_model_name_or_path='bert-base-uncased', freeze_weight=True)
        self.classifier = t.nn.Sequential(
            t.nn.Linear(768, 128),
            t.nn.ReLU(),
            t.nn.Linear(128, 64),
            t.nn.ReLU(),
            t.nn.Linear(64, 1),
            t.nn.Sigmoid()
        )
        
    def parameters(self, ):
        # expose only the classifier head, so the optimizer (and federated
        # aggregation) skips the frozen BERT weights
        return self.classifier.parameters()
        
    def forward(self, x):
        x = self.bert(x)
        # pooler_output is the pooled [CLS] representation, shape [batch, 768]
        return self.classifier(x.pooler_output)

model = BertClassifier()
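
Because parameters() is overridden, only the head is trainable: roughly 107k weights versus BERT-base's roughly 110M frozen ones. A minimal sanity check:

# count the trainable parameters exposed to the optimizer
n_trainable = sum(p.numel() for p in model.parameters())
print(n_trainable)  # 106,753 for the 768->128->64->1 head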
import torch as t
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer

trainer = FedAVGTrainer(epochs=3, batch_size=16, shuffle=True, data_loader_worker=4)
trainer.local_mode()  # run locally for debugging, without any federation
trainer.set_model(model)
Local test
opt = t.optim.Adam(model.parameters(), lr=0.005)
loss = t.nn.BCELoss()
# local test
trainer.train(ds, None, opt, loss)

Submit the pipeline

import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.component import HomoNN
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, Evaluation, DataTransform
from pipeline.interface import Data, Model

fate_torch_hook(t)


import os
# fate_project_path = os.path.abspath('../../../../')
guest_0 = 10000
host_1 = 9999
pipeline = PipeLine().set_initiator(role='guest', party_id=guest_0).set_roles(guest=guest_0, host=host_1,
                                                                              arbiter=guest_0)
data_0 = {"name": "imdb", "namespace": "experiment"}
data_path = '/mnt/hgfs/examples/data/IMDB.csv'  # adjust to your own file location
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path)

{'namespace': 'experiment', 'table_name': 'imdb'}

reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_0.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)

reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_1.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)
Note: if your virtual machine has no GPU, remove cuda=True from TrainerParam (as the active block below does); otherwise the job will fail.

from pipeline.component.homo_nn import DatasetParam, TrainerParam  
model = t.nn.Sequential(
    # CustModel (added to torch by fate_torch_hook) references the bert_.py module saved earlier
    t.nn.CustModel(module_name='bert_', class_name='BertClassifier')
)

# GPU variant, kept for reference (requires cuda=True and a CUDA device):
# nn_component = HomoNN(name='nn_0',
#                       model=model, 
#                       loss=t.nn.BCELoss(),
#                       optimizer = t.optim.Adam(lr=0.001, weight_decay=0.001),
#                       dataset=DatasetParam(dataset_name='nlp_tokenizer', tokenizer_name_or_path="bert-base-uncased"),  # use the custom dataset
#                       trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=2, batch_size=16, data_loader_worker=8, cuda=True),
#                       torch_seed=100 
#                       )

nn_component = HomoNN(name='nn_0',
                      model=model, 
                      loss=t.nn.BCELoss(),
                      optimizer = t.optim.Adam(lr=0.001, weight_decay=0.001),
                      dataset=DatasetParam(dataset_name='nlp_tokenizer', tokenizer_name_or_path="bert-base-uncased"),  # use the custom dataset
                      trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=2, batch_size=16, data_loader_worker=8),
                      torch_seed=100 
                      )
Remember to add pipeline.add_component(reader_0) here; otherwise an error is raised.
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(nn_component, data=Data(train_data=reader_0.output.data, validate_data=reader_1.output.data))
pipeline.add_component(Evaluation(name='eval_0', eval_type='binary'), data=Data(data=nn_component.output.data))
pipeline.compile()
pipeline.fit()

Write and save the results

df = pipeline.get_component('nn_0').get_output_data()  # get result
df

import pandas as pd
df.to_csv('frozen_bert_sentiment_classification.csv')
pipeline.get_component('nn_0').get_summary()
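
get_summary() returns a dict of training metrics. To keep it alongside the CSV, a minimal sketch using only the standard library (the nn_0_summary.json filename is illustrative):

import json
summary = pipeline.get_component('nn_0').get_summary()
with open('nn_0_summary.json', 'w') as f:
    json.dump(summary, f, indent=2)  # persist the training summary as JSON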
