C++ --> Python
Save from C++
#include <fstream>
#include <iostream>
#include <memory>

#include <torch/script.h>
// Serialize a tensor from C++ so that Python's torch.load() can read it.
// torch::jit::pickle_save produces the zip-based pickle format as raw bytes.
int main() {
  auto x = torch::ones({3, 3});
  // pickle_save returns the serialized bytes as a std::vector<char>.
  auto bytes = torch::jit::pickle_save(x);
  std::ofstream fout("x.zip", std::ios::out | std::ios::binary);
  if (!fout) {
    std::cerr << "failed to open x.zip for writing" << std::endl;
    return 1;
  }
  fout.write(bytes.data(), static_cast<std::streamsize>(bytes.size()));
  fout.close();
  return 0;
}
Load in Python
import torch
# Load the tensor that the C++ program serialized with torch::jit::pickle_save.
torch.load("x.zip")
Python --> C++
Save from Python
import io
import torch
def save_tensor(device):
    """Create a random 3x3 tensor on `device` and serialize it to
    'my_tensor_<device>.pt'.

    The zipfile serialization format is the one torch::pickle_load
    on the C++ side expects.
    """
    my_tensor = torch.rand(3, 3).to(device)
    print("[python] my_tensor: ", my_tensor)
    buffer = io.BytesIO()
    torch.save(my_tensor, buffer, _use_new_zipfile_serialization=True)
    with open('my_tensor_%s.pt' % device, "wb") as out_f:
        # Copy the BytesIO stream to the output file
        out_f.write(buffer.getbuffer())


if __name__ == '__main__':
    save_tensor('cpu')
Load in C++
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

#include <torch/torch.h>
// Read the entire contents of `filename` into a byte buffer.
// Returns an empty vector (after reporting to stderr) if the file
// cannot be opened.
std::vector<char> get_the_bytes(const std::string& filename) {
  std::ifstream input(filename, std::ios::binary);
  if (!input) {
    std::cerr << "failed to open " << filename << std::endl;
    return {};
  }
  // Stream iterators copy every byte of the file; the extra parentheses
  // around the first argument avoid the most-vexing-parse.
  std::vector<char> bytes(
      (std::istreambuf_iterator<char>(input)),
      (std::istreambuf_iterator<char>()));
  // No explicit close(): the ifstream destructor releases the file (RAII).
  return bytes;
}
int main()
{
std::vector<char> f = get_the_bytes("my_tensor_cpu.pt");
torch::IValue x = torch::pickle_load(f);
torch::Tensor my_tensor = x.toTensor();
std::cout << "[cpp] my_tensor: " << my_tensor << std::endl;
return 0;
}