前言
minio官方文档:https://docs.min.io/docs(注意不要看中文文档http://docs.minio.org.cn/docs,它和英文版的里面内容有些差别,英文版的更加全面)
minio server单机docker部署
开发环境:centos7
部署方式:docker(不要用win,坑好多)
minio的docker命令:
docker run \
-p 9001:9001 \
-p 9000:9000 \
--name minio1 \
-e "MINIO_ROOT_USER=admin" \
-e "MINIO_ROOT_PASSWORD=password" \
-v /home/docker/minio/data:/data minio/minio server /data \
--console-address ":9000" \
--address ":9001"
这里要注意一些参数,像console-address、address 没带上的话,浏览器访问不了,尽管没带会随机生成端口,但是我用那个端口也是不行
minio 客户端mc部署
mc客户端的docker命令
docker pull minio/mc
docker run -it --name mc1 --entrypoint=/bin/sh minio/mc
docker run启动mc容器后可以Ctrl+C退出,然后再用docker start mc1来后台启动,这时候可以用docker exec 来进入mc容器,进入容器后进入/bin文件夹里面有个mc启动,这时候就用mc+命令来进行操作
要对上面的minio存储进行操作的话要进行下面的命令
mc config host add minio1 http://localhost:9001 admin password --api s3v4
minio断点续传后端代码
参考:https://blog.csdn.net/Tuine/article/details/113996848、https://blog.csdn.net/anxyh_name/article/details/108397774、https://blog.csdn.net/lmlm21/article/details/107768581
minio断点续传:minio后端的putObject其实已经是支持断点续传的操作了(MinIO本身不支持断点续传,而是利用了AWS S3协议来支持),其中有个stream(InputStream stream, long objectSize, long partSize)方法就是来做分流操作
因为底层是利用s3来实现分段,所以我们用S3Base抽象类的方法来实现,代码如下(注意每个分片大小要大于5MB,小于5GB,如果单个整文件小于5MB可以直接用MINIO的putObject方法来上传就没必要分片了)
/**
 * Bookkeeping entity for one in-flight MinIO multipart upload.
 * Persisted in the MINIO_PART table, keyed by the front-end-computed file MD5.
 *
 * The original listing omitted the accessors and the closing brace
 * ("get和set方法省略"); they are required by the callers in this file
 * (e.g. {@code seteTag}, {@code getDateTime}, {@code getPartUrl}) and are
 * supplied here so the class actually compiles.
 */
public class MinioPart {
    // file MD5 computed by the front end; primary key
    private String eTag;
    // number of parts agreed with the client
    private Integer partCount;
    // creation time of the presigned part URLs; rows older than 5 days are treated as expired
    private Timestamp dateTime;
    // toString() of the presigned-URL list (parsed back in MinioFileSystem.isExist)
    private String partUrl;
    // multipart uploadId issued by MinIO
    private String uploadId;

    public String geteTag() {
        return eTag;
    }

    public void seteTag(String eTag) {
        this.eTag = eTag;
    }

    public Integer getPartCount() {
        return partCount;
    }

    public void setPartCount(Integer partCount) {
        this.partCount = partCount;
    }

    public Timestamp getDateTime() {
        return dateTime;
    }

    public void setDateTime(Timestamp dateTime) {
        this.dateTime = dateTime;
    }

    public String getPartUrl() {
        return partUrl;
    }

    public void setPartUrl(String partUrl) {
        this.partUrl = partUrl;
    }

    public String getUploadId() {
        return uploadId;
    }

    public void setUploadId(String uploadId) {
        this.uploadId = uploadId;
    }
}
/**
 * Thin subclass of {@link MinioClient} whose sole purpose is to expose the
 * SDK's protected multipart-upload primitives (create / complete / list
 * parts) so they can be driven directly with presigned part URLs.
 */
public class CustomMinioClient extends MinioClient {

    public CustomMinioClient(MinioClient client) {
        super(client);
    }

    /**
     * Initiates a multipart upload for {@code object} in {@code bucket} and
     * returns the uploadId issued by the server.
     */
    public String initMultiPartUpload(String bucket, String region, String object, Multimap<String, String> headers, Multimap<String, String> extraQueryParams) throws IOException, InvalidKeyException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException {
        CreateMultipartUploadResponse createResponse =
                this.createMultipartUpload(bucket, region, object, headers, extraQueryParams);
        return createResponse.result().uploadId();
    }

    /**
     * Completes a multipart upload from the given (partNumber, etag) pairs,
     * delegating to the protected SDK call.
     */
    public ObjectWriteResponse mergeMultipartUpload(String bucketName, String region, String objectName, String uploadId, Part[] parts, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws IOException, InvalidKeyException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException {
        ObjectWriteResponse mergeResponse =
                this.completeMultipartUpload(bucketName, region, objectName, uploadId, parts, extraHeaders, extraQueryParams);
        return mergeResponse;
    }

    /**
     * Lists the parts already uploaded for {@code uploadId}, delegating to
     * the protected SDK call.
     */
    public ListPartsResponse listMultipart(String bucketName, String region, String objectName, Integer maxParts, Integer partNumberMarker, String uploadId, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws NoSuchAlgorithmException, InsufficientDataException, IOException, InvalidKeyException, ServerException, XmlParserException, ErrorResponseException, InternalException, InvalidResponseException {
        ListPartsResponse listResponse =
                this.listParts(bucketName, region, objectName, maxParts, partNumberMarker, uploadId, extraHeaders, extraQueryParams);
        return listResponse;
    }
}
@Component
@ConditionalOnExpression("${minio.enabled}==true")
public class MinioFileSystem {
// NOTE(review): field is named identically to its type (MinioHelp); the rest of
// this class relies on that — renaming to minioHelp would be clearer but must be
// done consistently across the class.
@Autowired
MinioHelp MinioHelp;
@Autowired
private MinioPartMapper minioPartMapper;
// Returns the shared, lazily-initialized MinioClient (see MinioHelp.getMinioClient).
MinioClient getMinioClient()
{
return MinioHelp.getMinioClient();
}
/**
 * Checks (via the MINIO_PART table) whether a multipart upload for this file
 * already exists, and returns the upload state the front end needs to resume.
 *
 * @param dir       target directory (currently unused here — TODO confirm intent)
 * @param filename  object name in the bucket
 * @param partCount number of parts the client intends to upload
 * @param etag      file MD5 computed by the front end; primary key of MINIO_PART
 * @return map with keys uploadId / uploadUrls / isExist / partCount / partNumber;
 *         an EMPTY map if any exception occurred (the exception is swallowed
 *         and only printed — NOTE(review): callers cannot distinguish this
 *         from a legitimate empty result)
 */
public Map<String, Object> isExist(String dir, String filename,int partCount,String etag){
Map<String,Object> result=new HashMap<>();
try {
MinioPart minioPart = minioPartMapper.selectByEtag(etag);
// No record yet: create presigned part URLs and persist the bookkeeping row.
if (minioPart==null){
return append(dir, filename,partCount,etag);
// Record older than 5 days: treat the presigned URLs as expired (append()
// issues them with a 7-day expiry; 5 days leaves a safety margin) and recreate.
// 86400000*5 = 432,000,000 ms — fits in int, no overflow.
}else if(minioPart.getDateTime().before(new Timestamp(System.currentTimeMillis()-86400000*5))){
minioPartMapper.deleteByEtag(etag);
// minioPartMapper.deleteOverdue(); // purge of all expired rows; could become a scheduled job
return append(dir, filename,partCount,etag);
// Upload in progress: report which parts are still missing plus their URLs.
}else {
MinioClient minioClient=getMinioClient();
CustomMinioClient customMinioClient=new CustomMinioClient(minioClient);
String uploadId=minioPart.getUploadId();
// Ask MinIO which parts were already uploaded (max 1000, matching append()'s limit).
ListPartsResponse partResult = customMinioClient.listMultipart(MinioHelp.rootPath, null, filename, 1000, 0, uploadId, null, null);
// Build the full 1..partCount list, then remove the part numbers MinIO already has.
List<Integer> partNumber=new ArrayList(partCount);
for (int i=0;i<partCount;i++){
partNumber.add(i+1);
}
List<Integer> hasPart=new ArrayList<>(partCount);
for (Part part:partResult.result().partList()){
hasPart.add(part.partNumber());
}
if (partResult.result().partList().size()>0){
partNumber.removeAll(hasPart);
}
List<String> partList = new ArrayList<>();
// The URL list was stored as List.toString() ("[u1, u2, ...]"); strip the
// brackets and split on ", " to recover it. NOTE(review): fragile — breaks
// if a presigned URL ever contains ", "; a JSON column would be safer.
String[] partUrls=minioPart.getPartUrl().substring(1,minioPart.getPartUrl().length()-1).split(", ");
for (Integer partId:partNumber){
partList.add(partUrls[partId-1]);
}
result.put("uploadId", uploadId); // MinIO multipart uploadId
result.put("uploadUrls",partList); // presigned URLs for the parts not yet uploaded
result.put("isExist",true); // true: an upload for this etag already exists
result.put("partCount",partCount); // agreed total number of parts
result.put("partNumber",partNumber); // part numbers still to upload
return result;
}
} catch (Exception e) {
e.printStackTrace();
}
return result;
}
/**
 * Starts a brand-new multipart upload: initiates it on MinIO, generates one
 * presigned PUT URL per part (7-day expiry), and persists the bookkeeping
 * row so the upload can be resumed later.
 *
 * Each part must be larger than 5MB except the last one (S3 multipart rule).
 *
 * @param dir       target directory (currently unused here — TODO confirm intent)
 * @param filename  object name in the bucket
 * @param partCount number of parts; at most 1000 (only an "error" entry is
 *                  returned when exceeded)
 * @param etag      file MD5 computed by the front end; primary key of MINIO_PART
 * @return map with keys uploadId / uploadUrls / isExist / partCount / partNumber,
 *         or a map containing only "error" when partCount > 1000
 * @throws Exception propagated from the MinIO SDK or the mapper
 */
// @Override
public Map<String, Object> append(String dir, String filename,int partCount,String etag) throws Exception {
Map<String, Object> result = new HashMap<>();
if (partCount>1000){
result.put("error","分片数量不能大于1000");
}else {
// Initiate the multipart upload and generate one presigned URL per part.
MinioClient minioClient=getMinioClient();
String uploadId = new CustomMinioClient(minioClient).initMultiPartUpload(MinioHelp.rootPath, null, filename, null, null);
result.put("uploadId", uploadId); // MinIO multipart uploadId
List<String> partList = new ArrayList<>();
Map<String, String> reqParams = new HashMap<>();
reqParams.put("uploadId", uploadId);
// The same reqParams map is reused: only "partNumber" changes per iteration,
// and getPresignedObjectUrl reads it when building each URL.
for (int i = 1; i <= partCount; i++) {
reqParams.put("partNumber", String.valueOf(i));
String uploadUrl = minioClient.getPresignedObjectUrl(
GetPresignedObjectUrlArgs.builder()
.method(Method.PUT)
.bucket(MinioHelp.rootPath)
.object(filename)
.expiry(7, TimeUnit.DAYS) // URLs valid 7 days; isExist() recreates after 5
.extraQueryParams(reqParams)
.build());
partList.add(uploadUrl);
}
result.put("uploadUrls", partList); // presigned URLs, one per part
result.put("isExist",false); // false: this is a fresh upload, no prior parts
result.put("partCount",partCount); // agreed total number of parts
result.put("partNumber",null); // no parts uploaded yet
// Persist the bookkeeping row so the upload can be resumed by etag.
MinioPart minioPart=new MinioPart();
minioPart.seteTag(etag);
minioPart.setPartCount(partCount);
minioPart.setDateTime(new Timestamp(System.currentTimeMillis()));
minioPart.setUploadId(uploadId);
// Stored as List.toString(); parsed back in isExist() — see note there.
minioPart.setPartUrl(partList.toString());
minioPartMapper.insertSelective(minioPart);
}
return result;
}
/**
 * Completes the multipart upload once the front end reports all parts are
 * uploaded: asks MinIO for the uploaded parts, merges them into the final
 * object, then removes the bookkeeping row.
 *
 * Each part must be larger than 5MB except the last one (S3 multipart rule).
 *
 * @param dir      target directory (currently unused here — TODO confirm intent)
 * @param filename object name in the bucket
 * @param uploadId MinIO multipart uploadId returned by append()
 * @param etag     file MD5 used as the MINIO_PART primary key
 * @return true on success; false if any step failed (exception is printed)
 */
@Transactional(rollbackFor=Exception.class)
public boolean mergeMultipartUpload(String dir, String filename, String uploadId,String etag) {
try {
MinioClient minioClient=getMinioClient();
CustomMinioClient customMinioClient=new CustomMinioClient(minioClient);
// Only up to 1000 parts are supported, matching the limit enforced in append().
ListPartsResponse partResult = customMinioClient.listMultipart(MinioHelp.rootPath, null, filename, 1000, 0, uploadId, null, null);
// FIX: size the array to the number of parts actually uploaded. The previous
// fixed-size new Part[1000] left trailing null entries that were handed to
// completeMultipartUpload. Also keep the server-reported part numbers instead
// of blindly renumbering 1..n, which would mislabel parts if any gap existed.
Part[] parts = new Part[partResult.result().partList().size()];
int i = 0;
for (Part part : partResult.result().partList()) {
parts[i++] = new Part(part.partNumber(), part.etag());
}
customMinioClient.mergeMultipartUpload(MinioHelp.rootPath, null, filename, uploadId, parts, null, null);
// Merge succeeded: the resume bookkeeping for this upload is no longer needed.
minioPartMapper.deleteByEtag(etag);
} catch (Exception e) {
e.printStackTrace();
return false;
}
return true;
}
/**
 * Spring helper that lazily builds and caches a single shared
 * {@link MinioClient} and ensures the configured root bucket exists.
 */
@Component
public class MinioHelp {
    private static final Logger logger = LoggerFactory.getLogger(MinioHelp.class);
    // FIX: must be volatile — without it the double-checked locking in
    // getMinioClient() is broken under the Java Memory Model (a thread may
    // observe a partially-constructed client).
    static volatile MinioClient sminioClient = null;
    static final Object lock = new Object();
    // Root bucket name; created on first use if absent.
    @Value("${minio.root:dci}")
    String rootPath;
    // @Value("${minio.minioUrl:http://localhost:9001}")
    @Value("${minio.minioUrl:http://192.168.56.200:9001}")
    String minioUrl;
    // SECURITY(review): default credentials are hardcoded; override them via
    // configuration in any non-local environment.
    @Value("${minio.accessKey:admin}")
    String accessKey;
    @Value("${minio.secretKey:password}")
    String secretKey;
    // Local directory used for temporary downloads.
    @Value("${minio.tmpdir:temp}")
    String tmpDir;

    /**
     * Returns the shared client, creating it on first call
     * (double-checked locking; safe now that the field is volatile).
     */
    public MinioClient getMinioClient()
    {
        if (sminioClient == null)
        {
            synchronized (lock)
            {
                if (sminioClient == null) {
                    init();
                }
            }
        }
        return sminioClient;
    }

    /**
     * Downloads the object at {@code souPath} from the root bucket into a
     * uniquely-named file under the temp directory.
     *
     * @param souPath object name in the root bucket
     * @return local path of the downloaded file
     * @throws RuntimeException wrapping any download failure
     */
    public String DownloadTmpFile(String souPath)
    {
        String desPath = tmpDir + File.separator + UUID.randomUUID().toString();
        try {
            this.getMinioClient().downloadObject(
                    DownloadObjectArgs.builder()
                            .bucket(this.rootPath)
                            .object(souPath)
                            .filename(desPath)
                            .build());
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e);
        }
        return desPath;
    }

    /**
     * Builds the MinioClient from the configured URL/credentials and makes
     * sure the root bucket exists, creating it if necessary.
     */
    void init()
    {
        try {
            MinioClient minioClient =
                    MinioClient.builder()
                            .endpoint(minioUrl)
                            .credentials(accessKey, secretKey)
                            .build();
            // Ensure the root bucket exists before publishing the client.
            boolean isExist = minioClient.bucketExists(BucketExistsArgs.builder().bucket(rootPath).build());
            if (isExist) {
                // FIX: use the declared logger instead of System.out.println.
                logger.info("{} already exists.", rootPath);
            } else {
                minioClient.makeBucket(MakeBucketArgs.builder().bucket(rootPath).build());
            }
            sminioClient = minioClient;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }
}
/**
 * REST endpoints driving the resumable multipart upload flow:
 * segmentUpload.do starts/resumes an upload, mergePart.do finalizes it.
 */
@RestController()
public class MinioController {
    @Autowired
    private MinioFileSystem minioFileSystem;

    /**
     * Starts or resumes a MinIO multipart upload.
     *
     * @param dir       file path
     * @param filename  file name
     * @param partCount number of parts
     * @param etag      file MD5 computed by the front end
     * @return upload state (uploadId, presigned URLs, missing part numbers)
     */
    @RequestMapping(value = "/attach/stream/segmentUpload.do")
    public HttpResponseEntity segmentUpload(String dir, String filename, int partCount, String etag) {
        try {
            // FIX: use the parameterized type instead of a raw Map.
            Map<String, Object> results = minioFileSystem.isExist(dir, filename, partCount, etag);
            return HttpResponseEntity.build(results);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return HttpResponseEntity.build("ERROR");
    }

    /**
     * Merges the uploaded parts into the final object.
     *
     * @param dir      file path
     * @param filename file name
     * @param uploadid multipart uploadId
     * @param etag     file MD5 computed by the front end
     * @return 200 with the result on success, 500 if the merge failed
     */
    @RequestMapping(value = "/attach/stream/mergePart.do")
    public HttpResponseEntity mergePart(String dir, String filename, String uploadid, String etag) {
        try {
            // FIX: primitive boolean and a direct condition instead of
            // "Boolean results" compared with "== true".
            boolean merged = minioFileSystem.mergeMultipartUpload(dir, filename, uploadid, etag);
            if (merged) {
                return HttpResponseEntity.build(merged);
            } else {
                return HttpResponseEntity.build(null, "Failed to merge file fragments Procedure", HttpStatus.INTERNAL_SERVER_ERROR);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return HttpResponseEntity.build("ERROR");
    }
}
oracle数据库
-- Bookkeeping table for in-flight MinIO multipart uploads (one row per upload).
create table MINIO_PART
(
-- front-end computed file MD5; uniquely identifies the upload
ETAG VARCHAR2(255) not null
constraint MINIO_PART_PK
primary key,
-- number of parts agreed with the client
PARTCOUNT NUMBER,
-- creation time of the presigned URLs; rows older than 5 days are treated as expired
DATETIME TIMESTAMP(6),
-- List.toString() of the presigned part URLs (parsed back in isExist)
PARTURL CLOB,
-- multipart uploadId issued by MinIO
UPLOADID VARCHAR2(255)
)
代码可能有点乱,具体可以看上面的3个参考资料,然后试着自己做一下
minio分布式部署
尝试minio分布式部署
参考:https://blog.csdn.net/weixin_49385823/article/details/119064077
1、先在linux宿主机中创建4*4个文件夹,相当于4个节点,每个节点中有4个硬盘
cd /home/docker/minio
mkdir data-1-1 data-1-2 data-1-3 data-1-4 data-2-1 data-2-2 data-2-3 data-2-4 data-3-1 data-3-2 data-3-3 data-3-4 data-4-1 data-4-2 data-4-3 data-4-4
2、利用docker compose来创建容器(一开始我还打算直接手动创建4个容器,然后修改启动项command,但是发现好像修改不了,无奈只能老老实实选择compose的方法),现在docker部署方式有几种,第一种是compose主要用在同个虚拟机或者宿主机上,第二种是swarm主要用在多个宿主机或物理机的情况,第三种是kubernetes和swarm是一样的。
因为我只用一台虚拟机,所以用compose,思路就是compose创建4个容器并把它们关联起来,然后用nginx来做一个代理
准备docker-compose.yaml和nginx.conf (点击获取按照自己需求修改,注意端口号)
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio
command: server --console-address ":9000" --address ":9001" http://minio-{1...4}/data{1...4}
expose:
- "9000"
- "9001"
environment:
MINIO_ROOT_USER: admin
MINIO_ROOT_PASSWORD: password
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9001/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio-1:
<<: *minio-common
hostname: minio-1
container_name: minio-1
restart: always
volumes:
- /home/docker/minio/data-1-1:/data1
- /home/docker/minio/data-1-2:/data2
- /home/docker/minio/data-1-3:/data3
- /home/docker/minio/data-1-4:/data4
minio-2:
<<: *minio-common
hostname: minio-2
container_name: minio-2
restart: always
volumes:
- /home/docker/minio/data-2-1:/data1
- /home/docker/minio/data-2-2:/data2
- /home/docker/minio/data-2-3:/data3
- /home/docker/minio/data-2-4:/data4
minio-3:
<<: *minio-common
hostname: minio-3
container_name: minio-3
restart: always
volumes:
- /home/docker/minio/data-3-1:/data1
- /home/docker/minio/data-3-2:/data2
- /home/docker/minio/data-3-3:/data3
- /home/docker/minio/data-3-4:/data4
minio-4:
<<: *minio-common
hostname: minio-4
container_name: minio-4
restart: always
volumes:
- /home/docker/minio/data-4-1:/data1
- /home/docker/minio/data-4-2:/data2
- /home/docker/minio/data-4-3:/data3
- /home/docker/minio/data-4-4:/data4
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
container_name: nginx
restart: always
volumes:
- /home/docker/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
ports:
- "90:90"
- "91:91"
depends_on:
- minio-1
- minio-2
- minio-3
- minio-4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
data-1-1:
data-1-2:
data-1-3:
data-1-4:
data-2-1:
data-2-2:
data-2-3:
data-2-4:
data-3-1:
data-3-2:
data-3-3:
data-3-4:
data-4-1:
data-4-2:
data-4-3:
data-4-4:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio-1:9001;
server minio-2:9001;
server minio-3:9001;
server minio-4:9001;
}
upstream console {
ip_hash;
server minio-1:9000;
server minio-2:9000;
server minio-3:9000;
server minio-4:9000;
}
server {
listen 90;
listen [::]:90;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://console;
}
}
server {
listen 91;
listen [::]:91;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
# This is necessary to pass the correct IP to be hashed
real_ip_header X-Real-IP;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
}
进入到docker-compose.yaml的文件夹,启用docker-compose
#拉取镜像
docker-compose pull
#创建并启动容器
docker-compose up
经过以上操作后minio分布式部署完成,如果以我上面的配置访问虚拟机的ip:90就可以看到minio的登录界面,然后创建一个桶,然后往这个桶存文件,你会发现你的虚拟机里面,那些映射文件(data-1-1、data-1-2、data-1-3…)里面都多了一个文件,名称即为你刚刚创建的桶的名称,然后进入到每个桶会发现里面多个一个你刚刚上传的文件名称一样的文件夹,进入后会看到一个碎片文件,因为整个文件被分成多个片存到不同的存储空间中