Building a Lucene Index with MapReduce

In the previous post we built a Lucene index on Hadoop with a standalone program. In this post we look at how to build the index with a MapReduce job. The code is as follows:

package com.mapreduceindex;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.Version;
import org.apache.solr.store.hdfs.HdfsDirectory;
import org.mortbay.log.Log;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 *
 * Build a Lucene index with MapReduce.
 * @author qindongliang
 * Big data discussion QQ group: 376932160
 * Search technology QQ group 1: 324714439
 * Search technology QQ group 2: 206247899
 * Hadoop version: 2.2.0
 * Lucene version: 4.8.0
 * Solr version: 4.8.0
 *
 * **/
public class BuildIndexMapReduce {

	/**
	 * Obtain an IndexWriter.
	 * @param outDir name of the index directory to create under /qin/ on HDFS
	 * @return a configured IndexWriter
	 * */
	public static IndexWriter getIndexWriter(String outDir) throws Exception {
		Analyzer analyzer = new IKAnalyzer(true); // IK analyzer for Chinese word segmentation
		IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer);
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://192.168.46.32:9000/"); // HDFS address
		Path path = new Path("hdfs://192.168.46.32:9000/qin/" + outDir); // index directory on HDFS
		HdfsDirectory directory = new HdfsDirectory(path, conf);
		long heapSize = Runtime.getRuntime().totalMemory() / 1024L / 1024L;   // currently allocated heap (MB)
		long heapMaxSize = Runtime.getRuntime().maxMemory() / 1024L / 1024L;  // maximum heap (MB)
		config.setRAMBufferSizeMB((heapMaxSize - heapSize) * 0.7); // use roughly 70% of the free heap as the RAM buffer
		IndexWriter writer = new IndexWriter(directory, config);
		return writer;
	}
	
	/**
	 * Utility class for building Lucene documents.
	 * */
	public static class LuceneDocumentUtil{
		public static Document getDoc(String field, String value){
			Document d = new Document();
			// In a real job the fields would be driven by a schema (e.g. a Solr schema file);
			// here every value is simply indexed into a single "content" field.
			d.add(new TextField("content", value, Store.YES));
			return d;
		}
	}
	/**
	 * @author qindongliang
	 */
	private static class BuildIndexMapper extends Mapper<LongWritable, Text, NullWritable, NullWritable> {

		IndexWriter iw;
		List<Document> documents = new ArrayList<>();

	@Override
	protected void setup(Context context) throws IOException, InterruptedException {
		Random rd = new Random();
		int i = rd.nextInt(99999999); // index directory name for this map task; a UUID would guarantee uniqueness
		try {
			iw = getIndexWriter(i + ""); // initialize the IndexWriter
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
		
	
	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		Log.info("  input record: " + value.toString());
		String values[] = value.toString().split("\1"); // each input line is "fieldName\u0001fieldValue"
		String fieldName = values[0];
		String fieldValue = values[1];
		Document d = LuceneDocumentUtil.getDoc(fieldName, fieldValue);
		if (d == null) {
			return;
		}
		documents.add(d);
		if (documents.size() > 5000) { // add documents to the index in batches
			iw.addDocuments(documents);
			documents.clear();
		}
		// nothing is emitted to the MapReduce output; the index itself is the result
	}
	/***
	 * When the map task finishes, flush the remaining documents and close the writer.
	 * */
		@Override
		protected void cleanup(Context context) throws IOException, InterruptedException {
			if (iw != null) {
				if (documents.size() > 0) {
					iw.addDocuments(documents);
				}
				iw.close(true); // wait for merges to finish before closing
			}
		}
	}
public static void main(String[] args) throws Exception {

	Configuration conf = new Configuration();

	conf.set("mapreduce.job.jar", "myjob.jar");
	conf.set("fs.defaultFS", "hdfs://192.168.46.32:9000");
	conf.set("mapreduce.framework.name", "yarn");
	conf.set("yarn.resourcemanager.address", "192.168.46.32:8032");

	/** Job setup **/
	// Job job = new Job(conf, "testwordcount"); // deprecated API
	Job job = Job.getInstance(conf, "build index");
	job.setJarByClass(BuildIndexMapReduce.class);
	System.out.println("Mode:  " + conf.get("yarn.resourcemanager.address"));
	// job.setCombinerClass(PCombine.class);
	job.setNumReduceTasks(0); // map-only job: each map task writes its own index
	job.setMapperClass(BuildIndexMapper.class);
	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);

	job.setMapOutputKeyClass(NullWritable.class);
	job.setMapOutputValueClass(NullWritable.class);

	String path = "hdfs://192.168.46.32:9000/qin/output";
	FileSystem fs = FileSystem.get(conf);
	Path p = new Path(path);
	if (fs.exists(p)) {
		fs.delete(p, true);
		System.out.println("Output path already existed and was deleted!");
	}
	FileInputFormat.setInputPaths(job, "hdfs://192.168.46.32:9000/qin/indexinput");
	FileOutputFormat.setOutputPath(job, p);
	System.exit(job.waitForCompletion(true) ? 0 : 1);
}

	
	
	
}
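
For reference, each line of the input files under hdfs://192.168.46.32:9000/qin/indexinput is expected to hold a field name and a field value separated by the \u0001 (Ctrl-A) control character, which is what the mapper splits on. Below is a minimal sketch of writing such a sample input file to HDFS; the class name, file name, and sample lines are illustrative, not part of the original job:

package com.mapreduceindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteSampleInput {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://192.168.46.32:9000");
		FileSystem fs = FileSystem.get(conf);
		// each line: field name, a \u0001 (Ctrl-A) separator, then the field value
		try (FSDataOutputStream out = fs.create(new Path("/qin/indexinput/sample.txt"))) {
			out.write("content\u0001hadoop mapreduce builds lucene indexes\n".getBytes("UTF-8"));
			out.write("content\u0001solr hdfsdirectory stores the index on hdfs\n".getBytes("UTF-8"));
		}
		fs.close();
	}
}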


The console output from the job is as follows:

Mode:  192.168.46.32:8032
INFO - RMProxy.createRMProxy(56) | Connecting to ResourceManager at /192.168.46.32:8032
WARN - JobSubmitter.copyAndConfigureFiles(149) | Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
INFO - FileInputFormat.listStatus(287) | Total input paths to process : 3
INFO - JobSubmitter.submitJobInternal(394) | number of splits:3
INFO - Configuration.warnOnceIfDeprecated(840) | user.name is deprecated. Instead, use mapreduce.job.user.name
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.jar is deprecated. Instead, use mapreduce.job.jar
INFO - Configuration.warnOnceIfDeprecated(840) | fs.default.name is deprecated. Instead, use fs.defaultFS
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.mapoutput.value.class is deprecated. Instead, use mapreduce.map.output.value.class
INFO - Configuration.warnOnceIfDeprecated(840) | mapreduce.map.class is deprecated. Instead, use mapreduce.job.map.class
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.job.name is deprecated. Instead, use mapreduce.job.name
INFO - Configuration.warnOnceIfDeprecated(840) | mapreduce.inputformat.class is deprecated. Instead, use mapreduce.job.inputformat.class
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.input.dir is deprecated. Instead, use mapreduce.input.fileinputformat.inputdir
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
INFO - Configuration.warnOnceIfDeprecated(840) | mapreduce.outputformat.class is deprecated. Instead, use mapreduce.job.outputformat.class
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.map.tasks is deprecated. Instead, use mapreduce.job.maps
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.mapoutput.key.class is deprecated. Instead, use mapreduce.map.output.key.class
INFO - Configuration.warnOnceIfDeprecated(840) | mapred.working.dir is deprecated. Instead, use mapreduce.job.working.dir
INFO - JobSubmitter.printTokens(477) | Submitting tokens for job: job_1407866786826_0001
INFO - YarnClientImpl.submitApplication(174) | Submitted application application_1407866786826_0001 to ResourceManager at /192.168.46.32:8032
INFO - Job.submit(1272) | The url to track the job: http://h1:8088/proxy/application_1407866786826_0001/
INFO - Job.monitorAndPrintJob(1317) | Running job: job_1407866786826_0001
INFO - Job.monitorAndPrintJob(1338) | Job job_1407866786826_0001 running in uber mode : false
INFO - Job.monitorAndPrintJob(1345) |  map 0% reduce 0%
INFO - Job.monitorAndPrintJob(1345) |  map 33% reduce 0%
INFO - Job.monitorAndPrintJob(1345) |  map 100% reduce 0%
INFO - Job.monitorAndPrintJob(1356) | Job job_1407866786826_0001 completed successfully
INFO - Job.monitorAndPrintJob(1363) | Counters: 27
	File System Counters
		FILE: Number of bytes read=0
		FILE: Number of bytes written=238179
		FILE: Number of read operations=0
		FILE: Number of large read operations=0
		FILE: Number of write operations=0
		HDFS: Number of bytes read=67091
		HDFS: Number of bytes written=9708
		HDFS: Number of read operations=147
		HDFS: Number of large read operations=0
		HDFS: Number of write operations=75
	Job Counters 
		Launched map tasks=3
		Data-local map tasks=3
		Total time spent by all maps in occupied slots (ms)=81736
		Total time spent by all reduces in occupied slots (ms)=0
	Map-Reduce Framework
		Map input records=166
		Map output records=0
		Input split bytes=326
		Spilled Records=0
		Failed Shuffles=0
		Merged Map outputs=0
		GC time elapsed (ms)=11308
		CPU time spent (ms)=9200
		Physical memory (bytes) snapshot=469209088
		Virtual memory (bytes) snapshot=2544439296
		Total committed heap usage (bytes)=245399552
	File Input Format Counters 
		Bytes Read=62970
	File Output Format Counters 
		Bytes Written=0


For this test I used three input files as the data source. They are small files here, but in real production you should avoid keeping small files on HDFS and merge them into larger files beforehand. Because there are three input files, three map tasks are launched and three separate indexes are produced. If a single index is needed, the per-task indexes can be merged afterwards, for example with a single-reduce job or a small standalone merge program, as sketched below.
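
Below is a minimal sketch of such a merge using Lucene's IndexWriter.addIndexes over HdfsDirectory. The merged-index path and the convention of passing the per-task index directories as program arguments are illustrative assumptions, not part of the original job:

package com.mapreduceindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;
import org.apache.solr.store.hdfs.HdfsDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class MergeIndexes {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://192.168.46.32:9000/");
		Analyzer analyzer = new IKAnalyzer(true);
		IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer);
		// the merged index is written to a new HDFS directory (path is illustrative)
		HdfsDirectory merged = new HdfsDirectory(new Path("hdfs://192.168.46.32:9000/qin/mergedindex"), conf);
		IndexWriter writer = new IndexWriter(merged, config);
		// args holds the per-map-task index directories produced by the job above
		Directory[] parts = new Directory[args.length];
		for (int i = 0; i < args.length; i++) {
			parts[i] = new HdfsDirectory(new Path(args[i]), conf);
		}
		writer.addIndexes(parts); // copy all segments into the merged index
		writer.forceMerge(1);     // optional: merge down to a single segment
		writer.close();
	}
}

The same addIndexes call could equally be issued from a single reducer's cleanup method if the merge is to stay inside a MapReduce job.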

 
