1. Requirements
The requirement for this example is simple.
We have a text file, wordcount.txt. The task: use the MapReduce framework to count how many times each word appears.
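The file's contents are not reproduced here, so for illustration assume wordcount.txt holds a few lines of space-separated words, for example (hypothetical contents, consistent with the "hello java" examples in the Mapper comments below):

hello java
hello hadoop
java mapreduce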
Although this example is simple, it helps newcomers to big data get familiar with the MapReduce framework.

2. Preparation
(1) Create a Maven project; the quickstart archetype is fine (see the command sketch after this list).
(2) Add hadoop.version under <properties> and import the dependencies. The full pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>maven_hadoop</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <hadoop.version>3.1.3</hadoop.version>
    </properties>
</project>
(3) Prepare the data: create two folders, in and out, one for the input file and one for the output; put the input file in the in folder.
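If you prefer the command line over the IDE for steps (1) and (2), a minimal sketch (the groupId and artifactId here match the pom above):

mvn archetype:generate -DgroupId=org.example -DartifactId=maven_hadoop -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false
mvn clean package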
3. Writing the WordCountMapper class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Input/output examples: (0, "hello java") -> ("hello", 1)
//                        (0, "hello java") -> ("java", 1)
// Alt+Insert (IDE shortcut) generates the override stub
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    Text text = new Text();
    IntWritable intWritable = new IntWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        System.out.println("WordCountMap stage Key:" + key + " Value:" + value);
        String[] words = value.toString().split(" "); // "hello java" -> [hello, java]
        for (String word : words) {
            text.set(word);
            intWritable.set(1);
            context.write(text, intWritable); // (hello,1), (java,1)
        }
    }
}
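Between the map and reduce stages, the framework shuffles and sorts by key, so each word arrives at the reducer with all of its 1s grouped together. With the hypothetical input above, the map output (hello,1), (java,1), (hello,1), (hadoop,1), (java,1), (mapreduce,1) reaches the reducer as:

hadoop    -> [1]
hello     -> [1, 1]
java      -> [1, 1]
mapreduce -> [1]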
4. Writing the WordCountReduce class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReduce extends Reducer<Text, IntWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        System.out.println("Reduce stage Key: " + key + " Values: " + values.toString());
        int count = 0;
        for (IntWritable intWritable : values) {
            count += intWritable.get();
        }
        LongWritable longWritable = new LongWritable(count);
        System.out.println("ReduceResult key:" + key + " resultValue:" + longWritable.get());
        context.write(key, longWritable);
    }
}
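Note that the reducer's input value type (IntWritable) matches the mapper's output value type, while its output value type is LongWritable. Because the map-side types and the final output types differ, the driver below has to declare them separately, via setMapOutputKeyClass/setMapOutputValueClass and setOutputKeyClass/setOutputValueClass.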
5. Writing the WordCountDriver class

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCountDriver.class);

        // Configure the map stage of the job
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Configure the reduce stage of the job
        job.setReducerClass(WordCountReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Specify the input file path for the map stage
        FileInputFormat.setInputPaths(job, new Path("D:\\bigdataworkspace\\kb23\\hadoopstu\\in\\wordcount.txt"));

        // Specify the output path for the reduce stage; delete it first if it
        // already exists (MapReduce refuses to write into an existing directory)
        Path path = new Path("D:\\bigdataworkspace\\kb23\\hadoopstu\\out1");
        FileSystem fileSystem = FileSystem.get(path.toUri(), conf);
        if (fileSystem.exists(path))
            fileSystem.delete(path, true);
        FileOutputFormat.setOutputPath(job, path);

        // Launch the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
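Running WordCountDriver should create the out1 directory containing a part-r-00000 file. Assuming the hypothetical wordcount.txt above, it would hold something like (tab-separated, the default TextOutputFormat layout):

hadoop	1
hello	2
java	2
mapreduce	1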