[Study Notes] Hadoop Big Data Platform Architecture and Practice -- Basics, Part 2


Article source:
http://blog.csdn.net/huanglong8/article/details/64124063

Video course source:
http://www.imooc.com/learn/391

5. Word-count example: WordCount

The basic process is:
Write WordCount.java, containing a Mapper class and a Reducer class.
Compile WordCount.java with javac -classpath ...
Package it: jar -cvf WordCount.jar classes/*
Submit the job: hadoop jar WordCount.jar WordCount input output

I won't walk through the code here; it is pasted directly below.

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class WordCountMap extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer token = new StringTokenizer(line);
            while (token.hasMoreTokens()) {
                word.set(token.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class WordCountReduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setJobName("wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}

I rebooted the machine, so Hadoop has to be started again; once it is up you can check the daemons with jps.

/opt/hadoop-1.2.1/bin/start-all.sh
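If everything came up, jps should list the five Hadoop 1.x daemons plus Jps itself; the PIDs below are only illustrative:

jps
2354 NameNode
2480 DataNode
2610 SecondaryNameNode
2705 JobTracker
2830 TaskTracker
2901 Jps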

Upload the source file via Samba; if you don't know how to configure Samba, look it up on your own.

After uploading, cp it into the working directory, then compile:

mkdir word_count_class
javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d word_count_class/ WordCount.java

After compilation finishes, the directory contains three .class files (WordCount.class plus the two inner classes).

Next comes packaging. Run the jar command from inside word_count_class so the .class files end up at the root of the jar:

jar -cvf wordcount.jar *.class

Now create the sample input files and upload them to HDFS.
Directory word_count/input/file1:
hello world
hello hadoop
hadoop file system
hadoop java api
hello java
hello api
hello ubuntu

Directory word_count/input/file2:
new file
new system
hadoop file
hadoop new world
hadoop free home
hadoop free school
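If you prefer to create these two files straight from the shell (just a convenience sketch; the word_count/input layout is the one used above), something like this works:

mkdir -p word_count/input
cat > word_count/input/file1 << 'EOF'
hello world
hello hadoop
hadoop file system
hadoop java api
hello java
hello api
hello ubuntu
EOF
cat > word_count/input/file2 << 'EOF'
new file
new system
hadoop file
hadoop new world
hadoop free home
hadoop free school
EOF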

Create an HDFS directory
and put the two input files into it:

hadoop fs -mkdir input_wordcount
hadoop fs -put input/* input_wordcount/

Then run the job with hadoop:

hadoop jar word_count_class/wordcount.jar WordCount input_wordcount output_wordcount

Check the result files:


hadoop fs -ls output_wordcount
hadoop fs -cat output_wordcount/part-r-00000
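With the two input files above and whitespace tokenization, the contents of part-r-00000 should come out roughly like this (keys sorted alphabetically by the shuffle, key and count separated by a tab):

api	2
file	3
free	2
hadoop	7
hello	5
home	1
java	2
new	3
school	1
system	2
ubuntu	1
world	2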

6. Sorting with MapReduce

The idea is to first split the data into ranges (partitions), sort each range independently, and finally merge and write out the results in the Reduce phase; because the ranges themselves are ordered, concatenating the partition outputs yields a globally sorted list.

Here is the code...

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Sort {

    // Emits each input number as the key; the shuffle sorts keys within each partition.
    public static class Map extends
            Mapper<Object, Text, IntWritable, IntWritable> {

        private static IntWritable data = new IntWritable();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            data.set(Integer.parseInt(line));
            context.write(data, new IntWritable(1));
        }
    }

    // Writes a running line number followed by each key; duplicates are preserved.
    public static class Reduce extends
            Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

        private static IntWritable linenum = new IntWritable(1);

        public void reduce(IntWritable key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            for (IntWritable val : values) {
                context.write(linenum, key);
                linenum = new IntWritable(linenum.get() + 1);
            }
        }
    }

    // Sends keys in the range [bound*i, bound*(i+1)) to reducer i, so the
    // concatenated reducer outputs are globally sorted.
    public static class Partition extends
            Partitioner<IntWritable, IntWritable> {

        @Override
        public int getPartition(IntWritable key, IntWritable value,
                int numPartitions) {
            int MaxNumber = 65223; // assumed upper bound on the input values
            int bound = MaxNumber / numPartitions + 1;
            int keynumber = key.get();
            for (int i = 0; i < numPartitions; i++) {
                if (keynumber >= bound * i && keynumber < bound * (i + 1))
                    return i;
            }
            return 0;
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Sort <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "Sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(Map.class);
        job.setPartitionerClass(Partition.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Simply pasting code isn't great, but it helps for learning; the goal for now is to get the Hadoop workflow running end to end, and the library classes can be picked up as you use them.

The Map class emits each number as a key (the framework sorts the keys during the shuffle), and the Partition class splits the key range so that each reducer receives one contiguous range of values.
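To sanity-check the partitioning arithmetic, here is a small standalone sketch (a hypothetical helper, not part of the job) that mirrors getPartition for three reducers: with MaxNumber = 65223, bound works out to 21742, so keys 0-21741 land in partition 0, 21742-43483 in partition 1, and 43484 and up in partition 2.

public class PartitionCheck {

    // Mirrors Sort.Partition.getPartition with the same assumed upper bound.
    static int partitionFor(int key, int numPartitions) {
        int maxNumber = 65223;
        int bound = maxNumber / numPartitions + 1; // 21742 when numPartitions == 3
        for (int i = 0; i < numPartitions; i++) {
            if (key >= bound * i && key < bound * (i + 1))
                return i;
        }
        return 0;
    }

    public static void main(String[] args) {
        int[] samples = {5, 21741, 21742, 43483, 43484, 65000};
        for (int k : samples) {
            System.out.println(k + " -> partition " + partitionFor(k, 3));
        }
        // Expected: 5 and 21741 -> 0, 21742 and 43483 -> 1, 43484 and 65000 -> 2
    }
}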

Then run it the same way as WordCount; practice on your own. A rough sketch of the commands follows.
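For reference, a run might look like the commands below; the names sort_class, sort.jar, sort_input, input_sort and output_sort are placeholders chosen for illustration, not anything fixed by the course, and each input file is assumed to contain one integer per line.

mkdir sort_class
javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d sort_class/ Sort.java
cd sort_class && jar -cvf sort.jar *.class && cd ..
hadoop fs -mkdir input_sort
hadoop fs -put sort_input/* input_sort/
hadoop jar sort_class/sort.jar Sort input_sort output_sort
hadoop fs -cat output_sort/part-r-00000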
