Hadoop实战-PHP-MapReduce
发布于:2018-6-8 8:51 作者:admin 浏览:2188 分类:系统架构1. 编写Mapper的代码
#vi WordMap.php
#!/usr/bin/php
<?php
// Hadoop Streaming mapper: reads text lines from STDIN and emits one
// "word<TAB>1" record per word.
while (($line = fgets(STDIN)) !== false) {
    // trim + PREG_SPLIT_NO_EMPTY drop the empty tokens that the line's
    // trailing newline (or leading/multiple whitespace) would otherwise
    // produce; the original emitted those as bogus empty "words".
    $words = preg_split('/\s+/', trim($line), -1, PREG_SPLIT_NO_EMPTY);
    foreach ($words as $word) {
        echo $word."\t"."1".PHP_EOL;
    }
}
?>
2.编写Reducer的代码
#vi WordReduce.php
#!/usr/bin/php
<?php
// Hadoop Streaming reducer: reads "word<TAB>count" records from STDIN,
// sums the counts per word, and prints the totals in key order.
$result = array();
while (($line = fgets(STDIN)) !== false) {
    $line = trim($line);
    if ($line === '') {
        continue; // skip blank lines so explode() doesn't fail on them
    }
    list($k, $v) = explode("\t", $line);
    // Initialize before += to avoid an undefined-array-key notice
    // (a warning since PHP 8) on the first occurrence of each word.
    if (!isset($result[$k])) {
        $result[$k] = 0;
    }
    $result[$k] += (int)$v;
}
ksort($result);
foreach ($result as $word => $count) {
    echo $word."\t".$count.PHP_EOL;
}
?>
3.运行WordMapReduce
#chmod 755 WordMap.php #chmod 755 WordReduce.php #bin/hadoop jar share/hadoop/tools/lib/hadoop-streaming-2.9.1.jar -file WordMap.php -mapper WordMap.php -file WordReduce.php -reducer WordReduce.php -input HdfsInput/* -output HdfsOutput
4.查看运行结果
#hadoop fs -ls HdfsOutput #hadoop fs -cat HdfsOutput/* #hadoop fs -get HdfsOutput LocalOutput #cat LocalOutput/*
Hadoop实战-MapReduce
发布于:2018-6-7 12:41 作者:admin 浏览:1889 分类:系统架构
Hadoop实战-环境搭建
http://www.wangfeilong.cn/server/114.html
Hadoop实战-MapReduce
1. 编写Mapper的代码
#vi WordMap.java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
// Mapper implementation: tokenizes each input line and emits (word, 1) pairs.
public class WordMap extends Mapper<LongWritable, Text, Text, LongWritable> {
    /**
     * Splits one input line into words and writes each as (word, 1).
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   the text of one input line
     * @param context used to emit the intermediate (Text, LongWritable) pairs
     *                that the framework groups by key before the reducer
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Split on runs of whitespace, not a single space: the original
        // split(" ") produced empty-string "words" for tabs, multiple
        // spaces, or leading whitespace, and those got counted.
        String line = value.toString().trim();
        if (line.isEmpty()) {
            return; // nothing to emit for blank lines
        }
        String[] words = line.split("\\s+");
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
2.编写Reducer的代码
#vi WordReduce.java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
// Reducer implementation: sums the per-word counts emitted by WordMap.
public class WordReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
    /**
     * Accumulates the total number of occurrences for one word.
     *
     * @param key     the word, as produced by the mapper
     * @param values  all counts emitted for this word (each is 1 here)
     * @param context used to write the final (word, total) record
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // Add up every count the framework grouped under this key.
        long total = 0;
        for (LongWritable occurrence : values) {
            total += occurrence.get();
        }
        context.write(key, new LongWritable(total));
    }
}
3.编写main方法执行这个MapReduce
#vi WordMapReduce.java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
// Driver: configures and submits the word-count MapReduce job.
public class WordMapReduce {
    /**
     * Entry point.
     *
     * @param args args[0] = HDFS input path, args[1] = HDFS output path
     *             (the output path must not already exist).
     *             Exits with status 0 on job success, 1 on failure or bad usage;
     *             the original always exited 0 even when the job failed.
     * @throws IOException if the job cannot be set up or submitted
     */
    public static void main(String[] args) throws IOException {
        if (args.length < 2) {
            System.err.println("Usage: WordMapReduce <input path> <output path>");
            System.exit(1);
        }
        Configuration conf = new Configuration();
        // These two settings are not needed when the packaged jar runs on the
        // cluster; uncomment them only for a local client submitting to YARN.
        /*
        conf.set("mapreduce.framework.name", "yarn");
        conf.set("yarn.resourcemanager.hostname", "localhost");
        */
        Job job = Job.getInstance(conf);
        // Lets Hadoop locate the jar that contains this driver class.
        job.setJarByClass(WordMapReduce.class);
        job.setMapperClass(WordMap.class);
        job.setReducerClass(WordReduce.class);
        // Intermediate (mapper) output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        // Final (reducer) output types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // Input path from the first launch argument.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Output path from the second launch argument.
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        try {
            // true => print progress/counters while running. Propagate the
            // job's success flag through the process exit code instead of
            // silently ignoring failures.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
            System.exit(1);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before exiting.
            Thread.currentThread().interrupt();
            e.printStackTrace();
            System.exit(1);
        }
    }
}
4. 编译WordMapReduce
注意环境变量
#export CLASSPATH=.:/usr/local/soft/jdk1.8.0_171/lib:/usr/local/soft/jdk1.8.0_171/jre/lib:$(/usr/local/soft/hadoop/bin/hadoop classpath):$CLASSPATH
编译
#javac WordMap.java
#javac WordReduce.java
#javac WordMapReduce.java
5.打包 WordMap、WordReduce、WordMapReduce的class打包
#jar cvf WordMapReduce.jar Word*.class
6.运行WordMapReduce
#hadoop jar WordMapReduce.jar WordMapReduce HdfsInput HdfsOutput
7.查看运行结果
#hadoop fs -ls HdfsOutput
#hadoop fs -cat HdfsOutput/*
#hadoop fs -get HdfsOutput LocalOutput
#cat LocalOutput/*