一、环境准备
编辑器:vscode
JDK版本:JDK1.8
项目管理器:maven
二、项目结构以及坐标依赖
项目结构:
坐标依赖:
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>2.7.5</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.7.5</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.5</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>2.7.5</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>2.7.5</version>
</dependency>
三、数据结构
数据下载地址:
链接:https://pan.baidu.com/s/10h_3TL27zYO0_WRTTfn2CQ 提取码:8888
数据预览: 去掉首行索引保存为.csv文件,上传到hadoop存储即可,存储路径自定义,只需后续在代码中修改即可。
四、项目代码
①MapReduce作业一
目标:job_counter: 统计每个球员的总的ACE数据
文件结构:
map_ace.java代码:
package job_counter;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Mapper for the ACE-counting job: emits (player name, ace count) per record.
 *
 * Input records are comma-separated lines of the tennis CSV (header removed
 * before upload, per the tutorial text); column 3 holds the player name and
 * column 15 the ACE count for that match.
 */
public class map_ace extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Reuse the output key/value objects across map() calls instead of
    // allocating per record — the standard Hadoop pattern to reduce GC load.
    private final Text outKey = new Text();
    private final LongWritable outValue = new LongWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] fields = value.toString().split(",");
        // Skip blank/short lines so one malformed record cannot abort the
        // entire job with an ArrayIndexOutOfBoundsException.
        if (fields.length <= 15) {
            return;
        }
        outKey.set(fields[3]);
        outValue.set(Long.parseLong(fields[15].trim()));
        context.write(outKey, outValue);
    }
}
reduce_ace.java代码:
package job_counter;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Reducer for the ACE-counting job: sums every ACE value emitted for a single
 * player and writes (player name, total aces).
 */
public class reduce_ace extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long total = 0;
        // Accumulate the per-match ACE counts grouped under this player.
        for (LongWritable count : values) {
            total += count.get();
        }
        context.write(key, new LongWritable(total));
    }
}
main_ace.java代码:
package job_counter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Driver for the ACE-counting job (job_counter).
 *
 * Wires map_ace / reduce_ace into one MapReduce job. The input and output
 * paths default to the original hard-coded HDFS locations, but can now be
 * overridden from the command line: args[0] = input path, args[1] = output
 * path (backward compatible — with no args, behavior is unchanged).
 */
public class main_ace extends Configured implements Tool {

    private static final String DEFAULT_INPUT =
            "hdfs://192.168.96.138:9000/user/hadoop/job_data";
    private static final String DEFAULT_OUTPUT =
            "hdfs://192.168.96.138:9000/user/hadoop/counter_out_ace";

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = super.getConf();
        // Allow CLI overrides; fall back to the original hard-coded paths.
        Path input = new Path(args.length > 0 ? args[0] : DEFAULT_INPUT);
        Path output = new Path(args.length > 1 ? args[1] : DEFAULT_OUTPUT);

        Job job = Job.getInstance(conf, "main_ace");
        job.setJarByClass(main_ace.class);

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, input);

        job.setMapperClass(map_ace.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        job.setReducerClass(reduce_ace.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Resolve the filesystem from the output path itself. The original
        // FileSystem.get(conf) consults fs.defaultFS, which may point at a
        // different filesystem than the fully-qualified hdfs:// URI being
        // checked/deleted here.
        FileSystem fs = output.getFileSystem(conf);
        if (fs.exists(output)) {
            fs.delete(output, true); // recursive, so reruns do not fail
        }

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, output);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new main_ace(), args));
    }
}
②MapReduce作业二
目标:job_class: 根据%DF(双误率)的四分位数将球员分成四组
文件结构:
map_class.java代码:
package job_class;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Identity mapper for the %DF classification job: forwards each CSV line
 * unchanged as the key (with a null value) so part_class can route it to a
 * quartile partition.
 */
public class map_class extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Pure pass-through: the whole record becomes the key; all routing
        // logic lives in the partitioner.
        context.write(value, NullWritable.get());
    }
}
part_class.java代码:
package job_class;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Partitions player records into four quartile buckets by the %DF value
 * (double-fault rate, column 18 of the CSV line carried as the key).
 *
 * Bucket mapping (partition index):
 *   df <= Q1            -> 3
 *   Q1 < df <= median   -> 2
 *   median < df <= Q3   -> 1
 *   df > Q3             -> 0
 */
public class part_class extends Partitioner<Text, NullWritable> {

    // Quartile cut points of the %DF distribution — presumably precomputed
    // from this dataset; confirm against the source data if it changes.
    private static final double Q1 = 0.111111111111110;
    private static final double MEDIAN = 0.181818181818182;
    private static final double Q3 = 0.300000000000000;

    @Override
    public int getPartition(Text text, NullWritable nullWritable, int numPartitions) {
        // Parse once instead of re-parsing the same field in every branch.
        double df = Double.parseDouble(text.toString().split(",")[18]);
        // Use a consistent <= chain. The original compared `df < Q1` first
        // and `df > Q1` next, so a value exactly equal to Q1 failed every
        // branch and fell through to partition 0 — the opposite extreme.
        if (df <= Q1) {
            return 3;
        } else if (df <= MEDIAN) {
            return 2;
        } else if (df <= Q3) {
            return 1;
        } else {
            return 0;
        }
    }
}
reduce_class.java代码:
package job_class;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Deduplicating pass-through reducer: emits each distinct record key exactly
 * once with a null value, preserving the quartile grouping chosen by
 * part_class.
 */
public class reduce_class extends Reducer<Text, NullWritable, Text, NullWritable> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // The values carry no data; writing the key once per group is the
        // entire job of this reducer.
        context.write(key, NullWritable.get());
    }
}
main_class.java代码:
package job_class;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Driver for the %DF quartile-classification job (job_class).
 *
 * Wires map_class / part_class / reduce_class into one MapReduce job with
 * four reducers (one per quartile bucket). The input and output paths
 * default to the original hard-coded HDFS locations, but can now be
 * overridden from the command line: args[0] = input path, args[1] = output
 * path (backward compatible — with no args, behavior is unchanged).
 */
public class main_class extends Configured implements Tool {

    private static final String DEFAULT_INPUT =
            "hdfs://192.168.96.138:9000/user/hadoop/job_data";
    private static final String DEFAULT_OUTPUT =
            "hdfs://192.168.96.138:9000/user/hadoop/class_out_%DF";

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = super.getConf();
        // Allow CLI overrides; fall back to the original hard-coded paths.
        Path input = new Path(args.length > 0 ? args[0] : DEFAULT_INPUT);
        Path output = new Path(args.length > 1 ? args[1] : DEFAULT_OUTPUT);

        Job job = Job.getInstance(conf, "main_class");
        job.setJarByClass(main_class.class);

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, input);

        job.setMapperClass(map_class.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        // Four reduce tasks, one per quartile bucket chosen by part_class.
        job.setPartitionerClass(part_class.class);
        job.setNumReduceTasks(4);

        job.setReducerClass(reduce_class.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Resolve the filesystem from the output path itself. The original
        // FileSystem.get(conf) consults fs.defaultFS, which may point at a
        // different filesystem than the fully-qualified hdfs:// URI being
        // checked/deleted here.
        FileSystem fs = output.getFileSystem(conf);
        if (fs.exists(output)) {
            fs.delete(output, true); // recursive, so reruns do not fail
        }

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, output);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new main_class(), args));
    }
}
|