hadoop_HA MapReduce: counting the total occurrences of each word in a given text file

Counting the total occurrences of each word with MapReduce

        • Upload the text file to HDFS
        • Create a Maven project in IDEA
        • Add dependencies
        • Define a Mapper class
        • Define a Reducer class
        • Define a driver class to describe and submit the job
        • Package the code as a JAR and run it on the server
        • Run results

Upload the text file to HDFS

hdfs dfs -mkdir /wordcount
hdfs dfs -put /export/servers/wordcount.txt  /wordcount/
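
To confirm the upload (an optional check, not part of the original steps):

hdfs dfs -ls /wordcount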

Create a Maven project in IDEA

(screenshot: creating a new Maven project in IDEA)

Add dependencies

Copy the following into the pom.xml file:

<repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
</repositories>
<dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <version>RELEASE</version>
        </dependency>
</dependencies>
<build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <minimizeJar>true</minimizeJar>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
</build>
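
With the shade plugin bound to the package phase, the runnable JAR can be built from the project root with a single command; the shaded JAR is written under target/:

mvn clean package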

Define a Mapper class

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WordCountMap extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // The key is the byte offset of the line in the file; the value is the line itself.
        // The sample input file is comma-separated, so split on ",".
        for (String s : value.toString().split(",")) {
            // Emit (word, 1) for every word on the line.
            context.write(new Text(s), new LongWritable(1));
        }
    }
}
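
Allocating a fresh Text and LongWritable for every record works, but a common optimization (a sketch, not part of the original post) reuses the output objects across calls; this is safe because context.write serializes the key and value immediately:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WordCountMap extends Mapper<LongWritable, Text, Text, LongWritable> {
    // Reused across map() calls to avoid allocating one object per output record.
    private final Text word = new Text();
    private final LongWritable one = new LongWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String s : value.toString().split(",")) {
            word.set(s);
            context.write(word, one);
        }
    }
}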

Define a Reducer class

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        // All counts emitted for the same word arrive here together; sum them.
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // Emit (word, total count).
        context.write(key, new LongWritable(count));
    }
}
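
Because addition is associative and commutative, this same class can also run as a combiner, pre-aggregating counts on the map side to cut shuffle traffic. This is an optional improvement not in the original post; it would be one extra line in the driver shown next:

// Optional (not in the original driver): pre-aggregate counts on the map side.
job.setCombinerClass(WordCountReduce.class);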

Define a driver class to describe and submit the job

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {
    public static void main(String[] args) throws Exception {

        Job job = Job.getInstance(new Configuration(), "WordCount_002");

        // Set the main class of the job (used to locate the JAR).
        job.setJarByClass(WordCountDriver.class);

        // Set the map and reduce classes.
        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        // Set the key/value types of the map output.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Set the key/value types of the reduce output.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Where to read the input from.
        FileInputFormat.addInputPath(job, new Path("/wordcount/wordcount.txt"));
        // Where to write the final result.
        FileOutputFormat.setOutputPath(job, new Path("/output2"));

        // Submit the job and wait for it to finish.
        // job.submit(); // submits without waiting; rarely used
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
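
FileOutputFormat refuses to run if the output directory already exists, so re-running the job against /output2 fails. A common guard (my addition, not in the original post) deletes the directory first via the FileSystem API; it needs one extra import and goes in main() before the output path is set:

import org.apache.hadoop.fs.FileSystem;

// Inside main(), before setting the output path:
FileSystem fs = FileSystem.get(job.getConfiguration());
Path out = new Path("/output2");
if (fs.exists(out)) {
    fs.delete(out, true);  // true = delete recursively
}
FileOutputFormat.setOutputPath(job, out);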

Package the code as a JAR, upload it to the server, and run it

Upload the JAR to the server:
rz      (install rz with: yum install -y lrzsz)
Run the JAR (if the classes are declared in a package, use the driver's fully qualified class name):
hadoop jar hadoop_HA_20201023_1-1.0-SNAPSHOT.jar WordCountDriver

Run results

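To inspect the result from the command line (using the paths configured above; a single reducer writes its output to part-r-00000 by default):

hdfs dfs -cat /output2/part-r-00000

Each output line is a word and its total count, separated by a tab (the TextOutputFormat default).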
