Spark example

Posted by 21ca on 2017-03-07
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>active</groupId>
    <artifactId>spark-test</artifactId>
    <version>0.0.1-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>2.1.0</version>
        </dependency>
    </dependencies>
</project>
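To produce the jar referenced by the spark-submit command further below, package the project with Maven from the project root (the path is assumed to be D:\workspace\spark-test):

mvn clean package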
SparkTest.java
import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class SparkTest {

    public static void main(String[] args) {
        // setMaster("local") runs everything in-process, so no cluster is needed.
        SparkConf conf = new SparkConf().setAppName("Test").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Build an RDD from an in-memory list of lines.
        JavaRDD<String> file = sc.parallelize(Arrays.asList("Hello test", "Hello test2", "dds"));

        // Split each line on whitespace; flatMap flattens the per-line word lists into one RDD of words.
        JavaRDD<String> words = file.flatMap(s -> Arrays.asList(s.split(" |\t|\n|\r")).iterator());

        // Map each word to a (word, 1) pair, then sum the counts per word.
        JavaPairRDD<String, Integer> counts = words.mapToPair(s -> new Tuple2<String, Integer>(s, 1));
        counts = counts.reduceByKey((x, y) -> x + y);

        // collect() pulls the results back to the driver for printing.
        System.out.println(counts.collect());
        sc.close();
    }

}
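The same pipeline also works on file input: swapping parallelize for textFile gives an RDD with one element per line, and the rest of the chain is unchanged. A minimal sketch, assuming a hypothetical local file D:\data\input.txt:

// Hypothetical file-based variant: textFile yields one RDD element per line.
JavaRDD<String> lines = sc.textFile("D:\\data\\input.txt");
JavaPairRDD<String, Integer> counts = lines
        .flatMap(s -> Arrays.asList(s.split(" |\t|\n|\r")).iterator())
        .mapToPair(s -> new Tuple2<String, Integer>(s, 1))
        .reduceByKey((x, y) -> x + y);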
The program can be run standalone, or submitted to a Spark cluster: spark-submit.cmd --class SparkTest D:\workspace\spark-test\target\spark-test-0.0.1-SNAPSHOT.jar
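Note that setMaster("local") hard-codes local mode and takes precedence over the --master option of spark-submit, so for a real cluster submission you would typically drop it and let spark-submit choose the master. A minimal sketch:

// Master is left unset here so that spark-submit --master <url> decides where to run.
SparkConf conf = new SparkConf().setAppName("Test");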
