My personal understanding of Hadoop
2 Hadoop's three run modes
Hadoop can run in three modes: local (standalone), pseudo-distributed, and fully distributed. These notes only cover the first.
Local:
Set JAVA_HOME (in conf/hadoop-env.sh).
hadoop-default.xml is the main (default) configuration and should not be changed; put site-specific overrides in hadoop-site.xml.
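For reference, a minimal hadoop-site.xml override for running in local mode might look like the following. This is a sketch based on the standard 0.x-era property names (fs.default.name, mapred.job.tracker), which are an assumption here, not taken from these notes:

<?xml version="1.0"?>
<configuration>
    <!-- Use the local filesystem instead of HDFS. -->
    <property>
        <name>fs.default.name</name>
        <value>file:///</value>
    </property>
    <!-- Run MapReduce in-process instead of against a JobTracker. -->
    <property>
        <name>mapred.job.tracker</name>
        <value>local</value>
    </property>
</configuration>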
Command:
hadoop/bin/hadoop demo.hadoop.HadoopGrep <directory containing the logs> <any output directory> <string to grep for>
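As a rough sketch, a driver class such as demo.hadoop.HadoopGrep could look like this with the classic org.apache.hadoop.mapred API. The wiring below mirrors Hadoop's stock grep example (RegexMapper plus LongSumReducer); it is an assumption for illustration, not the actual demo source:

package demo.hadoop;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.RegexMapper;

public class HadoopGrep {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(HadoopGrep.class);
        conf.setJobName("grep");

        FileInputFormat.setInputPaths(conf, new Path(args[0]));   // log directory
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));  // output directory

        conf.setMapperClass(RegexMapper.class);                   // emits (match, 1) per hit
        conf.set("mapred.mapper.regex", args[2]);                 // the string to grep for
        conf.setCombinerClass(LongSumReducer.class);
        conf.setReducerClass(LongSumReducer.class);               // sums the counts per match

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);

        JobClient.runJob(conf);                                   // submit and block until done
    }
}

JobClient.runJob blocks until the job completes, so the command returns only after the output directory has been written.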
3 A rewritten example
The toy classes below model Hadoop's moving parts: client-side job setup, the JobTracker/TaskTracker scheduling loop, and the mapper/reducer task pipeline.
package com.hadoop.example;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
// A mental model of how a Hadoop job is wired together on the client side.
public class HReal {

    public static void main(String[] args) {
        new HReal().work();
    }

    public void work() {
        // The user-supplied pieces of a job.
        JobConf conf = new JobConf();
        Mapper mapper = new Mapper();
        Reducer reducer = new Reducer();
        InputFormat inputFormat = new InputFormat();
        OutputFormat outputFormat = new OutputFormat();
        String inPath = "";
        String outPath = "";

        // The JobTracker queues the job and schedules it.
        JobTracker jobTracker = new JobTracker();
        Job job = new Job();
        jobTracker.addJob(job);
        job.run();
        if (Job.flg) {
            // The job has finished.
        }
    }
}
// Turns a key/value pair into intermediate output, stored as a SequenceFile.
class Mapper {
    private String kString;
    private String vString;

    public void deal(Class<?> c) {
    }

    private void deal(String s) {
    }

    public SequenceFile deal(String kString, String vString) {
        this.kString = kString;
        this.vString = vString;
        return new SequenceFile();
    }
}

// Intermediate map output in Hadoop's sequence-file form.
class SequenceFile {
}
// Merges the intermediate values and produces the final result.
class Reducer {
    private String resultString;
    private String middleString;

    public String deal() {
        return resultString;
    }
}
// Splits the input and turns raw lines into typed records.
class InputFormat {

    public RecordReader newReader() {
        return new RecordReader();
    }

    public Object format(String line) {
        return line;
    }

    public Object format(String line, Object type) {
        if (type instanceof String) {
            return line;
        }
        if (type instanceof Class) {
            return line.getClass();
        }
        return line;
    }

    // Split the input file into chunks, one per map task.
    public FileSplit getSplits(String pathFile) {
        File file = new File(pathFile);
        return new FileSplit();
    }
}

// One chunk of the input file, handed to a single mapper.
class FileSplit {
}
// Writes the final result out.
class OutputFormat {
    public void write(String result) {
    }
}

// HDFS metadata server.
class NameNode {
}

// HDFS block storage server.
class DataNode {
}

// The master node hosts both the JobTracker and the NameNode.
class Master {
    private JobTracker jobTracker;
    private NameNode nameNode;
}
// Accepts jobs from clients and schedules them in order.
class JobTracker {
    private MapReduce mapReduce;
    private InputFormat inputFormat;
    List<Job> jobs = new ArrayList<Job>();

    public void addJob(Job job) {
        jobs.add(job);
    }

    // Move on to the next queued job, if there is one.
    public void nextJob() {
        if (getCurrentFlg() < jobs.size()) {
            jobs.get(getCurrentFlg()).run();
        }
    }

    public int getCurrentFlg() {
        return 1;
    }

    // Report the state of the current job.
    public void currState() {
    }
}

// The MapReduce computation engine.
class MapReduce {
}

// Per-job configuration.
class JobConf {
}
// A submitted job; flg flips to true once it has run.
class Job {
    static boolean flg = false;

    public void run() {
        System.out.println("deal");
        flg = true;
    }
}
// A map task: reads its assigned file splits through the InputFormat.
class MapperTask {
    List<FileSplit> lst = new ArrayList<FileSplit>();
    InputFormat inputFormat;

    public void addTask(FileSplit fileSplit) {
        lst.add(fileSplit);
    }

    public void read() {
        // lst.get(0)
    }
}

// Pending map tasks waiting for a TaskTracker.
class Queue {
    List<MapperTask> lst = new ArrayList<MapperTask>();

    public void addMapperTask(MapperTask mapperTask) {
        lst.add(mapperTask);
    }
}
// Runs tasks on a worker node and reports progress back to the JobTracker.
class TaskTracker {
    int taskCount = 0;
    JobTracker jobTracker;

    public void nextMapReduce() {
        jobTracker.nextJob();
    }

    public void deal() {
    }

    // Periodically report how far the current task has progressed.
    public void schedule() {
        System.out.println("How much of the current task is done?");
        jobTracker.currState();
        if (taskCount == 3) {
            nextMapReduce();
        }
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}

// Iterates over the records inside one split.
class RecordReader {
}
// A reduce task: sort, merge, reduce, then write the final output.
class ReducerTask {
    Reducer reducer;
    OutputFormat outputFormat;

    public void sort() {
    }

    public void merge() {
    }

    public void deal() {
        sort();
        merge();
        String resultString = reducer.deal();
        outputFormat.write(resultString);
    }
}
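To see how the task-level pieces fit together, here is a tiny exercise of the sketch classes above (purely illustrative: the direct field assignments are just how this sketch wires things, not how Hadoop injects dependencies, and the log path is hypothetical):

// e.g. appended inside HReal.work(), after the job has been submitted:
MapperTask mapperTask = new MapperTask();
mapperTask.inputFormat = inputFormat;
// Split the input and hand one chunk to the map task.
mapperTask.addTask(inputFormat.getSplits("logs/access.log"));
mapperTask.read();

// Reduce side: sort, merge, reduce, then write the result.
ReducerTask reducerTask = new ReducerTask();
reducerTask.reducer = reducer;
reducerTask.outputFormat = outputFormat;
reducerTask.deal();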