[Big Data Series] Using the API to Modify Hadoop's Replication Factor and Block Size


The JUnit test class below exercises the HDFS Java API three ways: testSave reads a file back from HDFS, testWrite creates a file with the default settings (and documents the permission error you hit when the target directory is not writable), and testWrite2 creates a file with a custom replication factor of 2 and a block size of 5 bytes through the five-argument FileSystem#create.

package com.slp.hdfs;

import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

import java.io.IOException;

/**
 * @author sanglp
 * @create 2017-12-08 11:26
 * @desc HDFS tests
 **/
public class TestHdfs {

    /**
     * Expected output:
     *   i am a girl
     *   i want to be a super man
     *   but i cannot still now
     * If the host s201 named in core-site.xml (under resources) has no local
     * mapping, an unknown-host error is thrown; if the file being read does
     * not exist, a file-not-found error is thrown (see the inline comments).
     */
    @Test
    public void testSave() {
        /*
         * Why new Configuration() picks up core-site.xml: the static
         * initializer of Configuration loads the default resources from
         * the classpath.
         *
         * static {
         *     deprecationContext = new AtomicReference(new Configuration.DeprecationContext((Configuration.DeprecationContext) null, defaultDeprecations));
         *     ClassLoader cL = Thread.currentThread().getContextClassLoader();
         *     if (cL == null) {
         *         cL = Configuration.class.getClassLoader();
         *     }
         *
         *     if (cL.getResource("hadoop-site.xml") != null) {
         *         LOG.warn("DEPRECATED: hadoop-site.xml found in the classpath. Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, mapred-site.xml and hdfs-site.xml to override properties of core-default.xml, mapred-default.xml and hdfs-default.xml respectively");
         *     }
         *
         *     addDefaultResource("core-default.xml");
         *     addDefaultResource("core-site.xml");
         * }
         */
        Configuration configuration = new Configuration(); // loads the files on the classpath
        try {
            FileSystem fs = FileSystem.get(configuration);
            // If s201 cannot be resolved locally:
            //   java.lang.IllegalArgumentException: java.net.UnknownHostException: s201
            // If the file does not exist:
            //   java.io.FileNotFoundException: File does not exist: /user/sanglp/hadoop/hello.txt
            Path path = new Path("hdfs://192.168.181.201/user/sanglp/hadoop/hello.txt");
            FSDataInputStream fis = fs.open(path);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            IOUtils.copyBytes(fis, baos, 1024);
            fis.close();
            System.out.print(new String(baos.toByteArray()));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Permission setup. Writing as a foreign user fails with:
     *   org.apache.hadoop.security.AccessControlException: Permission denied:
     *   user=hadoop, access=WRITE, inode="/user/sanglp/hadoop":sanglp:supergroup:drwxr-xr-x
     * Fix it by opening the directory to others:
     *   hdfs dfs -chmod o+w /user/sanglp/hadoop
     */
    @Test
    public void testWrite() {
        Configuration configuration = new Configuration();
        try {
            FileSystem fs = FileSystem.get(configuration);
            FSDataOutputStream fsDataOutputStream = fs.create(new Path("/user/sanglp/hadoop/a.txt"));
            fsDataOutputStream.write("how are you".getBytes());
            fsDataOutputStream.close(); // close to flush the write
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Customize the replication factor and block size. If the block size is
     * set below the NameNode's minimum, the write fails with:
     *   org.apache.hadoop.ipc.RemoteException(java.io.IOException): Specified block size is less than configured minimum value (dfs.namenode.fs-limits.min-block-size): 5 < 1048576
     * Lower the minimum in hdfs-site.xml:
     *   <property>
     *       <name>dfs.namenode.fs-limits.min-block-size</name>
     *       <value>5</value>
     *   </property>
     */
    @Test
    public void testWrite2() {
        Configuration configuration = new Configuration();
        try {
            FileSystem fs = FileSystem.get(configuration);
            // public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize)
            FSDataOutputStream fsDataOutputStream = fs.create(new Path("/user/sanglp/hadoop/a.txt"), true, 1024, (short) 2, 5);
            fsDataOutputStream.write("how are you".getBytes());
            fsDataOutputStream.close(); // close to flush the write
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

  
