[root@node1 ~]# spark-shell --master yarn-client
Warning: Master yarn-client is deprecated since 2.0. Please use master "yarn" with specified deploy mode instead.
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
17/04/09 08:36:06 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
17/04/09 08:36:11 WARN Client: Neither spark.yarn.jars nor spark.yarn.archive is set, falling back to uploading libraries under SPARK_HOME.
17/04/09 08:36:24 ERROR SparkContext: Error initializing SparkContext.
org.apache.spark.SparkException: Yarn application has already ended! It might have been killed or unable to launch application master.
  at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.waitForApplication(YarnClientSchedulerBackend.scala:85)
  at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:62)
  at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:156)
  at org.apache.spark.SparkContext.<init>(SparkContext.scala:509)
  at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2313)
  at org.apache.spark.sql.SparkSession$Builder$$anonfun$6.apply(SparkSession.scala:868)
  at org.apache.spark.sql.SparkSession$Builder$$anonfun$6.apply(SparkSession.scala:860)
  at scala.Option.getOrElse(Option.scala:121)
  at org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:860)
  at org.apache.spark.repl.Main$.createSparkSession(Main.scala:95)
  at $line3.$read$$iw$$iw.<init>(<console>:15)
  at $line3.$read$$iw.<init>(<console>:42)
  at $line3.$read.<init>(<console>:44)
  at $line3.$read$.<init>(<console>:48)
  at $line3.$read$.<clinit>(<console>)
  at $line3.$eval$.$print$lzycompute(<console>:7)
  at $line3.$eval$.$print(<console>:6)
  at $line3.$eval.$print(<console>)
  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.lang.reflect.Method.invoke(Method.java:498)
  at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
  at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
  at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
  at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
  at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
  at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
  at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
  at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
  at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
  at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
  at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
  at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
  at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply$mcV$sp(SparkILoop.scala:38)
  at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:37)
  at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:37)
  at scala.tools.nsc.interpreter.IMain.beQuietDuring(IMain.scala:214)
  at org.apache.spark.repl.SparkILoop.initializeSpark(SparkILoop.scala:37)
  at org.apache.spark.repl.SparkILoop.loadFiles(SparkILoop.scala:105)
  at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:920)
  at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
  at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
  at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
  at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
  at org.apache.spark.repl.Main$.doMain(Main.scala:68)
  at org.apache.spark.repl.Main$.main(Main.scala:51)
  at org.apache.spark.repl.Main.main(Main.scala)
  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.lang.reflect.Method.invoke(Method.java:498)
  at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:738)
  at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:187)
  at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:212)
  at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:126)
  at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
17/04/09 08:36:24 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Attempted to request executors before the AM has registered!
17/04/09 08:36:24 WARN MetricsSystem: Stopping a MetricsSystem that is not running
org.apache.spark.SparkException: Yarn application has already ended! It might have been killed or unable to launch application master.
  at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.waitForApplication(YarnClientSchedulerBackend.scala:85)
  at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:62)
  at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:156)
  at org.apache.spark.SparkContext.<init>(SparkContext.scala:509)
  at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2313)
  at org.apache.spark.sql.SparkSession$Builder$$anonfun$6.apply(SparkSession.scala:868)
  at org.apache.spark.sql.SparkSession$Builder$$anonfun$6.apply(SparkSession.scala:860)
  at scala.Option.getOrElse(Option.scala:121)
  at org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:860)
  at org.apache.spark.repl.Main$.createSparkSession(Main.scala:95)
  ... 47 elided
<console>:14: error: not found: value spark
       import spark.implicits._
              ^
<console>:14: error: not found: value spark
       import spark.sql
              ^
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.1.0
      /_/

Using Scala version 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_112)
Type in expressions to have them evaluated.
Type :help for more information.
scala>
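Before digging into the failure itself, note the deprecation warning on the first line of output: since Spark 2.0, the combined master strings yarn-client and yarn-cluster are deprecated in favor of --master yarn plus an explicit deploy mode. The equivalent, non-deprecated invocation is:

[root@node1 ~]# spark-shell --master yarn --deploy-mode client

This only silences the warning; it does not affect the failure analyzed below.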
YARN itself is running normally, so the failure is specific to this spark-shell submission. (The "not found: value spark" errors above are just a side effect: the SparkContext failed to initialize, so the spark session object was never created and the REPL's automatic imports have nothing to import.) To see why the ApplicationMaster never launched, tail the ResourceManager log:
[root@node1 ~]# tail -50 /opt/hadoop-2.7.3/logs/yarn-root-resourcemanager-node1.log
  at org.apache.hadoop.fs.RawLocalFileSystem.deprecatedGetFileStatus(RawLocalFileSystem.java:611)
  at org.apache.hadoop.fs.RawLocalFileSystem.getFileLinkStatusInternal(RawLocalFileSystem.java:824)
  at org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:601)
  at org.apache.hadoop.fs.FilterFileSystem.getFileStatus(FilterFileSystem.java:421)
  at org.apache.hadoop.yarn.util.FSDownload.copy(FSDownload.java:253)
  at org.apache.hadoop.yarn.util.FSDownload.access$000(FSDownload.java:63)
  at org.apache.hadoop.yarn.util.FSDownload$2.run(FSDownload.java:361)
  at org.apache.hadoop.yarn.util.FSDownload$2.run(FSDownload.java:359)
  at java.security.AccessController.doPrivileged(Native Method)
  at javax.security.auth.Subject.doAs(Subject.java:422)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
  at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:358)
  at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:62)
  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
  at java.lang.Thread.run(Thread.java:745)
Failing this attempt. Failing the application.
2017-04-09 08:36:23,640 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1491741099370_0002 State change from FINAL_SAVING to FAILED
2017-04-09 08:36:23,640 WARN org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=root OPERATION=Application Finished - Failed TARGET=RMAppManager RESULT=FAILURE DESCRIPTION=App failed with state: FAILED PERMISSIONS=Application application_1491741099370_0002 failed 2 times due to AM Container for appattempt_1491741099370_0002_000002 exited with exitCode: -1000
For more detailed output, check application tracking page: http://node1:8088/cluster/app/application_1491741099370_0002 Then, click on links to logs of each attempt.
Diagnostics: File file:/tmp/spark-b465ad00-e218-48b0-a85a-c00907c5015f/__spark_libs__8229958103392672487.zip does not exist
java.io.FileNotFoundException: File file:/tmp/spark-b465ad00-e218-48b0-a85a-c00907c5015f/__spark_libs__8229958103392672487.zip does not exist
  at org.apache.hadoop.fs.RawLocalFileSystem.deprecatedGetFileStatus(RawLocalFileSystem.java:611)
  at org.apache.hadoop.fs.RawLocalFileSystem.getFileLinkStatusInternal(RawLocalFileSystem.java:824)
  at org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:601)
  at org.apache.hadoop.fs.FilterFileSystem.getFileStatus(FilterFileSystem.java:421)
  at org.apache.hadoop.yarn.util.FSDownload.copy(FSDownload.java:253)
  at org.apache.hadoop.yarn.util.FSDownload.access$000(FSDownload.java:63)
  at org.apache.hadoop.yarn.util.FSDownload$2.run(FSDownload.java:361)
  at org.apache.hadoop.yarn.util.FSDownload$2.run(FSDownload.java:359)
  at java.security.AccessController.doPrivileged(Native Method)
  at javax.security.auth.Subject.doAs(Subject.java:422)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
  at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:358)
  at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:62)
  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
  at java.lang.Thread.run(Thread.java:745)
Failing this attempt. Failing the application. APPID=application_1491741099370_0002
2017-04-09 08:36:23,641 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary: appId=application_1491741099370_0002, name=Spark shell, user=root, queue=default, state=FAILED, trackingUrl=http://node1:8088/cluster/app/application_1491741099370_0002, appMasterHost=N/A, startTime=1491741381902, finishTime=1491741383639, finalStatus=FAILED, memorySeconds=475, vcoreSeconds=0, preemptedAMContainers=0, preemptedNonAMContainers=0, preemptedResources=<memory:0, vCores:0>, applicationType=SPARK
2017-04-09 08:36:23,641 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue: Application removed - appId: application_1491741099370_0002 user: root leaf-queue of parent: root #applications: 0
2017-04-09 08:39:12,559 INFO org.apache.hadoop.yarn
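The ResourceManager log pinpoints the root cause: localization of the AM container failed because the Spark libraries that spark-shell uploaded (__spark_libs__*.zip) were registered under a local file:/tmp/... path rather than on HDFS, so the NodeManager chosen to launch the ApplicationMaster could not find the file. This matches the earlier warning that neither spark.yarn.jars nor spark.yarn.archive is set. The file: scheme in the diagnostics also suggests the staging directory resolved to the local filesystem of the submitting node, so it is worth confirming that HADOOP_CONF_DIR is exported for Spark and that fs.defaultFS points at HDFS.

A common remedy is to stage Spark's jars on HDFS once and point spark.yarn.jars at them. The paths below (/spark/jars and hdfs://node1:9000) are example values for this cluster, not values taken from the log; substitute your own fs.defaultFS and directory layout:

[root@node1 ~]# hdfs dfs -mkdir -p /spark/jars
[root@node1 ~]# hdfs dfs -put $SPARK_HOME/jars/*.jar /spark/jars/

Then add the property to $SPARK_HOME/conf/spark-defaults.conf (spark.yarn.archive pointing at a single zip of the jars works equally well):

spark.yarn.jars hdfs://node1:9000/spark/jars/*.jar

Besides fixing the FileNotFoundException, this removes the "falling back to uploading libraries under SPARK_HOME" warning and speeds up submission, because YARN can localize the jars from HDFS instead of re-uploading them on every run. After this change, spark-shell --master yarn-client starts without the exception.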