package org.apache.spark.ui

/**
 * Explanations of the various time periods and metrics that Spark reports,
 * shown as tooltips in the web UI.
 */
private[spark] object ToolTips {
  val SCHEDULER_DELAY =
    """Scheduler delay includes time to ship the task from the scheduler to
       the executor, and time to send the task result from the executor to the scheduler. If
       scheduler delay is large, consider decreasing the size of tasks or decreasing the size
       of task results."""
  val TASK_DESERIALIZATION_TIME =
    """Time spent deserializing the task closure on the executor, including the time to read the
       broadcasted task."""
  val SHUFFLE_READ_BLOCKED_TIME =
    "Time that the task spent blocked waiting for shuffle data to be read from remote machines."
  val INPUT = "Bytes and records read from Hadoop or from Spark storage."
  val OUTPUT = "Bytes and records written to Hadoop."
  val STORAGE_MEMORY =
    "Memory used / total available memory for storage of data " +
      "like RDD partitions cached in memory."
  val SHUFFLE_WRITE =
    "Bytes and records written to disk in order to be read by a shuffle in a future stage."
  val SHUFFLE_READ =
    """Total shuffle bytes and records read (includes both data read locally and data read from
       remote executors)."""
  val SHUFFLE_READ_REMOTE_SIZE =
    """Total shuffle bytes read from remote executors. This is a subset of the shuffle
       read bytes; the remaining shuffle data is read locally."""
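  // Illustrative sketch (assumed example, not part of the tooltip text): a
  // wide transformation such as
  //   rdd.map(word => (word, 1)).reduceByKey(_ + _)
  // writes shuffle data at the end of the map stage (shuffle write) and reads
  // it back, locally or from remote executors, at the start of the next stage
  // (shuffle read).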
  val GETTING_RESULT_TIME =
    """Time that the driver spends fetching task results from workers. If this is large, consider
       decreasing the amount of data returned from each task."""
  val RESULT_SERIALIZATION_TIME =
    """Time spent serializing the task result on the executor before sending it back to the
       driver."""
  val GC_TIME =
    """Time that the executor spent paused for Java garbage collection while the task was
       running."""
  val JOB_TIMELINE =
    """Shows when jobs started and ended and when executors joined or left. Drag to scroll.
       Click Enable Zooming and use mouse wheel to zoom in/out."""
  val STAGE_TIMELINE =
    """Shows when stages started and ended and when executors joined or left. Drag to scroll.
       Click Enable Zooming and use mouse wheel to zoom in/out."""
  val JOB_DAG =
    """Shows a graph of stages executed for this job, each of which can contain
       multiple RDD operations (e.g. map() and filter()), and of RDDs inside each operation
       (shown as dots)."""
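  // Illustrative sketch (assumed example, not part of the tooltip text): a job
  // such as
  //   sc.textFile("hdfs://...").filter(_.nonEmpty).map(_.length).count()
  // appears in this graph as stages whose filter() and map() operations are
  // drawn as boxes, with the RDDs inside each operation drawn as dots.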
  val STAGE_DAG =
    """Shows a graph of RDD operations in this stage, and RDDs inside each one. A stage can run
       multiple operations (e.g. two map() functions) if they can be pipelined. Some operations
       also create multiple RDDs internally. Cached RDDs are shown in green.
    """
}
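
// A minimal usage sketch (assumes Spark's UIUtils.tooltip helper, which wraps
// a string in Bootstrap tooltip markup at a given placement; treat the exact
// signature as an assumption):
//   UIUtils.tooltip(ToolTips.SCHEDULER_DELAY, "right")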