Database development: using DMVs and DMFs to analyze database performance


I have been optimizing a database recently. Before you can optimize anything you need to know where the problems are, and SQL Server 2005's dynamic management views (DMVs) and functions (DMFs) provide the raw data for that analysis. The scripts below are the ones I have used; they were collected from various places around the web.

Causes of server waits
select top 10
    [Wait type] = wait_type,
    [Wait time (s)] = wait_time_ms / 1000,
    [% waiting] = convert(decimal(12, 2), wait_time_ms * 100.0 / sum(wait_time_ms) over())
from sys.dm_os_wait_stats
where wait_type not like '%SLEEP%'
order by wait_time_ms desc;
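
On a quiet server many of the top waits are benign background waits. A hedged variant of the same query that also filters out a few of the common ones (the exclusion list below is illustrative, not exhaustive):

select top 10
    [Wait type] = wait_type,
    [Wait time (s)] = wait_time_ms / 1000,
    [% waiting] = convert(decimal(12, 2), wait_time_ms * 100.0 / sum(wait_time_ms) over())
from sys.dm_os_wait_stats
where wait_type not like '%SLEEP%'                      -- idle/sleep waits
  and wait_type not in ('WAITFOR',                      -- explicit WAITFOR delays
                        'BROKER_TASK_STOP',             -- Service Broker background task
                        'SQLTRACE_BUFFER_FLUSH',        -- default trace flush
                        'REQUEST_FOR_DEADLOCK_SEARCH')  -- deadlock monitor idle wait
order by wait_time_ms desc;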

Reads and writes by database

select top 10
    [Total reads] = sum(total_logical_reads)
    , [Execution count] = sum(qs.execution_count)
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
group by db_name(qt.dbid)
order by [Total reads] desc;

select top 10
    [Total writes] = sum(total_logical_writes)
    , [Execution count] = sum(qs.execution_count)
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
group by db_name(qt.dbid)
order by [Total writes] desc;

Missing indexes by database

select
    DatabaseName = db_name(database_id)
    , [Number indexes missing] = count(*)
from sys.dm_db_missing_index_details
group by db_name(database_id)
order by 2 desc;

Missing index list information

select DatabaseName = db_name(database_id), *
from sys.dm_db_missing_index_details
order by db_name(database_id);

Most costly missing indexes

select top 10
    [Total cost] = round(avg_total_user_cost * avg_user_impact * (user_seeks + user_scans), 0)
    , avg_user_impact
    , TableName = statement
    , [EqualityUsage] = equality_columns
    , [InequalityUsage] = inequality_columns
    , [Include Columns] = included_columns
from sys.dm_db_missing_index_groups g
inner join sys.dm_db_missing_index_group_stats s
    on s.group_handle = g.index_group_handle
inner join sys.dm_db_missing_index_details d
    on d.index_handle = g.index_handle
order by [Total cost] desc;
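
The missing-index DMVs only suggest candidate indexes; you still have to create them yourself. A minimal sketch of turning one suggested row into a CREATE INDEX statement (the table, columns, and index name below are hypothetical, and every suggestion should be checked against existing indexes before being applied):

-- Hypothetical example: suppose a row of sys.dm_db_missing_index_details reports
-- equality_columns = [CustomerId], inequality_columns = [OrderDate],
-- included_columns = [TotalDue] for a statement on dbo.Orders.
create nonclustered index IX_Orders_CustomerId_OrderDate   -- hypothetical name
on dbo.Orders (CustomerId, OrderDate)                       -- equality columns first, then inequality columns
include (TotalDue);                                         -- included columns from the suggestion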

 


 

 

Most costly unused indexes (high maintenance cost, low retrieval usage), across all databases on the server.
-- Create the required table structure only.
-- Note: this SQL must be the same as in the database loop in the following step.
select top 1
    [Maintenance cost] = (user_updates + system_updates)
    , [Retrieval usage] = (user_seeks + user_scans + user_lookups)
    , DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
into #TempMaintenanceCost
from sys.dm_db_index_usage_stats s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.database_id = db_id()
    and objectproperty(s.[object_id], 'IsMsShipped') = 0
    and (user_updates + system_updates) > 0 -- only report on active rows.
    and s.[object_id] = -999 -- dummy value to get the table structure only.
;
-- Loop around all the databases on the server.
exec sp_msforeachdb 'use [?];
-- Table already exists.
insert into #TempMaintenanceCost
select top 10
    [Maintenance cost] = (user_updates + system_updates)
    , [Retrieval usage] = (user_seeks + user_scans + user_lookups)
    , DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
from sys.dm_db_index_usage_stats s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.database_id = db_id()
    and i.name is not null -- ignore heaps.
    and objectproperty(s.[object_id], ''IsMsShipped'') = 0
    and (user_updates + system_updates) > 0 -- only report on active rows.
order by [Maintenance cost] desc
;
'
-- Select the records.
select top 10 * from #TempMaintenanceCost
order by [Maintenance cost] desc
-- Tidy up.
drop table #TempMaintenanceCost

Most used indexes: displays how often each index has been used, sorted by usage, across all databases.
-- Create the required table structure only.
-- Note: this SQL must be the same as in the database loop in the following step.
select top 1
    [Usage] = (user_seeks + user_scans + user_lookups)
    , DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
into #TempUsage
from sys.dm_db_index_usage_stats s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.database_id = db_id()
    and objectproperty(s.[object_id], 'IsMsShipped') = 0
    and (user_seeks + user_scans + user_lookups) > 0
    -- Only report on active rows.
    and s.[object_id] = -999 -- dummy value to get the table structure only.
;
-- Loop around all the databases on the server.
exec sp_msforeachdb 'use [?];
-- Table already exists.
insert into #TempUsage
select top 10
    [Usage] = (user_seeks + user_scans + user_lookups)
    , DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
from sys.dm_db_index_usage_stats s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.database_id = db_id()
    and i.name is not null -- ignore heaps.
    and objectproperty(s.[object_id], ''IsMsShipped'') = 0
    and (user_seeks + user_scans + user_lookups) > 0 -- only report on active rows.
order by [Usage] desc
;
'
-- Select the records.
select top 10 * from #TempUsage order by [Usage] desc
-- Tidy up.
drop table #TempUsage

 

Most logically fragmented indexes, across all databases.
-- Create the required table structure only.
-- Note: this SQL must be the same as in the database loop in the following step.
select top 1
    DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
    , [Fragmentation %] = round(avg_fragmentation_in_percent, 2)
into #TempFragmentation
from sys.dm_db_index_physical_stats(db_id(), null, null, null, null) s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.[object_id] = -999 -- dummy value just to get the table structure.
;
-- Loop around all the databases on the server.
exec sp_msforeachdb 'use [?];
-- Table already exists.
insert into #TempFragmentation
select top 10
    DatabaseName = db_name()
    , TableName = object_name(s.[object_id])
    , IndexName = i.name
    , [Fragmentation %] = round(avg_fragmentation_in_percent, 2)
from sys.dm_db_index_physical_stats(db_id(), null, null, null, null) s
inner join sys.indexes i on s.[object_id] = i.[object_id]
    and s.index_id = i.index_id
where s.database_id = db_id()
    and i.name is not null -- ignore heaps.
    and objectproperty(s.[object_id], ''IsMsShipped'') = 0
order by [Fragmentation %] desc
;
'
-- Select the records.
select top 10 * from #TempFragmentation order by [Fragmentation %] desc
-- Tidy up.
drop table #TempFragmentation
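
Fragmentation reported here is normally addressed with ALTER INDEX. A minimal sketch, assuming a hypothetical index IX_Orders_CustomerId on dbo.Orders and using the commonly cited 5%/30% thresholds as a rough rule of thumb:

-- Hypothetical index and table names.
-- For moderate fragmentation (roughly 5-30%), reorganize:
alter index IX_Orders_CustomerId on dbo.Orders reorganize;
-- For heavy fragmentation (above roughly 30%), rebuild instead:
-- alter index IX_Orders_CustomerId on dbo.Orders rebuild;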

 

Queries with the highest I/O

select top 10
    [Average IO] = (total_logical_reads + total_logical_writes) / qs.execution_count
    , [Total IO] = (total_logical_reads + total_logical_writes)
    , [Execution count] = qs.execution_count
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
order by [Average IO] desc;

 

Obtain I/O latch wait statistics

select wait_type, waiting_tasks_count, wait_time_ms
from sys.dm_os_wait_stats
where wait_type like 'PAGEIOLATCH%'
order by wait_type;

Query currently pending I/O requests

select db_name(t1.database_id), t1.file_id, t1.io_stall, t2.io_pending_ms_ticks, t2.scheduler_address
from sys.dm_io_virtual_file_stats(null, null) as t1, sys.dm_io_pending_io_requests as t2
where t1.file_handle = t2.io_handle;

The five statements with the highest I/O

select top 5
    (total_logical_reads / execution_count) as avg_logical_reads,
    (total_logical_writes / execution_count) as avg_logical_writes,
    (total_physical_reads / execution_count) as avg_phys_reads,
    execution_count,
    statement_start_offset as stmt_start_offset,
    sql_handle,
    plan_handle
from sys.dm_exec_query_stats
order by (total_logical_reads + total_logical_writes) desc;

Obtain the statement based on the handle
select text from sys.dm_exec_sql_text(0x03000e00d4ab884e808214016b9a0000000000000000000)
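
In practice the handle usually comes from another DMV rather than being typed in by hand. A minimal sketch that pulls the text of whatever each active request is currently running (the session_id filter is only an illustrative way to skip system sessions):

select r.session_id, st.text
from sys.dm_exec_requests as r
cross apply sys.dm_exec_sql_text(r.sql_handle) as st
where r.session_id > 50;   -- illustrative cut-off; system sessions are usually 50 or below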

 

Determine the most costly queries measured by CPU usage

select top 10
    [Average CPU used] = total_worker_time / qs.execution_count
    , [Total CPU used] = total_worker_time
    , [Execution count] = qs.execution_count
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
order by [Average CPU used] desc;

Most costly CLR queries

select top 10
    [Average CLR time] = total_clr_time / execution_count
    , [Total CLR time] = total_clr_time
    , [Execution count] = qs.execution_count
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats as qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
where total_clr_time <> 0
order by [Average CLR time] desc;

The most frequently executed queries

select top 10
    [Execution count] = execution_count
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
order by [Execution count] desc;

 

Most blocked queries

select top 10
    [Average time blocked] = (total_elapsed_time - total_worker_time) / qs.execution_count
    , [Total time blocked] = total_elapsed_time - total_worker_time
    , [Execution count] = qs.execution_count
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
order by [Average time blocked] desc;

Lowest plan reuse

select top 100
    [Plan usage] = cp.usecounts
    , [Individual query] = substring(qt.text, qs.statement_start_offset / 2,
        (case when qs.statement_end_offset = -1
            then len(convert(nvarchar(max), qt.text)) * 2
            else qs.statement_end_offset end - qs.statement_start_offset) / 2)
    , [Parent query] = qt.text
    , DatabaseName = db_name(qt.dbid)
    , cp.cacheobjtype
from sys.dm_exec_query_stats qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) as qt
inner join sys.dm_exec_cached_plans as cp on qs.plan_handle = cp.plan_handle
order by [Plan usage] asc;
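
If plan reuse is low because the workload sends many near-identical, unparameterized ad hoc statements, one option SQL Server 2005 offers is forced parameterization at the database level. This is only a hedged sketch (the database name is a placeholder, and forced parameterization can change plan quality, so test it first):

-- Hypothetical database name; evaluate on a test system before applying in production.
alter database [YourDatabase] set parameterization forced;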

Database connections

select session_id, connect_time, endpoint_id, auth_scheme, num_reads, num_writes, client_net_address, connection_id
from sys.dm_exec_connections
order by client_net_address

Query optimizer information

select * from sys.dm_exec_query_optimizer_info

Currently executing requests

select * from sys.dm_exec_requests

Current sessions

select * from sys.dm_exec_sessions

Number of tasks waiting to run, per scheduler

select
    scheduler_id,
    current_tasks_count,
    runnable_tasks_count
from
    sys.dm_os_schedulers
where
    scheduler_id < 255

 

Full detail for all schedulers

select
    *
from
    sys.dm_os_schedulers
where
    scheduler_id < 255

 

Queries consuming the most total CPU, grouped by query plan

select top 50
    sum(qs.total_worker_time) as total_cpu_time,
    sum(qs.execution_count) as total_execution_count,
    count(*) as number_of_statements,
    qs.plan_handle
from
    sys.dm_exec_query_stats qs
group by qs.plan_handle
order by sum(qs.total_worker_time) desc

 


Obtain the time spent in query optimization within a given period of time.
select * from sys.dm_exec_query_optimizer_info
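
sys.dm_exec_query_optimizer_info reports cumulative counters since the instance started, so measuring optimization activity over a given period means taking two snapshots and comparing them. A minimal sketch (the one-minute window and the #opt_snapshot table name are arbitrary choices):

-- Take a baseline snapshot of the optimizer counters.
select counter, occurrence, value
into #opt_snapshot
from sys.dm_exec_query_optimizer_info;

waitfor delay '00:01:00';   -- arbitrary one-minute measurement window

-- Report how many times each counter fired during the window.
select o.counter,
       o.occurrence - s.occurrence as occurrence_delta,
       o.value as current_avg_value   -- value is an average per occurrence, not a running total
from sys.dm_exec_query_optimizer_info as o
join #opt_snapshot as s on s.counter = o.counter;

drop table #opt_snapshot;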

The 25 statements that have been recompiled most often

select top 25
    sql_text.text,
    sql_handle,
    plan_generation_num,
    execution_count,
    dbid,
    objectid
from
    sys.dm_exec_query_stats
    cross apply sys.dm_exec_sql_text(sql_handle) as sql_text
where
    plan_generation_num > 1
order by plan_generation_num desc

Queries with the highest CPU usage

select
    highest_cpu_queries.plan_handle,
    highest_cpu_queries.total_worker_time,
    q.dbid,
    q.objectid,
    q.number,
    q.encrypted,
    q.[text]
from
    (select top 50
        qs.plan_handle,
        qs.total_worker_time
     from
        sys.dm_exec_query_stats qs
     order by qs.total_worker_time desc) as highest_cpu_queries
cross apply sys.dm_exec_sql_text(plan_handle) as q
order by highest_cpu_queries.total_worker_time desc

 

How to clear the statistics

checkpoint
DBCC freeproccache                              -- clears the plan cache; use with care
DBCC dropcleanbuffers                           -- clears clean pages from the buffer pool; use with care
DBCC sqlperf('sys.dm_os_wait_stats', clear)     -- resets only the wait statistics
go

Comprehensive analysis:

select db_name(db.dbid) as 'Database name'
    , object_name(db.objectid) as 'Object'
    , qs.creation_time as 'Plan compile time'
    , qs.last_execution_time as 'Last execution time'
    , qs.execution_count as 'Execution count'
    , qs.total_elapsed_time / 1000 as 'Total elapsed time (ms)'
    , qs.total_physical_reads as 'Total physical reads'
    , qs.total_worker_time / 1000 as 'Total CPU time (ms)'
    , qs.total_logical_writes as 'Total logical writes'
    , qs.total_logical_reads as 'Total logical reads'
    , substring(st.text, (qs.statement_start_offset / 2) + 1,
        ((case statement_end_offset
            when -1 then datalength(st.text)
            else qs.statement_end_offset
          end - qs.statement_start_offset) / 2) + 1) as 'Statement executed'
    , [Parent query] = st.text
from sys.dm_exec_query_stats as qs cross apply
    sys.dm_exec_sql_text(qs.sql_handle) as st inner join
    (select *
     from sys.dm_exec_cached_plans cp cross apply
        sys.dm_exec_query_plan(cp.plan_handle)
    ) db
    on qs.plan_handle = db.plan_handle
where substring(st.text, (qs.statement_start_offset / 2) + 1,
        ((case statement_end_offset
            when -1 then datalength(st.text)
            else qs.statement_end_offset
          end - qs.statement_start_offset) / 2) + 1) not like '%fetch%'
order by qs.total_elapsed_time / 1000 desc

 
