Thursday, July 4, 2019

Day-to-Day Queries Every DBA Needs to Have Up Their Sleeve

-- Table sizes
create table #TableSize (
    Name varchar(255),
    [rows] int,
    reserved varchar(255),
    data varchar(255),
    index_size varchar(255),
    unused varchar(255))
create table #ConvertedSizes (
    Name varchar(255),
    [rows] int,
    reservedKb int,
    SizeKB FLOAT,
    SizeMB FLOAT,
    SizeGB FLOAT,
    reservedIndexSize int,
    reservedUnused int)

EXEC sp_MSforeachtable @command1 = 'insert into #TableSize
EXEC sp_spaceused ''?'''
insert into #ConvertedSizes (Name, [rows], reservedKb,SizeKB,SizeMB, SizeGB, reservedIndexSize, reservedUnused)
select name, [rows], 
SUBSTRING(reserved, 0, LEN(reserved)-2), 
SUBSTRING(data, 0, LEN(data)-2), 
(CAST(SUBSTRING(data, 0, LEN(data)-2) AS FLOAT)/1024), 
round((CAST(SUBSTRING(data, 0, LEN(data)-2) AS FLOAT)/1024)/1024,2), 
SUBSTRING(index_size, 0, LEN(index_size)-2), 
SUBSTRING(unused, 0, LEN(unused)-2)
from #TableSize

select * from #ConvertedSizes
--WHERE SizeGB > 0.1
order by SizeGB desc

drop table #TableSize
drop table #ConvertedSizes
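
If you prefer to skip the undocumented sp_MSforeachtable loop, the same per-table sizes can be read straight from sys.dm_db_partition_stats. A minimal sketch (standard DMV columns, no temp tables needed):

-- Table sizes via sys.dm_db_partition_stats (alternative sketch)
SELECT  s.name + '.' + t.name AS TableName,
        SUM(CASE WHEN ps.index_id IN (0, 1) THEN ps.row_count ELSE 0 END) AS [rows],
        SUM(ps.reserved_page_count) * 8 / 1024.0 AS ReservedMB
FROM sys.dm_db_partition_stats AS ps
JOIN sys.tables  AS t ON t.object_id = ps.object_id
JOIN sys.schemas AS s ON s.schema_id = t.schema_id
GROUP BY s.name, t.name
ORDER BY ReservedMB DESC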

-- Database sizes
with fs
as
(
    select database_id, type, size * 8.0 / 1024 size
    from sys.master_files
)
select
    name as NomeDoBanco,
    ((select sum(size) from fs where type = 0 and fs.database_id = db.database_id)/1024) DataFileSizeGb,
    ((select sum(size) from fs where type = 1 and fs.database_id = db.database_id)/1024) LogFileSizeGB
into #tmp_database
from sys.databases db


select *
from #tmp_database
order by DataFileSizeGb desc

drop table #tmp_database
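
The CTE plus correlated subqueries above works, but the same per-database totals can also be computed in a single grouped query; a minimal sketch (size in sys.master_files is stored in 8 KB pages):

-- Database sizes, grouped form (alternative sketch)
SELECT  d.name AS DatabaseName,
        SUM(CASE WHEN mf.type = 0 THEN mf.size ELSE 0 END) * 8 / 1024.0 / 1024.0 AS DataFileSizeGb,
        SUM(CASE WHEN mf.type = 1 THEN mf.size ELSE 0 END) * 8 / 1024.0 / 1024.0 AS LogFileSizeGB
FROM sys.master_files AS mf
JOIN sys.databases AS d ON d.database_id = mf.database_id
GROUP BY d.name
ORDER BY DataFileSizeGb DESC
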
-- Check server disk volumes
SELECT DISTINCT
 @@servername as "Servidor",
 QUOTENAME(CAST(CONNECTIONPROPERTY('local_net_address') AS VARCHAR)) AS "IP",
 VS.volume_mount_point [Montagem]
 ,VS.logical_volume_name AS [Volume]
 ,CAST(CAST(VS.total_bytes AS DECIMAL(19,2))/1024 /1024 /1024 AS DECIMAL (10,2)) AS [Total (GB)]
 ,CAST(CAST(VS.available_bytes AS DECIMAL(19,2))/1024 /1024 /1024 AS DECIMAL (10,2)) AS [Espaço Disponível (GB)]
 ,CAST((CAST(VS.available_bytes AS DECIMAL(19,2)) / CAST(VS.total_bytes AS DECIMAL(19,2)) * 100 ) AS DECIMAL(10,2)) AS [Espaço Disponível ( % )]
 ,CAST((100 - CAST(VS.available_bytes AS DECIMAL(19,2)) / CAST(VS.total_bytes AS DECIMAL(19,2)) * 100) AS DECIMAL (10,2)) AS [Espaço em uso ( % )]
FROM sys.master_files AS MF
CROSS APPLY [sys].[dm_os_volume_stats](MF.database_id, MF.FILE_ID) AS VS
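
A handy follow-up is to list only the volumes that are running low on space; a sketch, where the 10% free-space threshold is just an assumed example:

-- Volumes below a free-space threshold (sketch)
SELECT v.volume_mount_point, v.FreePct
FROM (
    SELECT DISTINCT
        VS.volume_mount_point,
        CAST(VS.available_bytes * 100.0 / VS.total_bytes AS DECIMAL(10, 2)) AS FreePct
    FROM sys.master_files AS MF
    CROSS APPLY sys.dm_os_volume_stats(MF.database_id, MF.file_id) AS VS
) AS v
WHERE v.FreePct < 10.0
ORDER BY v.FreePct
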
-- Backup history
SELECT TOP 100 s.database_name As NomeDobanco, 
 CASE s.[type] 
 WHEN 'D' THEN 'Full'
 WHEN 'I' THEN 'Differential'
 WHEN 'L' THEN 'Transaction Log'
 END AS TipoDeBackup, 
 m.physical_device_name AS CaminhoFisicoDoArquivo, 
 Cast(Cast(s.backup_size / 1000000 AS INT) AS VARCHAR(14)) 
 + ' ' + 'MB' AS TamanhoDoBackup, 
 Cast(Datediff(second, s.backup_start_date, s.backup_finish_date) 
 AS VARCHAR(4)) 
 + ' ' + 'Segundos' AS TempoDoBackup, 
 s.backup_start_date AS DataInicioDoBackup, 
 s.server_name AS Servidor, 
 s.recovery_model ,*
FROM msdb.dbo.backupset s 
INNER JOIN msdb.dbo.backupmediafamily m 
ON s.media_set_id = m.media_set_id 
where s.[type]  = 'D'
ORDER BY backup_start_date DESC, 
backup_finish_date 
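
As a companion check, this sketch lists the most recent full backup per database; a NULL LastFullBackup means no full backup has ever been recorded in msdb for that database:

-- Latest full backup per database (sketch)
SELECT  d.name AS DatabaseName,
        MAX(s.backup_finish_date) AS LastFullBackup
FROM sys.databases AS d
LEFT JOIN msdb.dbo.backupset AS s
       ON s.database_name = d.name
      AND s.[type] = 'D'
GROUP BY d.name
ORDER BY LastFullBackup
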
-- TempDB allocation
SELECT A.SESSION_ID,
  B.HOST_NAME, 
  B.LOGIN_NAME ,
  (USER_OBJECTS_ALLOC_PAGE_COUNT + INTERNAL_OBJECTS_ALLOC_PAGE_COUNT)*1.0/128 AS TOTALALOCADOMB,
  D.TEXT
FROM SYS.DM_DB_SESSION_SPACE_USAGE A
JOIN SYS.DM_EXEC_SESSIONS B  ON A.SESSION_ID = B.SESSION_ID
JOIN SYS.DM_EXEC_CONNECTIONS C ON C.SESSION_ID = B.SESSION_ID
CROSS APPLY SYS.DM_EXEC_SQL_TEXT(C.MOST_RECENT_SQL_HANDLE) AS D
WHERE  (USER_OBJECTS_ALLOC_PAGE_COUNT + INTERNAL_OBJECTS_ALLOC_PAGE_COUNT)*1.0/128 > 10 -- SESSIONS USING MORE THAN 10 MB
ORDER BY TOTALALOCADOMB DESC
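
When you also want what the currently running requests are allocating (not just the session totals), sys.dm_db_task_space_usage gives a task-level view; a sketch using the same 128-pages-per-MB conversion:

-- TempDB allocation for running requests (sketch)
SELECT  t.session_id,
        SUM(t.user_objects_alloc_page_count + t.internal_objects_alloc_page_count) / 128.0 AS AllocatedMB
FROM sys.dm_db_task_space_usage AS t
GROUP BY t.session_id
HAVING SUM(t.user_objects_alloc_page_count + t.internal_objects_alloc_page_count) / 128.0 > 10
ORDER BY AllocatedMB DESC
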
-- Uptime
SET NOCOUNT ON

DECLARE @crdate DATETIME,
        @hr VARCHAR(50),
        @min VARCHAR(5)

SELECT @crdate = crdate
FROM sysdatabases
WHERE NAME = 'tempdb'

SELECT @hr = (DATEDIFF(mi, @crdate, GETDATE())) / 60

IF ((DATEDIFF(mi, @crdate, GETDATE())) / 60) = 0
    SELECT @min = (DATEDIFF(mi, @crdate, GETDATE()))
ELSE
    SELECT @min = (DATEDIFF(mi, @crdate, GETDATE())) - ((DATEDIFF(mi, @crdate, GETDATE())) / 60) * 60

PRINT 'SQL Server "' + CONVERT(VARCHAR(20), SERVERPROPERTY('SERVERNAME')) + '" has been online for ' + @hr + ' hours and ' + @min + ' minutes.'
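
On SQL Server 2008 and later the same uptime can be read in one query from sys.dm_os_sys_info, which exposes the instance start time directly; a minimal sketch:

-- Uptime via sys.dm_os_sys_info (sketch)
SELECT  sqlserver_start_time,
        DATEDIFF(minute, sqlserver_start_time, GETDATE()) / 60 AS HoursOnline,
        DATEDIFF(minute, sqlserver_start_time, GETDATE()) % 60 AS MinutesOnline
FROM sys.dm_os_sys_info
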
-- Memory ratio
select
    (select cntr_value from sys.dm_os_performance_counters
     where object_name like '%Memory Manager%' and counter_name like 'Maximum Workspace Memory (KB)%') as 'Maximum Workspace Memory (KB)',
    (select cntr_value from sys.dm_os_performance_counters
     where object_name like '%Memory Manager%' and counter_name like 'Target Server Memory (KB)%') as 'Target Server Memory (KB)',
    (select cntr_value from sys.dm_os_performance_counters
     where object_name like '%Memory Manager%' and counter_name like 'Maximum Workspace Memory (KB)%') * 100.0 /
    (select cntr_value from sys.dm_os_performance_counters
     where object_name like '%Memory Manager%' and counter_name like 'Target Server Memory (KB)%') as Ratio
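
Alongside the workspace/target ratio, Page Life Expectancy from the Buffer Manager object is a common memory-pressure counter to check; a minimal sketch against the same DMV:

-- Page Life Expectancy (sketch)
SELECT cntr_value AS [Page Life Expectancy (s)]
FROM sys.dm_os_performance_counters
WHERE object_name LIKE '%Buffer Manager%'
  AND counter_name = 'Page life expectancy'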
-- CPU usage
DECLARE @ts_now BIGINT
SELECT @ts_now = cpu_ticks / (cpu_ticks / ms_ticks) FROM sys.dm_os_sys_info WITH (NOLOCK)

SELECT TOP(256) SQLProcessUtilization AS [SQL Server Process CPU Utilization], 
               SystemIdle AS [System Idle Process], 
               100 - SystemIdle - SQLProcessUtilization AS [Other Process CPU Utilization], 
               DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) AS [Event Time] 
FROM (SELECT record.value('(./Record/@id)[1]', 'int') AS record_id, 
   record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') 
   AS [SystemIdle], 
   record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') 
   AS [SQLProcessUtilization], [timestamp] 
   FROM (SELECT [timestamp], CONVERT(xml, record) AS [record] 
   FROM sys.dm_os_ring_buffers WITH (NOLOCK)
   WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR' 
   AND record LIKE N'%<SystemHealth>%') AS x) AS y 
ORDER BY record_id DESC OPTION (RECOMPILE);
