Thursday, December 29, 2016
SQL SERVER SINGLE USER MODE
USE master;
GO
ALTER DATABASE AdventureWorks2012 SET SINGLE_USER WITH ROLLBACK IMMEDIATE;
GO
ALTER DATABASE AdventureWorks2012 SET READ_ONLY;
GO
ALTER DATABASE AdventureWorks2012 SET MULTI_USER;
GO
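To confirm where the database landed after running the statements above, a quick check against sys.databases works (a minimal sketch; swap in your own database name):

-- Confirm the current access mode and read-only flag
SELECT name,
       user_access_desc,   -- MULTI_USER / SINGLE_USER / RESTRICTED_USER
       is_read_only
FROM sys.databases
WHERE name = 'AdventureWorks2012';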
VIEW SSIS Package Errors
DECLARE @DATE DATE = GETDATE() - 1 -- Restrict the data to executions that started within the last day; used in the ON condition
SELECT O.Operation_Id -- Not much of use
,E.Folder_Name AS Project_Name
,E.Project_name AS SSIS_Project_Name
,EM.Package_Name
,CONVERT(DATETIME, O.start_time) AS Start_Time
,CONVERT(DATETIME, O.end_time) AS End_Time
,OM.message as [Error_Message]
,EM.Event_Name
,EM.Message_Source_Name AS Component_Name
,EM.Subcomponent_Name AS Sub_Component_Name
,E.Environment_Name
,CASE E.Use32BitRunTime
WHEN 1
THEN 'Yes'
ELSE 'NO'
END Use32BitRunTime
,EM.Package_Path
,E.Executed_as_name AS Executed_By
FROM [SSISDB].[internal].[operations] AS O
INNER JOIN [SSISDB].[internal].[event_messages] AS EM
ON EM.operation_id = O.operation_id
AND O.start_time >= @date -- Restrict data by date
INNER JOIN [SSISDB].[internal].[operation_messages] AS OM
ON EM.operation_id = OM.operation_id
INNER JOIN [SSISDB].[internal].[executions] AS E
ON OM.Operation_id = E.EXECUTION_ID
WHERE OM.Message_Type = 120 -- 120 means Error
AND EM.event_name = 'OnError'
-- Not entirely sure about this yet, but SSIS.Pipeline rows just add duplicates, so exclude them
AND ISNULL(EM.subcomponent_name, '') <> 'SSIS.Pipeline'
ORDER BY EM.operation_id DESC
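A similar error report can usually be pulled from the public catalog views instead of the internal tables, which tend to be more stable across SSIS versions. A hedged sketch against [SSISDB].[catalog].[event_messages], using the same date and OnError filters as above:

DECLARE @date DATE = GETDATE() - 1;

SELECT EM.operation_id
      ,EM.package_name
      ,EM.message_time
      ,EM.message               AS [Error_Message]
      ,EM.event_name
      ,EM.message_source_name   AS Component_Name
      ,EM.subcomponent_name     AS Sub_Component_Name
      ,EM.package_path
FROM SSISDB.catalog.event_messages AS EM
WHERE EM.event_name = 'OnError'
  AND EM.message_time >= @date
ORDER BY EM.operation_id DESC;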
Thursday, December 15, 2016
SQL Server Agent Job
You cannot actually disable an individual step in a SQL Server Agent job, but you can skip it.
To skip a step, open the previous step, go to the Advanced tab, and change the On success action to "Go to step [N]", pointing past the step you want skipped.
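The same change can also be scripted with msdb.dbo.sp_update_jobstep instead of the GUI. A minimal sketch, assuming a hypothetical job named 'MyNightlyJob' where step 2 should jump to step 4 on success, effectively skipping step 3:

USE msdb;
GO
-- On success of step 2, go to step 4 instead of the next step (3)
EXEC dbo.sp_update_jobstep
     @job_name           = N'MyNightlyJob',  -- hypothetical job name
     @step_id            = 2,
     @on_success_action  = 4,                -- 4 = go to step @on_success_step_id
     @on_success_step_id = 4;
GO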
Wednesday, December 14, 2016
All Database Permissions
-- user will already exist
SET NOCOUNT ON;
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;

DECLARE @command nvarchar(4000), @database sysname, @counter int = 0;

IF OBJECT_ID('tempdb..#Permissions') IS NOT NULL DROP TABLE #Permissions;
CREATE TABLE #Permissions (DatabaseName sysname, ObjType nvarchar(50), SQLStmt nvarchar(4000));

IF OBJECT_ID('tempdb..#Databases') IS NOT NULL DROP TABLE #Databases;
CREATE TABLE #Databases (DB_Counter int, DatabaseName sysname);

INSERT #Databases (DB_Counter, DatabaseName)
SELECT ROW_NUMBER() OVER (ORDER BY database_id) AS DB_Counter
      ,name AS DatabaseName
FROM sys.databases
WHERE state_desc = 'ONLINE'
  AND name NOT IN ('tempdb');
--SELECT * FROM #Databases

-- Loop through the databases
WHILE (SELECT MAX(DB_Counter) FROM #Databases) > @counter
BEGIN;
    SELECT @counter += 1;
    SELECT @database = QUOTENAME(DatabaseName) FROM #Databases WHERE DB_Counter = @counter;

    -- Object Level Permissions
    SELECT @command = 'USE ' + @database + '; SELECT DB_NAME(), ''Object Level Permissions'', ''USE ' + @database + '; ''
        + CASE WHEN perm.state <> ''W'' THEN perm.state_desc ELSE ''GRANT'' END
        + SPACE(1) + perm.permission_name + SPACE(1)
        + ''ON '' + QUOTENAME(USER_NAME(obj.schema_id)) + ''.'' + QUOTENAME(obj.name)
        + CASE WHEN cl.column_id IS NULL THEN SPACE(0) ELSE ''('' + QUOTENAME(cl.name) + '')'' END
        + SPACE(1) + ''TO'' + SPACE(1) + QUOTENAME(USER_NAME(usr.principal_id)) COLLATE database_default
        + CASE WHEN perm.state <> ''W'' THEN SPACE(0) ELSE SPACE(1) + ''WITH GRANT OPTION'' END
        + CHAR(10) + ''GO'' AS ''--Object Level Permissions''' + '
        FROM sys.database_permissions AS perm
        INNER JOIN sys.objects AS obj ON perm.major_id = obj.[object_id]
        INNER JOIN sys.database_principals AS usr ON perm.grantee_principal_id = usr.principal_id
        LEFT JOIN sys.columns AS cl ON cl.column_id = perm.minor_id AND cl.[object_id] = perm.major_id
        ORDER BY perm.permission_name ASC, perm.state_desc ASC;';
    PRINT @command;
    INSERT INTO #Permissions EXEC (@command);

    -- Database Level Permissions
    SELECT @command = 'USE ' + @database + '; SELECT DB_NAME(), ''Database Level Permissions'',''USE ' + @database + '; ''
        + CASE WHEN perm.state <> ''W'' THEN perm.state_desc ELSE ''GRANT'' END
        + SPACE(1) + perm.permission_name + SPACE(1) + SPACE(1) + ''TO'' + SPACE(1)
        + QUOTENAME(USER_NAME(usr.principal_id)) COLLATE database_default
        + CASE WHEN perm.state <> ''W'' THEN SPACE(0) ELSE SPACE(1) + ''WITH GRANT OPTION'' END
        + CHAR(10) + ''GO'' AS ''--Database Level Permissions''
        FROM sys.database_permissions AS perm
        INNER JOIN sys.database_principals AS usr ON perm.grantee_principal_id = usr.principal_id
        WHERE perm.major_id = 0
        ORDER BY perm.permission_name ASC, perm.state_desc ASC';
    PRINT @command;
    INSERT INTO #Permissions EXEC (@command);

    -- Database Level Role Members
    SELECT @command = 'USE ' + @database + '; SELECT DB_NAME(), ''Database Level Role Members'',''USE ' + @database + '; ''
        + ''EXEC sp_addrolemember @rolename ='' + SPACE(1) + QUOTENAME(usr1.name, '''') + '', @membername ='' + SPACE(1) + QUOTENAME(usr2.name, '''')
        + CHAR(10) + ''GO'' AS ''--Role Memberships''
        FROM sys.database_principals AS usr1
        INNER JOIN sys.database_role_members AS rm ON usr1.principal_id = rm.role_principal_id
        INNER JOIN sys.database_principals AS usr2 ON rm.member_principal_id = usr2.principal_id
        ORDER BY rm.role_principal_id ASC';
    PRINT @command;
    INSERT INTO #Permissions EXEC (@command);
END;

-- Server Level Role Members
INSERT #Permissions (DatabaseName, ObjType, SQLStmt)
SELECT @@SERVERNAME
      ,'Server Level Role Members'
      ,'USE [master]; 
EXEC sp_addsrvrolemember @rolename =' + SPACE(1) + QUOTENAME(usr1.name, '''') + ', @loginame =' + SPACE(1) + QUOTENAME(usr2.name, '''') + CHAR(10) + 'GO' AS '--Role Memberships'
FROM sys.server_principals AS usr1
INNER JOIN sys.server_role_members AS rm ON usr1.principal_id = rm.role_principal_id
INNER JOIN sys.server_principals AS usr2 ON rm.member_principal_id = usr2.principal_id
ORDER BY rm.role_principal_id ASC;

-- Server Level Permissions
INSERT #Permissions (DatabaseName, ObjType, SQLStmt)
SELECT @@SERVERNAME
      ,'Server Level Permissions'
      ,'USE [master]; ' + server_permissions.state_desc COLLATE SQL_Latin1_General_CP1_CI_AS + ' '
          + server_permissions.permission_name COLLATE SQL_Latin1_General_CP1_CI_AS + ' TO ['
          + server_principals.name COLLATE SQL_Latin1_General_CP1_CI_AS + ']; ' + CHAR(10) + 'GO' AS '--Server Level Permissions'
FROM sys.server_permissions AS server_permissions WITH (NOLOCK)
INNER JOIN sys.server_principals AS server_principals WITH (NOLOCK) ON server_permissions.grantee_principal_id = server_principals.principal_id
WHERE server_principals.type IN ('S', 'U', 'G')
ORDER BY server_principals.name
        ,server_permissions.state_desc
        ,server_permissions.permission_name;

-- Output
SELECT @@SERVERNAME AS ServerName
      ,DatabaseName
      ,ObjType
      ,SQLStmt
FROM #Permissions
ORDER BY DatabaseName, ObjType;
FIND INDEX Create date
select crdate, i.name, object_name(o.id)
from sysindexes i
join sysobjects o ON o.id = i.id
where i.NAME like '%AdAlerts%'
Friday, December 9, 2016
Most Common SQL Server Performance Problems
NUMBER 5: TEMPDB PAGELATCH CONTENTION
Jonathan Kehayias (b | t) covers this topic very well in a great article on Optimizing tempdb configuration with SQL Server 2012 Extended Events, so I'm not going to attempt to go into it very deeply here, but I will talk about my experience with it.
I'm starting to see this more and more. It is usually on a system making heavy use of tempdb for some type of ETL process, and it is especially common when that is an ongoing "real-time" style ETL process. I'm not saying data loads are the cause of this, by the way; I'm just relaying my observations.
The symptoms of this can vary, but some things are always the same. High PAGELATCH waits in tempdb and poor performance recorded for processes using tempdb. I'll typically follow the waits to Top SQL in Performance Advisor, and see lots of queries that use temp tables listed in Top SQL. These queries usually run in milliseconds, and should never be counted among the "Top SQL" for the server. This can have people feeling like these queries are a large part of the problem, but that is not necessarily the case at all. The queries are the victims of the real problem.
Once I'm suspecting this to be the case, I will usually jump to the Disk Activity tab in Performance Advisor to see how tempdb is configured. Most times I actually see the same thing: A busy tempdb with a single data file defined. From here I'll usually recommend reconfiguring tempdb, and direct them to a resource like Jonathan's article for more information.
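If you want to make the same check without Performance Advisor, counting tempdb data files directly is enough to spot the single-file configuration (a minimal sketch; the right number of files depends on core count and workload):

-- Count and list tempdb data files; a single ROWS file on a busy server is a red flag
SELECT name, physical_name, type_desc, size * 8 / 1024 AS size_mb
FROM tempdb.sys.database_files
WHERE type_desc = 'ROWS';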
NUMBER 4: EXPECTING AUTO UPDATE STATISTICS TO KEEP STATISTICS UPDATED
The problem here is that the thresholds for triggering auto statistics updates end up being the same in most cases, even for a very large table. Without going into a very deep explanation, the threshold is ~20% of the rows in the table. So on a really big table it takes a lot of data change to trigger an update. Kevin Kline has a nice, easy to follow explanation of this here as well.
The reason this makes the list is that DBAs seem really surprised to find out that the auto update isn't taking care of things the way the name implies. There are also many DBAs who believe statistics should be handled by their maintenance job, but after looking at that maintenance, it turns out they are doing index reorgs most of the time, and a reorg won't update the statistics either (though a rebuild will). I also want to note here that if you are using the Fragmentation Manager feature in SQL Sentry 7.0 and higher, you can keep a running history of when your indexes were reorganized rather than rebuilt. This can help you decide whether the problem you're seeing could be related to the auto update not happening.
The lesson here is really to keep an eye on statistics, and make sure they're updated regularly, especially on large tables, which are becoming more and more common as time goes on. Another option here can be to use trace flag 2371 to actually change the formula used to trigger the update. The nicest explanation of this option I have found is at this blog post by Juergen Thomas.
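To see how stale statistics on a big table actually are, sys.dm_db_stats_properties (available from SQL Server 2008 R2 SP2 / 2012 SP1 onward) shows the last update time and the modifications since then. A sketch with a hypothetical table name; the trace flag is shown commented out because it should be tested before being enabled globally:

-- Last update time and modifications since then for every statistic on a table
SELECT OBJECT_NAME(s.[object_id]) AS table_name
      ,s.name                     AS stats_name
      ,sp.last_updated
      ,sp.rows
      ,sp.modification_counter
FROM sys.stats AS s
CROSS APPLY sys.dm_db_stats_properties(s.[object_id], s.stats_id) AS sp
WHERE s.[object_id] = OBJECT_ID('dbo.BigTable');  -- hypothetical table name

-- Optional: switch to the dynamic auto-update threshold (test first)
-- DBCC TRACEON (2371, -1);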
NUMBER 3: THE CXPACKET WAIT TYPE
I would say that this is hands down the single most common wait type I see on larger SQL Server systems when someone asks me to look into query performance with them.
There is a lot of information out there on how to deal with this, but sadly I still see a lot of people make the initial assumption that the problem should be solved by having either the query or the entire server set MAXDOP to 1. More often than not the problem can be handled by proper indexing or statistics maintenance. It could also be that the plan cached for this query is just not optimal, and you can mark it for recompile using sp_recompile, set recompile at the query level, or just evict the plan using DBCC FREEPROCCACHE with a plan handle. It is best to exhaust these options before deciding to change MAXDOP to 1 because you could be throwing away a lot of processing power without realizing it.
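For the plan-cache options mentioned above, a rough sketch of looking up a plan handle and evicting only that plan follows; the LIKE filter and the procedure name are placeholders, not anything from the original post:

-- Find the plan handle(s) for a suspect query
SELECT cp.plan_handle, cp.usecounts, st.[text]
FROM sys.dm_exec_cached_plans AS cp
CROSS APPLY sys.dm_exec_sql_text(cp.plan_handle) AS st
WHERE st.[text] LIKE '%YourQueryTextHere%';  -- placeholder filter

-- Evict just that plan rather than the whole cache (paste a handle from the query above)
-- DBCC FREEPROCCACHE (<plan_handle from above>);

-- Or mark a specific object's plans for recompile
-- EXEC sp_recompile N'dbo.YourProcedure';  -- hypothetical object name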
Paul Randal (b | t) has a great survey on his blog here that seems to support what I'm used to seeing as well. In fact, he's the one who first taught me that MAXDOP 1 is not necessarily the answer to this.
NUMBER 2: MISUNDERSTOOD "TIMEOUT EXPIRED PRIOR TO THE COMPLETION OF…"
This one is huge. Outside of some very edge-case behavior, there are two basic types of timeouts you *might* deal with for SQL Server: connection timeouts and operation (or query) timeouts. In both cases these are values set by the client connecting to the SQL Server. On the server side there is a remote query timeout setting, but that is the edge case I mentioned, and I will leave it for another discussion.
I'm going to focus on operation timeouts, since they are the most common. Operation timeout errors from various software tools might be the most misunderstood situation I come across. The cause really boils down to one simple thing: the client executing the command has set a maximum amount of time that it will wait for the command to complete. If that maximum is reached before the command completes, the command is aborted and the client raises an error.
Many times the timeout error will induce a panic mode, because the error can look a bit intimidating. The reality is, though, that this is not much different than hitting the stop button in SSMS because the query was taking too long. In fact, it will show up exactly the same in a profiler trace with Error = 2 (Aborted).
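If you want to see those aborts on the server side yourself, an Extended Events session on the attention event is one lightweight way to do it (a sketch only; the session name and file target are assumptions):

-- Capture client-side aborts (timeouts / cancels) as attention events
CREATE EVENT SESSION [CaptureAttentions] ON SERVER
ADD EVENT sqlserver.attention
(
    ACTION (sqlserver.client_app_name, sqlserver.session_id, sqlserver.sql_text)
)
ADD TARGET package0.event_file (SET filename = N'CaptureAttentions.xel')
WITH (MAX_DISPATCH_LATENCY = 5 SECONDS);
GO
ALTER EVENT SESSION [CaptureAttentions] ON SERVER STATE = START;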
So, what does a timeout like this really tell us? It tells us that queries are taking longer than expected. We should go into "performance tuning" mode rather than "something is broken" mode. The error information from the client is really just some good information on where you might start to focus your tuning efforts.
If you receive timeout errors from the SQL Sentry monitoring service, and one of the servers you are monitoring is the source, this is not telling you that SQL Sentry is broken. This is SQL Sentry telling you that this server is experiencing performance issues. Again, it is time for "performance tuning" mode. These errors could be easily consumed internally, and retried later, but this would be doing our customers a huge disservice. We believe that you should know about *any* potential problem on your monitored server, even if it is SQL Sentry encountering the problem.
Incidentally, this is true for SQL Sentry just as it is for any other system that uses an RDBMS for a repository: your SQL Sentry database needs some TLC now and again. Without it you may indeed experience some timeouts from your SQL Sentry client. We spend a lot of time tuning our queries for performance before they ever make it out the door, but proper maintenance will ensure they continue to run as intended.
NUMBER 1: MEMORY PRESSURE
This is the big one. As soon as Kevin mentioned wanting this list, it was the first thing that popped into my head, not only because I see it so often, but also because it is so often mistaken for poor disk performance.
There are lots of caches in SQL Server, but the most well-known is the data cache (aka buffer pool). The easiest way to describe the data cache is that it is the data stored in memory, rather than persisted to disk. Being able to store lots of data in memory long term is desirable because working with data in memory is generally much quicker than having to perform physical I/Os.
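To see which databases are occupying the data cache at any given moment, sys.dm_os_buffer_descriptors can be aggregated per database (a minimal sketch; note it can take a while on servers with a very large buffer pool):

-- Data cache usage per database, largest consumers first
SELECT CASE WHEN database_id = 32767 THEN 'ResourceDb'
            ELSE DB_NAME(database_id) END AS database_name
      ,COUNT(*) * 8 / 1024               AS cached_mb
FROM sys.dm_os_buffer_descriptors
GROUP BY database_id
ORDER BY cached_mb DESC;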
I could turn this post into a very long discussion on memory pressure in SQL Server at this point, but I promise I will try to avoid that. There is already a ton of information available on this subject, and that is not really the intent of this post. What I will say is that, usually, memory pressure manifests as a few different symptoms. When viewed individually, some of these symptoms can lead you to incorrect, and sometimes costly, conclusions.
The two misleading symptoms are that you may start to see higher than normal latency across the disk subsystem, and you may start to see abnormally high waits related to disk activity. If you look at nothing but these two symptoms, you may come to the conclusion that you need to work on your disk system.
This is why being presented with all relevant metrics on one dashboard is so important. You have to look at the bigger picture, and having the memory-related data available along with the disk activity and waits helps to paint a clearer picture of what is really going on.
Typically what I'll see (along with the disk waits and disk latency) is a PLE (Page Lifetime Expectancy) that is fairly low for this server. I describe it this way because what is good or bad for this value really "depends". The larger your buffer cache is, the higher your "critical" threshold will be for PLE. The more data there is to churn in and out of the buffer, the worse off you will be when the "churn" actually happens. Another consideration is NUMA. The way the PLE counter is calculated can cause this value alone to be very misleading when multiple NUMA nodes are involved, as described by Paul Randal in his blog post "Page Life Expectancy isn't what you think…". Luckily, in SQL Sentry 7.0 and higher you can actually see where PLE is for the individual NUMA nodes in history mode, which makes this a bit less of a problem.
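For reference, PLE per NUMA node is also exposed through the Buffer Node counters in sys.dm_os_performance_counters, so it can be checked without any third-party tool (a sketch; the instance name prefix on the counter objects varies):

-- Overall PLE (Buffer Manager) and per-NUMA-node PLE (Buffer Node)
SELECT [object_name], instance_name AS node, cntr_value AS page_life_expectancy
FROM sys.dm_os_performance_counters
WHERE counter_name = 'Page life expectancy'
  AND ([object_name] LIKE '%Buffer Manager%' OR [object_name] LIKE '%Buffer Node%');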
I'll usually also see consistently higher lazy writer activity and SQL Server page faults (SQL Server going to disk). Sometimes I'll see what I call buffer tearing: the data buffer goes up and down frequently, creating a jagged (or torn) edge on the history chart in Performance Advisor. Finally, I may also see an abnormally large plan cache reducing the memory available for the data cache.
All of these things together spell memory pressure, and there are various ways to deal with them, but the important thing to note is that this is not a disk issue. It's not saying that your disk system is necessarily wonderful either, but I am saying I wouldn't call up your SAN guy and order a bunch of new hardware based on this situation. Once you get the memory pressure situation under control, SQL Server will not need to go to disk as much, and the few symptoms related to disk may disappear entirely!
The moral here is really to always consider the full picture of performance, because looking at one thing out of context could severely limit your options for a solution.
Wednesday, December 7, 2016
How to find whether an SSISDB package run succeeded, failed or completed
select e.*
, CONVERT(datetime, es.start_time) AS start_time
, CONVERT(datetime, es.end_time) AS end_time
, es.execution_duration , es.statistics_id
, es.execution_result
, case es.execution_result
when 0 then 'Success'
when 1 then 'Failure'
when 2 then 'Completion'
when 3 then 'Cancelled'
end as execution_result_description
from catalog.executables e
join catalog.executable_statistics es
on e.executable_id = es.executable_id
and e.execution_id = es.execution_id
where package_path = '\Package' and start_time >= '2016-12-06'
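If all you need is the overall outcome of a package run rather than per-executable statistics, catalog.executions carries a status column with its own documented codes. A hedged sketch:

-- Overall execution status per package run
select e.execution_id
, e.folder_name
, e.project_name
, e.package_name
, e.start_time
, e.end_time
, case e.[status]
when 1 then 'Created'
when 2 then 'Running'
when 3 then 'Canceled'
when 4 then 'Failed'
when 5 then 'Pending'
when 6 then 'Ended unexpectedly'
when 7 then 'Succeeded'
when 8 then 'Stopping'
when 9 then 'Completed'
end as status_description
from SSISDB.catalog.executions e
where e.start_time >= '2016-12-06'
order by e.execution_id desc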
Monday, December 5, 2016
The assembly may not be trusted with PERMISSION_SET = EXTERNAL_ACCESS or UNSAFE.
An error occurred in the Microsoft .NET Framework while trying to load assembly id 65536. The server may be running out of resources, or the assembly may not be trusted with PERMISSION_SET = EXTERNAL_ACCESS or UNSAFE. Run the query again, or check documentation to see how to solve the assembly trust issues. For more information about this error:
System.IO.FileLoadException: Could not load file or assembly 'microsoft.sqlserver.integrationservices.serve
Solution:
ALTER DATABASE SSISDB SET TRUSTWORTHY ON
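It may be worth confirming the flag afterwards and checking the database owner, which is commonly looked at alongside TRUSTWORTHY for this particular SSISDB error (the ALTER AUTHORIZATION line is an optional, commonly paired step, not part of the original fix):

-- Verify TRUSTWORTHY is now on and check who owns SSISDB
SELECT name,
       is_trustworthy_on,
       SUSER_SNAME(owner_sid) AS owner_name
FROM sys.databases
WHERE name = 'SSISDB';

-- Commonly paired fix if the owner is not sa (verify in your environment first)
-- ALTER AUTHORIZATION ON DATABASE::SSISDB TO sa;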
Friday, December 2, 2016
DBOWNER
CREATE TABLE ##DBOwners
(
DBName SYSNAME,
Username SYSNAME,
Rolename NVARCHAR(50),
IsOwner BIT
);
EXEC sp_MSforeachdb
'
USE [?];
INSERT INTO ##DBOwners
SELECT
DB_NAME(),
dp.name,
dp2.name,
CASE
WHEN dp2.name = ''db_owner'' THEN 1
ELSE 0
END AS ''db_owner''
FROM
sys.database_principals AS dp
LEFT JOIN
sys.database_role_members AS rm
ON
rm.member_principal_id = dp.principal_id
LEFT JOIN
sys.database_principals AS dp2
ON
rm.role_principal_id = dp2.principal_id
WHERE
dp.type <> ''R''
'
SELECT * FROM ##DBOwners where Username <> 'dbo' and rolename ='db_owner' and Username <> 'w4access'
Thursday, December 1, 2016
Select rows older than 12 months
SELECT * FROM [dbo].[TEMPPrice] where (DateAdd(MM, -12, GetDate()) > [DateChecked])