
How to remove Logins and Users from SQL Server en masse

The script below will generate the SQL code to drop every user and login from a SQL Server instance. Not very useful unless you're trying to give someone, possibly yourself, a very bad day. Don't worry though: the script does not execute the code, it only generates it. However, given the potential for using the output of the script incorrectly, this post comes with a disclaimer (Link to disclaimer).

To make the script a little more useful you can populate a temp table called #TheseUsersOnly in the script with the specific login/user names you want to remove.

This limits the generated code to only the logins and users specified.

Go to the /*INSERT LOGINS HERE*/ section to populate the temp table with hard-coded login/user names.

Be sure to thoroughly review the generated code before executing it.

If you encounter the error message "The database principal owns a schema in the database, and cannot be dropped", see this post (Link).
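For illustration, assuming a login named Test_Login that exists as a user in a single database called TestDb (both names hypothetical), the script would generate output along these lines:

USE [TestDb];
IF EXISTS (SELECT * FROM [TestDb].sys.database_principals WHERE name = N'Test_Login')
BEGIN DROP USER [Test_Login] END;

USE [master];
IF EXISTS (SELECT NAME
FROM master.sys.server_principals
WHERE NAME = 'Test_Login'
) BEGIN
DROP LOGIN [Test_Login] END;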

SET NOCOUNT ON;

/*DECLARE VARIABLES*/
DECLARE @Database TABLE (DbName SYSNAME);
DECLARE @DbName AS SYSNAME;
DECLARE @sqlCommand AS VARCHAR(MAX);
DECLARE @UserName AS VARCHAR(128);
DECLARE @i AS INT;
DECLARE @z AS INT;
DECLARE @j AS INT;
DECLARE @y AS INT;

/*DROP EXISTING TEMP TABLES*/
IF OBJECT_ID('tempdb..#ErrorTable') IS NOT NULL
BEGIN
	DROP TABLE #ErrorTable
END;

IF OBJECT_ID(N'tempdb..#User') IS NOT NULL
BEGIN
	DROP TABLE #User
END;

IF OBJECT_ID(N'tempdb..#UserOrdered') IS NOT NULL
BEGIN
	DROP TABLE #UserOrdered
END;

IF OBJECT_ID(N'tempdb..#TheseUsersOnly') IS NOT NULL
BEGIN
	DROP TABLE #TheseUsersOnly
END;

IF OBJECT_ID('tempdb..#Return') IS NOT NULL
BEGIN
	DROP TABLE #Return
END;

/*CREATE TEMP TABLES*/
/*THE ERROR TABLE WILL STORE THE DYNAMIC SQL THAT DID NOT WORK*/
CREATE TABLE [#ErrorTable] ([SqlCommand] VARCHAR(MAX)) ON [PRIMARY];

CREATE TABLE #User (
	Id INT NOT NULL IDENTITY(1, 1) PRIMARY KEY
	,DbName SYSNAME NULL
	,UserName SYSNAME NULL
	);

CREATE TABLE #UserOrdered (
	Id INT NOT NULL IDENTITY(1, 1) PRIMARY KEY
	,DbName SYSNAME NULL
	,UserName SYSNAME NULL
	);

CREATE TABLE #TheseUsersOnly (UserName SYSNAME NULL);

CREATE TABLE [#Return] ([SqlCommand] VARCHAR(MAX)) ON [PRIMARY];

/*INSERT LOGINS HERE*/
/*
/*Single Login*/
INSERT INTO #TheseUsersOnly (UserName)
SELECT 'Test_Login';
*/
/*
/*Multiple Logins*/
INSERT INTO #TheseUsersOnly (UserName)
VALUES ('Test_Login_1')
,('Test_Login_2')
,('Test_Login_3')
*/
SET @DbName = '';

/*GENERATE LIST TABLE OF DATABASE NAMES*/
INSERT INTO @Database (DbName)
SELECT NAME
FROM master.dbo.sysdatabases
WHERE NAME <> 'tempdb'
ORDER BY NAME ASC;

/*GENERATE LIST OF USERS FOR EACH DATABASE*/
WHILE @DbName IS NOT NULL
BEGIN
	SET @DbName = (
			SELECT MIN(DbName)
			FROM @Database
			WHERE DbName > @DbName
			)
	SET @sqlCommand = '
INSERT INTO #User (
DbName
,UserName
)
SELECT ' + '''' + @DbName + '''' + ' AS DbName 
,princ.name AS UserName
FROM ' + QUOTENAME(@DbName) + '.sys.database_principals AS princ
WHERE princ.name IS NOT NULL
';

	/*OPTION TO LIMIT USERS TO ONLY THE USERS SPECIFIED*/
	IF EXISTS (
			SELECT *
			FROM #TheseUsersOnly
			)
	BEGIN
		SET @sqlCommand = @sqlCommand + 'AND princ.Name IN (SELECT UserName FROM #TheseUsersOnly);'
	END
	ELSE
	BEGIN
		SET @sqlCommand = @sqlCommand + ';'
	END

	/*ERROR HANDLING*/
	BEGIN TRY
		EXEC (@sqlCommand)
	END TRY

	BEGIN CATCH
		INSERT INTO #ErrorTable
		SELECT (@sqlCommand)
	END CATCH
END;

/*REORDER USER TABLE BY NAME ALPHABETICALLY ASCENDING*/
INSERT INTO #UserOrdered (
	DbName
	,UserName
	)
SELECT DbName
	,UserName
FROM #User
ORDER BY UserName ASC
	,DbName ASC;

/*SET LOOP VARIABLES*/
SET @y = 0;
SET @i = 1;
SET @z = (
		SELECT COUNT(*)
		FROM #UserOrdered
		);

/*LOOP TO GENERATE SQL CODE*/
WHILE @i <= @z
BEGIN
	SELECT @DbName = DbName
		,@UserName = UserName
	FROM #UserOrdered
	WHERE Id = @i

	IF @y = 0
		SET @j = 1;

	BEGIN
		SET @y = (
				SELECT COUNT(UserName)
				FROM #UserOrdered
				WHERE UserName = @UserName
				)
	END

	SET @sqlCommand = 'USE [' + @DbName + '];
IF EXISTS (SELECT * FROM ' + QUOTENAME(@DbName) + '.sys.database_principals WHERE name = N' + '''' + @UserName + '''' + ')
BEGIN DROP USER [' + @UserName + '] END;
';

	/*ERROR HANDLING*/
	BEGIN TRY
		INSERT INTO #Return
		SELECT (@sqlCommand)
	END TRY

	BEGIN CATCH
		INSERT INTO #ErrorTable
		SELECT (@sqlCommand)
	END CATCH

	IF @j = @y
	BEGIN
		IF EXISTS (
				SELECT NAME
				FROM master.sys.server_principals
				WHERE NAME = @UserName
				)
		BEGIN
			INSERT INTO #Return (SqlCommand)
			SELECT 'USE [master];
IF EXISTS (SELECT NAME
FROM master.sys.server_principals
WHERE NAME = ' + '''' + @UserName + '''' + '
) BEGIN
DROP LOGIN [' + @UserName + '] END;'
		END

		SET @y = 0;
	END

	SET @i = @i + 1;
	SET @j = @j + 1;
END

/*RESULTS*/
SELECT SqlCommand
FROM #ErrorTable;

SELECT SqlCommand
FROM #Return;

/*HOUSEKEEPING*/
IF OBJECT_ID('tempdb..#ErrorTable') IS NOT NULL
BEGIN
	DROP TABLE #ErrorTable
END;

IF OBJECT_ID(N'tempdb..#User') IS NOT NULL
BEGIN
	DROP TABLE #User
END;

IF OBJECT_ID(N'tempdb..#UserOrdered') IS NOT NULL
BEGIN
	DROP TABLE #UserOrdered
END;

IF OBJECT_ID(N'tempdb..#TheseUsersOnly') IS NOT NULL
BEGIN
	DROP TABLE #TheseUsersOnly
END;

IF OBJECT_ID('tempdb..#Return') IS NOT NULL
BEGIN
	DROP TABLE #Return
END;

 

How to clear SQL Server backup and job history

SQL Server makes a record of each backup and restore it performs. This information is recorded across the following tables:

msdb.dbo.backupfile
msdb.dbo.backupfilegroup
msdb.dbo.backupmediafamily
msdb.dbo.backupmediaset
msdb.dbo.backupset
msdb.dbo.restorefile
msdb.dbo.restorefilegroup
msdb.dbo.restorehistory

There is no cleanup process that automatically clears these records down, so as you can imagine these tables can grow to be substantial over time.

If any of these tables exceeds 10,000 rows it can cause an upgrade of the SQL Server instance to hang, so it is recommended that older records be deleted before upgrading.
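To check whether you are near that threshold, a quick row count across the largest of these tables can help; a minimal sketch (extend the UNION to cover the remaining tables in the list above as needed):

SELECT 'backupset' AS TableName
	,COUNT(*) AS RowCnt
FROM msdb.dbo.backupset

UNION ALL

SELECT 'backupfile'
	,COUNT(*)
FROM msdb.dbo.backupfile

UNION ALL

SELECT 'restorehistory'
	,COUNT(*)
FROM msdb.dbo.restorehistory;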

To reduce rows in all of these tables, you can run the sp_delete_backuphistory stored procedure. This procedure deletes the entries in all of the backup and restore history tables for backup sets older than a specified date. An example is shown below:

EXEC msdb..sp_delete_backuphistory '20161201 12:00:00.000';

Similarly, to remove records from msdb.dbo.sysjobhistory you can run the following example:

EXEC msdb..sp_purge_jobhistory @oldest_date = '20161201 12:00:00.000';

It is recommended you set up a job to clear these tables, however how often you clear them requires some thought. For instance, you may want to keep, or archive, the job history for performance comparisons over time or as a record of job failures.

Another example: I use a stored procedure that automatically generates TSQL to perform restores. This stored procedure uses the records in the backup history tables to generate the TSQL, so I need to be careful not to delete records that are within our backup file retention period, otherwise I could not generate a restore script for those backup files using the stored procedure.

You might be reliant on these tables in some way as well, so keep this in mind.
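If a simple rolling retention window does suit your environment, the cleanup job step could be as minimal as the sketch below; the six month window is an assumption, adjust it to match your own retention requirements.

DECLARE @OldestDate DATETIME = DATEADD(MONTH, -6, GETDATE());

/*CLEAR BACKUP AND RESTORE HISTORY*/
EXEC msdb.dbo.sp_delete_backuphistory @oldest_date = @OldestDate;

/*CLEAR JOB HISTORY*/
EXEC msdb.dbo.sp_purge_jobhistory @oldest_date = @OldestDate;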

How to size transaction log files appropriately and reduce the number of virtual log files

Sizing transaction log files and resolving the issue of having too many virtual log files may or may not be caused by the same problem, but both have similar solutions revolving around the same steps. As such the script provided below can resolve both, though the application of the script differs slightly.

Appropriately sizing the Transaction Log (TLog) File

Ideally, TLog files should be created at their desired size at installation rather than being allowed to grow slowly over time.

Typically though, the DBA will not know how large or frequent the transactions will be before the database becomes active in the production environment.

Or the DBA might be new to the organisation and unable to tell whether the TLog file size is appropriate for the typical transactions the database handles, or whether it is the result of some abnormally large transactions or failed TLog backups.

All of this can of course be investigated somewhat, but if the database transactions are in the kilobytes and transaction frequency is low then you could also take the pragmatic approach outlined below:

  1. Backup the log until it gets truncated
  2. Shrink the log as much as possible
  3. Allow the file to grow
  4. Record the maximum size of the log
  5. Backup the log until it gets truncated
  6. Shrink the log as much as possible
  7. Manually expand the log to the maximum size recorded in step 4

Carrying out the steps above will also resolve any issues with excessive Virtual Log Files. This is dealt with in more detail below.

The script provided at the end of this article can be utilized to achieve the steps above, see the instructions “How to use the script” below.

Before proceeding with this approach however ensure the following points have been considered as these will minimize the impact of growing the files on the databases:

  • That the autogrowth increment for the transaction log is suitably large; @DefaultFileGrowth is set at 128 MB, and this value or higher is recommended.
  • Appropriate restriction settings for growth, preferably unrestricted
  • Instant file initialization is on.

Review the following article to set up instant file initialization.

https://www.brentozar.com/blitz/instant-file-initialization/
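On recent versions (roughly SQL Server 2016 SP1 onwards) you can also confirm the setting from TSQL via sys.dm_server_services; a minimal check:

SELECT servicename
	,instant_file_initialization_enabled /*'Y' when enabled for the service account*/
FROM sys.dm_server_services;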

You may already know that the TLog file has been appropriately sized but that there are too many VLFs, in which case the following section outlines how to resolve the VLF issue.

Dealing with Virtual Log Files (VLFs)

Each Transaction Log (TLog) file is divided logically into smaller segments called VLFs. VLFs are the unit of truncation for the TLog. When a VLF no longer contains log records for active transactions, it can be truncated and the space becomes available to log new transactions.

SQL Server should not have an excessive number of VLFs inside the TLog. Having a large number of small VLFs can cause the following problems:

  • Slow down the recovery process on startup or after restoring a backup
  • Slow down TLog backups
  • Affect insert/update/delete performance

What constitutes too many VLFs is subjective, however Microsoft flags 1000+ VLFs as an issue.

The number of VLFs can grow based on the autogrowth settings for the TLog file and how often active transactions are written to disk. The problem of too many VLFs is often the result of accepting the default settings for initial log file size and autogrowth increment when the database is created.
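To review the current autogrowth settings of every TLog file on the instance, the sketch below reads them from sys.master_files (growth is stored in 8 KB pages unless is_percent_growth is set):

SELECT DB_NAME(database_id) AS DbName
	,name AS LogicalFileName
	,CASE is_percent_growth
		WHEN 1
			THEN CONVERT(VARCHAR(10), growth) + '%'
		ELSE CONVERT(VARCHAR(10), (growth * 8) / 1024) + ' MB'
		END AS GrowthIncrement
FROM sys.master_files
WHERE type_desc = 'LOG';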

To check how many VLFs a database has, run the command below against the database:

DBCC LOGINFO;

Each row returned represents a VLF.
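On SQL Server 2016 SP2 and later you can also count the VLFs of every database in one pass using the sys.dm_db_log_info function, where each row likewise represents one VLF; a minimal sketch:

SELECT d.name AS DbName
	,COUNT(*) AS VlfCount
FROM sys.databases AS d
CROSS APPLY sys.dm_db_log_info(d.database_id) AS li
GROUP BY d.name
ORDER BY VlfCount DESC;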

Resolving too many VLFs unfortunately requires some downtime. The fix is to shrink and regrow the log file. This is a blocking operation and I would suggest that once complete you run a full backup of the database(s), as the shrink will break the continuity of the Full and TLog backup schedule, which would cause the TLog backups to fail.

Taking the following steps will reduce the number of VLFs in the TLog of a database:

  1. Record the maximum size of the log
  2. Backup the log until it gets truncated
  3. Shrink the log as much as possible
  4. Manually expand the log to the maximum size recorded in the first step

The script provided at the end of this article can be utilized to achieve the steps above, see the instructions “How to use the script” below.

How to use the script:

The script below will output the TSQL to run against all the databases in your instance. If you want to address every database on the instance run the entire script during scheduled downtime. To run against a specific database simply run the snippet that relates to that database.

The role of Backups

The TLog needs to be backed up and truncated prior to running the shrink to get the file as small as possible.

Backup Option 1:

The script will back up the TLog(s) if you provide a value for the @BackupLocation variable. This defaults to 'C:\SQL_Server\Backup\'.

Backup Option 2: 

Alternatively you can set @BackupLocation = NULL and run your own TLog backup maintenance plan or maintenance script prior to running this script.

Future Proofing by adding a margin for growth:

The script variable @Margin allows you to set a percentage of how much larger you want the new TLog file size to be, based on the current size. So for a database with a current TLog file size of 100 MB, running the script with @Margin = 10 will output TSQL to change the TLog file size to 110 MB. This increases the TLog file size by 10%, providing 10% additional disk space before the TLog would need to grow again.
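This mirrors the calculation the script performs; as a standalone worked example:

DECLARE @CurrentLogSizeMb INT = 100
	,@Margin DECIMAL(5, 2) = 10;

/*Returns 110: the current size plus the 10% margin, rounded up*/
SELECT @CurrentLogSizeMb + CEILING(@CurrentLogSizeMb * (@Margin / 100)) AS NewLogSizeMb;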

Appropriate TLog File Size Instructions:

Once you have backups arranged using one of the options above, set the variable @LogSizeMb = 128 and run the script. This will start the TLog file size at 128 MB.

Take the TSQL Generated and run the snippets needed for particular databases or run the entire script.

Let the file grow over a period that is representative of repeated database transaction activity.

When the TLog file is no longer growing you have found the natural size of the TLog for that database.

Run the script again, this time setting @LogSizeMb = 0 and, if appropriate, providing a value for @Margin. The TSQL generated will then use the current TLog file size as is, plus the optional margin.

Run the snippets for the required database(s).

Dealing with Virtual Log Files (VLFs) Instructions:

Once you have backups arranged using one of the options above, ensure that the variable @LogSizeMb is set to zero and, if appropriate, provide a value for @Margin. The script can then be run to generate the TSQL needed to shrink and resize the TLog file, reducing the number of VLFs.

Run the snippets for the required database(s).

USE master;
GO

SET QUOTED_IDENTIFIER ON
GO

SET ARITHABORT ON
GO

DECLARE @DbName NVARCHAR(255)
	,@LogicalFileName NVARCHAR(255)
	,@DBRecoveryDesc VARCHAR(200)
	,@LogSizeMb INT
	,@DefaultFileGrowth VARCHAR(10)
	,@BackupLocation VARCHAR(255)
	,@GetDate VARCHAR(23)
	,@Margin AS DECIMAL(5, 2)

SELECT @Margin = 0
	,@LogSizeMb = 0
	,@BackupLocation = 'C:\SQL_Server\Backup\'
	,@DefaultFileGrowth = '128'
	,@GetDate = CONVERT(VARCHAR(23), GETDATE(), 126)
	,@GetDate = REPLACE(REPLACE(REPLACE(REPLACE(@GetDate, '-', ''), ':', ''), 'T', '_'), '.', '')

DECLARE DatabaseList CURSOR
FOR
SELECT d.NAME
	,d.recovery_model_desc AS DBRecoveryDesc
	,CASE 
		WHEN @LogSizeMb > 0
			THEN @LogSizeMb
		ELSE CEILING((mf.size * 8.0) / 1024) + (CEILING(((mf.size * 8.0) / 1024) * (@Margin / 100)))
		END AS LogSizeMb
	,@BackupLocation AS BackupLocation
	,@DefaultFileGrowth AS DefaultFileGrowth
FROM sys.databases AS d
LEFT JOIN sys.master_files AS mf ON d.database_Id = mf.database_Id
WHERE d.state_desc = 'ONLINE'
	AND d.is_read_only = 0
	AND d.database_id > 4
	AND type_desc = 'LOG'
ORDER BY NAME

OPEN DatabaseList

FETCH NEXT
FROM DatabaseList
INTO @DbName
	,@DBRecoveryDesc
	,@LogSizeMb
	,@BackupLocation
	,@DefaultFileGrowth

WHILE @@FETCH_STATUS = 0
BEGIN
	SET @LogicalFileName = (
			SELECT TOP 1 NAME
			FROM sys.master_files AS mf
			WHERE DB_NAME(database_id) = @DbName
				AND type_desc = 'LOG'
			)

	IF @DBRecoveryDesc = 'Full'
		AND @BackupLocation IS NOT NULL
	BEGIN
		PRINT (
				'Use [' + @DbName + '] 
            GO  
			
			BACKUP LOG [' + @DbName + '] TO DISK=''' + @BackupLocation + @DbName + '_' + @GetDate + '_Log.trn' + ''';
			GO
			
            ALTER DATABASE [' + @DbName + '] SET RECOVERY SIMPLE WITH NO_WAIT
            GO   

            DBCC SHRINKFILE (''' + @LogicalFileName + ''',10)  
            GO  
            
            ALTER DATABASE [' + @DbName + '] MODIFY FILE (NAME=''' + @LogicalFileName + ''', SIZE=' + CONVERT(VARCHAR(10), @LogSizeMb) + 'MB, MAXSIZE=UNLIMITED, FILEGROWTH=' + @DefaultFileGrowth + 'MB);
            GO

            ALTER DATABASE [' + @DbName + '] SET RECOVERY FULL WITH NO_WAIT
            GO '
				)
		PRINT '----------------------------------------------------------- '
	END
	ELSE IF @DBRecoveryDesc = 'Full'
	BEGIN
		PRINT (
				'Use [' + @DbName + '] 
            GO  
					
            ALTER DATABASE [' + @DbName + '] SET RECOVERY SIMPLE WITH NO_WAIT
            GO   

            DBCC SHRINKFILE (''' + @LogicalFileName + ''',10)  
            GO  
            
            ALTER DATABASE [' + @DbName + '] MODIFY FILE (NAME=''' + @LogicalFileName + ''', SIZE=' + CONVERT(VARCHAR(10), @LogSizeMb) + 'MB, MAXSIZE=UNLIMITED, FILEGROWTH=' + @DefaultFileGrowth + 'MB);
            GO

            ALTER DATABASE [' + @DbName + '] SET RECOVERY FULL WITH NO_WAIT
            GO '
				)
		PRINT '----------------------------------------------------------- '
	END
	ELSE IF @DBRecoveryDesc = 'Simple'
	BEGIN
		PRINT (
				'Use [' + @DbName + ']  
            GO  

            DBCC SHRINKFILE (''' + @LogicalFileName + ''',10)    
            GO
            
            ALTER DATABASE [' + @DbName + '] MODIFY FILE (NAME=''' + @LogicalFileName + ''', SIZE=' + CONVERT(VARCHAR(10), @LogSizeMb) + 'MB, MAXSIZE=UNLIMITED, FILEGROWTH=' + @DefaultFileGrowth + 'MB);
            GO
            '
				)
		PRINT '----------------------------------------------------------- '
	END

	FETCH NEXT
	FROM DatabaseList
	INTO @DbName
		,@DBRecoveryDesc
		,@LogSizeMb
		,@BackupLocation
		,@DefaultFileGrowth
END

CLOSE DatabaseList

DEALLOCATE DatabaseList

How to identify all user objects in the SQL Server Master database

What is the SQL Server Master Database used for?

The master database is used by SQL Server to contain all of the system level information:

  • Logins
  • Linked servers
  • Endpoints
  • Configuration settings
  • Information about the other databases on this instance and the location of their files

If the master database is not present, SQL Server cannot start.

This is why you should always take regular backups of the master database: if SQL Server suffers a failure, any changes made since the last backup will be lost and you'll be in a lot of trouble.
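A minimal backup sketch follows; the disk path is an assumption, point it at your own backup location:

BACKUP DATABASE master
TO DISK = 'C:\SQL_Server\Backup\master.bak'
WITH INIT;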

So the master database is needed for SQL Server to work. Logic and objects needed to enable your application should not be tangled up in there, so you should always avoid creating objects in the master database. Use a user database instead.

That said, everyone has made the mistake of not specifying the database they want to USE in a script, and by default objects get written to the master database. Or you might have inherited responsibility for an application which did not conform to best practices and uses objects created in master.

To gain visibility of these objects just run the script below.

SELECT o.object_id AS ObjectId
	,o.NAME AS ObjectName
	,o.type_desc AS ObjectType
	,o.create_date AS CreateDate
	,o.modify_date AS ModifyDate
	,SUM(st.row_count) AS RowCnt
	,CAST(SUM(st.used_page_count) / 128.0 AS DECIMAL(36, 1)) AS DataSize_MB
FROM master.sys.objects o
LEFT JOIN master.sys.dm_db_partition_stats st ON st.object_id = o.object_id
	AND st.index_id < 2
GROUP BY o.object_id
	,o.NAME
	,o.type_desc
	,o.create_date
	,o.modify_date
	,o.is_ms_shipped
HAVING o.is_ms_shipped = 0
	AND o.NAME <> 'sp_ssis_startup'
	AND o.type_desc NOT LIKE '%CONSTRAINT%'
ORDER BY CAST(SUM(st.used_page_count) / 128.0 AS DECIMAL(36, 1)) DESC
	,RowCnt DESC

How to determine the index fragmentation level on your SQL Server instance

Below is a script that will run against every table in every database on your SQL Server instance and output, among other things, the average index fragmentation percentage.

It can be a bit heavy when running against large databases with thousands of tables so I would recommend running it during downtime or outside office hours.

This script returns fragmentation stats by utilizing a DMV.

When NULLs are passed as parameters this DMV works at a server level and uses preset defaults.

[sys].[dm_db_index_physical_stats](NULL, NULL, NULL, NULL, NULL)

However it also accepts the parameter options below to limit the results returned.

[sys].[dm_db_index_physical_stats](Database_Id, Object_Id, Index_Id, partition_number, MODE)

MODE OPTIONS:

Mode specifies the scan level that is used to obtain statistics. Valid inputs are DEFAULT, NULL, LIMITED, SAMPLED, or DETAILED. The default (NULL) is LIMITED.

DEFAULT will have 12 columns full of data and then NULLs in the remainder.
SAMPLED will have 21 columns full of data.
LIMITED will have 12 columns of data and then NULLs in the remainder.
DETAILED will have 21 columns full of data.
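For example, to scope the results to a single table in a single database with the most thorough scan (MyDb and dbo.MyTable are hypothetical placeholders):

SELECT *
FROM sys.dm_db_index_physical_stats(DB_ID(N'MyDb'), OBJECT_ID(N'MyDb.dbo.MyTable'), NULL, NULL, 'DETAILED');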

Note: I've captured more fields in the temp table #Index than I use in the final select statement. This is because you might want to include additional index-related fields, i.e. check if the index is unique etc. If all you want is the index name in the final select statement, you can tidy up the query as needed and exclude the unwanted additional fields.

SET NOCOUNT ON

IF OBJECT_ID('tempdb..#Schema') IS NOT NULL
	DROP TABLE #Schema

IF OBJECT_ID('tempdb..#Index') IS NOT NULL
	DROP TABLE #Index

DECLARE @Sql AS VARCHAR(MAX)
DECLARE @Database TABLE (
	DbId INT
	,DbName SYSNAME
	)
DECLARE @DbName AS SYSNAME
DECLARE @DbId AS INT

CREATE TABLE #Schema (
	DbId INT
	,DbName SYSNAME
	,Object_Id INT NULL
	,ObjectName SYSNAME NULL
	,TableFlag BIT
	,ViewFlag BIT
	,Schema_Id INT NULL
	,SchemaName SYSNAME NULL
	)

CREATE TABLE #Index (
	DbId INT
	,DbName SYSNAME
	,Object_Id INT NOT NULL
	,NAME SYSNAME NULL
	,index_id INT NOT NULL
	,type TINYINT NOT NULL
	,type_desc NVARCHAR(60) NULL
	,is_unique BIT NULL
	,data_space_id INT NULL
	,ignore_dup_key BIT NULL
	,is_primary_key BIT NULL
	,is_unique_constraint BIT NULL
	,fill_factor TINYINT NOT NULL
	,is_padded BIT NULL
	,is_disabled BIT NULL
	,is_hypothetical BIT NULL
	,allow_row_locks BIT NULL
	,allow_page_locks BIT NULL
	,has_filter BIT NULL
	,filter_definition NVARCHAR(max) NULL
	)

INSERT INTO @Database (
	DbId
	,DbName
	)
SELECT dbid
	,NAME
FROM master.dbo.sysdatabases
WHERE NAME <> 'tempdb'
ORDER BY NAME ASC

SET @DbId = 0

WHILE @DbId IS NOT NULL
BEGIN
	SET @DbId = (
			SELECT MIN(DbId)
			FROM @Database
			WHERE DbId > @DbId
			)
	SET @DbName = (
			SELECT DbName
			FROM @Database
			WHERE DbId = @DbId
			)
	SET @Sql = '
	INSERT INTO #Schema (
		DbId
		,DbName
		,Object_Id
		,ObjectName
		,TableFlag
		,ViewFlag
		,Schema_Id
		,SchemaName
		)
	SELECT ' + CONVERT(VARCHAR(128), @DbId) + ' AS DbId
		,' + '''' + @DbName + '''' + ' AS DbName
		,o.Object_Id
		,o.NAME AS ObjectName
		,CASE 
			WHEN o.type = ''IT''
				THEN 1
			WHEN o.type = ''S''
				THEN 1
			WHEN o.type = ''TT''
				THEN 1
			WHEN o.type = ''U''
				THEN 1
			ELSE NULL
			END AS TableFlag
		,CASE 
			WHEN o.type = ''V''
				THEN 1
			ELSE NULL
			END AS ViewFlag
		,s.Schema_Id
		,s.NAME AS SchemaName
	FROM ' + QUOTENAME(@DbName) + '.sys.objects AS o WITH (NOLOCK)
	INNER JOIN ' + QUOTENAME(@DbName) + '.sys.schemas AS s WITH (NOLOCK) ON o.schema_id = s.schema_id
	'

	EXEC (@Sql)

	SET @Sql = '
	INSERT INTO #Index (
	DbId
	,DbName
	,object_id
	,NAME
	,index_id
	,type
	,type_desc
	,is_unique
	,data_space_id
	,ignore_dup_key
	,is_primary_key
	,is_unique_constraint
	,fill_factor
	,is_padded
	,is_disabled
	,is_hypothetical
	,allow_row_locks
	,allow_page_locks
	,has_filter
	,filter_definition
	)
	SELECT ' + CONVERT(VARCHAR(128), @DbId) + ' AS DbId
		,' + '''' + @DbName + '''' + ' AS DbName
		,object_id
		,NAME
		,index_id
		,type
		,type_desc
		,is_unique
		,data_space_id
		,ignore_dup_key
		,is_primary_key
		,is_unique_constraint
		,fill_factor
		,is_padded
		,is_disabled
		,is_hypothetical
		,allow_row_locks
		,allow_page_locks
		,has_filter
		,filter_definition
	FROM ' + QUOTENAME(@DbName) + '.sys.indexes WITH (NOLOCK)
	'

	EXEC (@Sql)
END

SELECT ROW_NUMBER() OVER (
		ORDER BY s.DbName ASC
			,s.SchemaName ASC
			,s.ObjectName
		) AS Row
	,s.DbName
	,s.SchemaName AS SchemaName
	,s.Object_Id AS ObjectId
	,s.ObjectName
	,s.TableFlag
	,s.ViewFlag
	,i.Name AS IndexName
	,ddips.index_type_desc AS IndexType
	,ddips.page_count AS Pages
	,ddips.record_count AS Records
	,ddips.fragment_count AS Frags
	,ddips.avg_fragmentation_in_percent AS AvgFragPercent
FROM sys.dm_db_index_physical_stats(NULL, NULL, NULL, NULL, 'SAMPLED') AS ddips
INNER JOIN #Schema AS s ON ddips.database_id = s.DbId
INNER JOIN #Index AS i ON s.DbId = i.DbId
	AND s.Object_Id = i.Object_Id
	AND ddips.index_id = i.index_id
	AND ddips.object_id = s.Object_Id
WHERE (
		s.TableFlag = 1
		OR s.ViewFlag = 1
		)
ORDER BY s.DbName ASC
	,s.SchemaName ASC
	,s.ObjectName

How to export a table as an XML file using a SQL script

Below is a script that will allow you, as the title suggests, to export a table as an XML file.

By use of the find and replace function in your preferred text editor, or by way of the text editor in SSMS, input your values for the required fields, i.e. replace TableName with the name of the table you will be working with.

SERVER NAME: ServerName

DATABASE NAME: DatabaseName

SCHEMA NAME: SchemaName

TABLE NAME: TableName

SAVE LOCATION ADDRESS: AddressName

FILE NAME: FileName (Excluding .xml extension)

Then run the script below and the table will be exported as an XML file to the location and file name of your choosing.

/*
FIND AND REPLACE:

SERVER NAME: ServerName
DATABASE NAME: DatabaseName 
SCHEMA NAME: SchemaName
TABLE NAME: TableName
SAVE LOCATION ADDRESS: AddressName
FILE NAME: FileName
 */
-- Run on ServerName
USE DatabaseName;
GO

DECLARE @saveLocation AS VARCHAR(510)
DECLARE @nameOfFile AS VARCHAR(255)
DECLARE @instanceName AS VARCHAR(255)
DECLARE @cmd NVARCHAR(1020)

SET @saveLocation = 'AddressName'
SET @nameOfFile = 'FileName'
SET @saveLocation = @saveLocation + '\' + @nameOfFile + '.xml'
SET @instanceName = (
		SELECT @@servername
		)

-- Please note that the fully qualified table name is needed
SELECT @cmd = '
    bcp "SELECT * FROM [DatabaseName].[SchemaName].[TableName] row for xml auto, root(''rows''), elements" ' + 'queryout "' + @saveLocation + '" -S ' + @instanceName + ' -T -w -r -t';

EXEC xp_cmdshell @cmd;
GO

 

This script is enabled by two utilities: xp_cmdshell and BCP.

For an explanation of each please view the associated links.
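Note that xp_cmdshell is disabled by default on most instances. A minimal sketch to enable it via sp_configure is below; weigh up the security implications first and consider disabling it again once finished.

EXEC sp_configure 'show advanced options', 1;
RECONFIGURE;

EXEC sp_configure 'xp_cmdshell', 1;
RECONFIGURE;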