Okay, so, this is HIGHLY inadvisable to actually use in any kind of production environment, but it was a fun little challenge and it works for your test data. I strongly recommend you simply look for an alternative solution to the whole problem that has led to you storing where clauses in a database table.
This will not work for where clauses that have . characters within text strings, nor will any other solution that relies on splitting the string by . characters, without a lot of extra effort to check whether each such character is part of a string value.
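For example, a stored condition like the following (a hypothetical row, not part of the test data below) contains dots inside the string literal, so the query below would split on them and return spurious pairs such as Text.MoreText alongside RTY.ColA:

(4,'RTY.ColA = ''.Text.MoreText.EvenMoreText.''')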
Utilising Jeff Moden's string splitting function (DelimitedSplit8K, defined at the end of this answer), you can do the following:
declare @StringRetrival table(ID int,Condition varchar(4000));
insert into @StringRetrival(ID,Condition) values
(1,N'ISNULL(ABC.Premium, 0) < 0,ISNULL(ABC.Date, 101) < 19')
,(2,N'ISNULL(DEF.ColB, 101) < 25,ISNULL(DEF.ColB, 101) < 25,ISNULL(XYZ.ColB, 101) > 5, MSN.ColA < 5')
,(3,N'RTY.ColA');
with s1 as
(
select r.ID
,r.Condition
,s.ItemNumber
,max(s.ItemNumber) over (partition by r.ID) as MaxItemNumber
,reverse(s.Item) as Item
from @StringRetrival r
cross apply dbo.DelimitedSplit8K(r.Condition,'.') s
),s2 as
(
-- the same split, not reversed: item k+1 holds the text that follows the k-th '.'
select r.ID
,s.ItemNumber
,s.Item
from @StringRetrival r
cross apply dbo.DelimitedSplit8K(r.Condition,'.') s
)
select distinct s1.ID
,reverse(left(s1.Item,patindex('%[^a-zA-Z]%',s1.Item + ',')-1)) + '.' + left(s2.Item,patindex('%[^a-zA-Z]%',s2.Item + ',')-1) as Col
from s1
join s2
on s1.ID = s2.ID
and s2.ItemNumber = s1.ItemNumber + 1
where s1.ItemNumber <> s1.MaxItemNumber
order by s1.ID;
Which will output:
+----+-------------+
| ID | Col         |
+----+-------------+
| 1  | ABC.Date    |
| 1  | ABC.Premium |
| 2  | DEF.ColB    |
| 2  | MSN.ColA    |
| 2  | XYZ.ColB    |
| 3  | RTY.ColA    |
+----+-------------+
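If the reverse/patindex gymnastics in the Col expression are hard to follow, here is the same logic applied to the two pieces either side of the first dot of ID 1's condition, as standalone statements purely for illustration:

-- the piece before the dot is 'ISNULL(ABC'; reversing puts the alias first,
-- patindex finds the first non-letter, and reversing back restores the alias
select reverse(left(reverse('ISNULL(ABC'),patindex('%[^a-zA-Z]%',reverse('ISNULL(ABC') + ',')-1));    -- ABC
-- the piece after the dot already starts with the column name, so no reversing is needed
select left('Premium, 0) < 0,ISNULL(ABC',patindex('%[^a-zA-Z]%','Premium, 0) < 0,ISNULL(ABC' + ',')-1);    -- Premium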
The SQL to create the splitting function:
CREATE FUNCTION [dbo].[DelimitedSplit8K]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
--WARNING!!! DO NOT USE MAX DATA-TYPES HERE! IT WILL KILL PERFORMANCE!
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table" produces values from 1 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@pString),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = SUBSTRING(@pString, l.N1, l.L1)
FROM cteLen l
GO
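A quick sanity check of the splitter itself once it has been created:

select ItemNumber, Item
from dbo.DelimitedSplit8K('ABC.Premium.Date','.');
-- ItemNumber  Item
-- 1           ABC
-- 2           Premium
-- 3           Date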