summaryrefslogtreecommitdiff
path: root/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_dma.c
blob: a08f1d47a98b78153131f77eb025b99353b0d1a5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
/*
 * Copyright (c) 2007-2009 NVIDIA Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NVIDIA Corporation nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

/**
 * @file
 * @brief <b>NVIDIA Driver Development Kit:
 *           DMA Resource manager </b>
 *
 * @b Description: Implements the interface of the NvRM DMA. This file
 *  implements the DMA API for the AP15 Dma controller.
 *
 * This file contains the common code for the ap15 dma controller to manage
 * the different operations of the dma.
 */

/**
 *                Dma Design details
 *                ------------------
 * 1. There are two types of dma allocation, i.e. low priority and high
 * priority dma. The low priority allocation shares the same dma channel
 * between different clients. The high priority allocation does not share the
 * dma channel; the channel is used by the requesting client only. Hence, the
 * high priority dma allocation may fail if there is no channel available, but
 * the low priority channel allocation will not fail as long as there is
 * sufficient memory for the dma handle creation.
 *
 * 2. The dma allocation is done based on the requestor module Id. It only
 * supports dma transfers from memory to an apb peripheral or vice versa.
 *
 * 3. The DmaTransfer transfers the data from source to dest and dest to source
 * based on the direction passed. It may be possible to do the dma transfer
 * from destination to source address by passing the dma direction as reverse.
 *
 * 4. The destination and source address may be any type like peripheral or
 * memory or xmb memory. There is no restriction on passing the source/destn
 * address by the client. The implementation will take care of proper
 * configuration of the dma register address.
 *
 * 5. It may be possible to free the dma when transfer is going on.
 * In this case, the dma will be free for the another allocation once the
 * transfer completes. The dma handle will be destroyed immediately for the
 * client.
 *
 * 6. It is possible to abort the dma transfer for both types of dma, high
 * priority and low priority. In this case, the dma transfer is immediately
 * stopped if a transfer is in progress for the requestor client, and all
 * queued dma requests are aborted.
 *
 * 7. The client can request any amount of data transfer. If the dma is not
 * capable of transferring the data in one transaction, it will do multiple
 * transactions internally and notify the client after the last transaction.
 *
 *
 *                Implementation details
 *                ----------------------
 *  1. The implementation should support any number of the apb dma
 * channel on run time. There should not be any static allocation till it
 * very necessarily. It does not support the ahb dma.
 *
 * 2. One dma channel is reserved for the low priority dma handle allocation.
 * This channel is shared between the low priority requestor clients.
 *
 * 3. The client will abort the dma request done by him only. It can not cancel
 * the request done by other clients.
 *
 * 4. Dma Request can be queued and there is not any limitation to queue the
 * request till we have the sufficient memory from the os.
 *
 * 5. It supports both synchronous and asynchronous types of operation.
 *
 * 6. For each dma channel, it allocates the memory for keeping the client
 * request.
 * if the number of request is more than the allocated number of list then it
 * again reallocate the memory for the new request and free the already allocated
 * list. The old requests are transferred to the new allocated list. The benefit
 * of this type of method is that we need not to do the allocation to queue the
 * request for each transfer request. In this way we can avoid the memory
 * allocation and freeing of the memory for the each time.
 * We start the allocation of memory from n and if the number of request is more
 * than this (n) then reallocation is done for the (n +n) request and if it is
 * full then again reallocation is done for the (2n + 2n). In this way the order
 * of allocation is Log(n).
 *
 * 7. All apb dma channel interrupts are handled in a single isr.
 * The interrupted dma channel is detected by scanning all the dma
 * channels one by one.
 *
 * 8. The apb dma hw control api is called through function pointers, so
 * whenever there is a difference in the handling of a dma request for a dma
 * channel, it goes through the dma hw interface.
 *
 * 9. I2s channels related request will use the continuous double buffering.
 * Uart receive (from fifo to memory) will use the continuous double buffering
 * on same buffer.
 *
 */

#include "nvrm_dma.h"
#include "nvrm_interrupt.h"
#include "nvrm_power.h"
#include "nvrm_moduleids.h"
#include "nvrm_hardware_access.h"
#include "rm_dma_hw_private.h"
#include "nvassert.h"
#include "nvrm_priv_ap_general.h"
#include "mach/nvrm_linux.h"

/* FIXME move these to some header file */
NvError NvRmPrivDmaInit(NvRmDeviceHandle hDevice);
void NvRmPrivDmaDeInit(void);
NvError NvRmPrivDmaSuspend(void);
NvError NvRmPrivDmaResume(void);

#define MAX_AVP_DMA_CHANNELS 3

// DMA capabilities -- these currently do not vary between chips

// Maximum dma transfer size for one transfer.
#define DMA_MAX_TRANSFER_SIZE       0x10000

// Address alignment requirement for the dma buffer address
#define DMA_ADDRESS_ALIGNMENT       4

// Transfer size alignment for the dma transfer.
#define DMA_TRANSFER_SIZE_ALIGNMENT 4

// Initial depth of the dma transfer request queue per channel
#define DMA_TRANSFER_REQ_DEPTH 16

// The end index of the list
#define DMA_NULL_INDEX 0xFFFF

// Defines the dma request states.
typedef enum
{
    // The request has not been started.
    RmDmaRequestState_NotStarted = 0x1,

    // The request is running state.
    RmDmaRequestState_Running ,

    // The request is completed state.
    RmDmaRequestState_Completed ,

    // The request is stopped state.
    RmDmaRequestState_Stopped,

    // The request is unused state.
    RmDmaRequestState_Unused,

    RmDmaRequestState_Force32 = 0x7FFFFFFF
} RmDmaRequestState;

// Defines the dma channel allocation state.
typedef enum
{
    // Dma channel is free and available for the allocation.
    RmDmaChannelState_Free = 0x1,

    // The dma channel is free from the client but still it has the request
    // for the data transfer.
    RmDmaChannelState_MarkedFree ,

    // Dma channel is used by the client.
    RmDmaChannelState_Used,

    RmDmaChannelState_Force32 = 0x7FFFFFFF
} RmDmaChannelState;

// Defines the dma channel transfer mode and property.
typedef enum
{
    // initial value of the states.
    RmDmaTransferMode_Init = 0x0,

    // Dma channel transfer mode is continuous.
    RmDmaTransferMode_Continuous = 0x1,

    // Dma channel transfer mode is Double buffering.
    RmDmaTransferMode_DoubleBuff = 0x2,

    // Dma channel transfer mode is to transfer the same buffer afain and again.
    RmDmaTransferMode_SameBuff = 0x4,

    // Dma channel transfer where source address is the Xmb address.
    RmDmaTransferMode_SourceXmb = 0x8,

    // Dma channel transfer where source address is the Peripheral address.
    RmDmaTransferMode_SourcePeripheral = 0x10,

    // Dma channel transfer request is asynchrnous.
    RmDmaTransferMode_Asynch = 0x20,

    // Dma channel transfer is for the pin interrupt now.
    RmDmaTransferMode_PingIntMode = 0x40,

    RmDmaTransferMode_Force32 = 0x7FFFFFFF
} RmDmaTransferMode;

/**
 * Combines the Dma transfer request information which will be queued and
 * require to start the transfer and for notification after transfer completes.
 */
typedef struct DmaTransReqRec
{
    // Unique Id
    NvU32 UniqueId;

    // Current state of the channel.
    RmDmaRequestState State;

    // The dema request transfer mode and details of the request.
    RmDmaTransferMode TransferMode;

    // The Source address for the data transfer.
    NvRmPhysAddr SourceAdd;

    // The destiniation address for the data transfer.
    NvRmPhysAddr DestAdd;

    // The source address wrapping.
    NvU32 SourceAddWrap;

    // The destination address wrapping.
    NvU32 DestAddWrap;

    // Number of bytes requested.
    NvU32 BytesRequested;

    // Number of bytes programmed for current data transfer.
    NvU32 BytesCurrProgram;

    // Number of bytes remaining to transfer.
    NvU32 BytesRemaining;

    // The configuartion of dma in terms of register content and channel
    // register info.
    DmaChanRegisters DmaChanRegs;

    // Semaphore Id which need to be signalled after completion.
    NvOsSemaphoreHandle hOnDmaCompleteSema;

    // Semaphore Id which need to be signalled after half of the transfer
    // completion.
    NvOsSemaphoreHandle hOnHalfDmaCompleteSema;

    // Semaphore Id which need to be destoyed when new request will be placed
    // by this list memory.
    NvOsSemaphoreHandle hLastReqSema;

    // Array based the double link list.
    NvU16 NextIndex;

    NvU16 PrevIndex;

} DmaTransReq;

/**
 * Combines the channel information, status, requestor information for the
 * channel dma, type of dma etc.
 */
typedef struct RmDmaChannelRec
{
    // State of the channel.
    RmDmaChannelState ChannelState;

    // Dma priority whether this is low priority channel or high prority
    // channel.
    NvRmDmaPriority Priority;

    // Pointer to the list of the transfer request.
    struct DmaTransReqRec *pTransReqList;

    // Currently maximum request possible.
    NvU16 MaxReqList;

    // Head index to the request
    NvU16 HeadReqIndex;

    // Tail Index to the request
    NvU16 TailReqIndex;

    // Head index to the free list.
    NvU16 HeadFreeIndex;

    // Mutex to provide the thread/interrupt safety for the channel specific
    // data.
    NvOsIntrMutexHandle hIntrMutex;

    // The virtual base address of the channel registers.
    NvU32 *pVirtChannelAdd;

    // Channel address bank size.
    NvU32 ChannelAddBankSize;

    // Pointer to the dma hw interface apis strcuture.
    DmaHwInterface *pHwInterface;

    // Log the last requested size
    NvU32 LastReqSize;

#if NVOS_IS_LINUX
    // Channel interrupt handle 
    NvOsInterruptHandle hIntrHandle;
#endif

} RmDmaChannel, *RmDmaChannelHandle;

/**
 * Combines the dma information
 */
typedef struct
{
    // Device handle.
    NvRmDeviceHandle hDevice;

    // Actual numbers of Apb dma channels available on the soc.
    NvU32 NumApbDmaChannels;

    RmDmaChannel *pListApbDmaChannel;

    // Apb Dma General registers
    DmaGenRegisters ApbDmaGenReg;

    // OS mutex for channel allocation and deallocation:  provide thread safety
    NvOsMutexHandle hDmaAllocMutex;
} NvRmPrivDmaInfo;

/**
 * Combines the Dma requestor and related information which is required for
 * other dma operation request.
 */
typedef struct NvRmDmaRec
{
    // Store the Rm device handle
    NvRmDeviceHandle  hRmDevice;

    // Corresponding dma channel pointer to APB dma for this handle.
    RmDmaChannel *pDmaChannel;

    // Flag to tells whether 32 bit swap is enabled or not.
    NvBool IsBitSwapEnable;

    // Unique Id
    NvU32 UniqueId;

    // Dma requestor module Id.
    NvRmDmaModuleID DmaReqModuleId;

    // dma requestor instance Id.
    NvU32 DmaReqInstId;

    // Dma register information which contain the configuration for dma when it
    // was allocated
    DmaChanRegisters DmaChRegs;

    // NvOs semaphore which will be used when synchrnous operation is requested.
    NvOsSemaphoreHandle hSyncSema;
} NvRmDma;

static NvRmPrivDmaInfo s_DmaInfo;
static DmaHwInterface s_ApbDmaInterface;
#if !NVOS_IS_LINUX
static NvOsInterruptHandle s_ApbDmaInterruptHandle = NULL;
#endif

/**
 * Returns the number of APB DMA channels available for general-purpose
 * allocation: the total channel count minus the channels reserved for the
 * AVP and for the Linux system DMA.
 */
NvU32 NvRmDmaUnreservedChannels(void)
{
    NvU32 ReservedChannels = MAX_AVP_DMA_CHANNELS + TEGRA_SYSTEM_DMA_CH_NUM;

    return s_DmaInfo.NumApbDmaChannels - ReservedChannels;
}


/**
 * Deinitialize the apb dma physical/virtual addresses. This function will
 * unmap the virtual mapping.
 *
 * Thread Safety: Caller responsibility.
 */
static void DeInitDmaGeneralHwRegsAddress(void)
{
    // Unmap the virtual mapping for apb general register.
    NvRmPhysicalMemUnmap(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
                         s_DmaInfo.ApbDmaGenReg.GenAddBankSize);
    s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd = NULL;
}

/**
 * Initialize the apb dma general register addresses. Queries the physical
 * base address of the apb dma controller's general registers from the Rm
 * module API and maps it into an uncached virtual address.
 *
 * Returns the NvRmPhysicalMemMap result (NvSuccess on success).
 *
 * Thread Safety: Caller responsibility.
 */
static NvError InitDmaGeneralHwRegsAddress(void)
{
    NvRmPhysAddr ApbPhysAddr;
    NvRmModuleID ModuleId;
    NvRmDeviceHandle hDevice = s_DmaInfo.hDevice;

    // Query the physical base address and bank size of the apb dma
    // controller general registers.
    ModuleId = NVRM_MODULE_ID(NvRmPrivModuleID_ApbDma, 0);
    NvRmModuleGetBaseAddress(hDevice, ModuleId, &ApbPhysAddr,
        &s_DmaInfo.ApbDmaGenReg.GenAddBankSize);

    // Map the register bank uncached; the virtual address is stored in the
    // global dma info on success.
    s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd = NULL;
    return NvRmPhysicalMemMap(ApbPhysAddr,
        s_DmaInfo.ApbDmaGenReg.GenAddBankSize, NVOS_MEM_READ_WRITE,
        NvOsMemAttribute_Uncached,
        (void **)&s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd);
}

/**
 * Grow the channel's transfer-request list by MoreListSize entries.
 *
 * A fresh array of (MaxReqList + MoreListSize) entries is allocated, any
 * existing entries are copied across, the old array is freed, and the new
 * tail entries are chained into the free list (HeadFreeIndex). On failure
 * the channel is left untouched.
 *
 * NOTE(review): Index is NvU16 while TotalReqSize is NvU32 -- if the list
 * ever grew past 0xFFFF entries the loop bound would misbehave; presumably
 * request depths stay far below that. Confirm the growth policy at callers.
 *
 * Returns NvSuccess or NvError_InsufficientMemory.
 * Thread Safety: Caller responsibility.
 */
static NvError AllocateReqList(RmDmaChannel *pDmaChannel, NvU16 MoreListSize)
{
    NvU16 Index;
    DmaTransReq *pTransReqList = NULL;
    DmaTransReq *pExistTransReqList = pDmaChannel->pTransReqList;
    NvU32 TotalReqSize = (pDmaChannel->MaxReqList + MoreListSize);

    // Allocate the memory for logging the client requests.
    pTransReqList = NvOsAlloc(TotalReqSize * sizeof(DmaTransReq));
    if (!pTransReqList)
        return NvError_InsufficientMemory;

    NvOsMemset(pTransReqList, 0, TotalReqSize * sizeof(DmaTransReq));

    // Copy the existing requests (if any) to the newly allocated list and
    // release the old array.
    if (pExistTransReqList)
    {
        NvOsMemcpy(pTransReqList, pExistTransReqList,
                    pDmaChannel->MaxReqList * sizeof(DmaTransReq));
        NvOsFree(pExistTransReqList);
    }

    // Link the new tail entries into a doubly linked free list; the first
    // new entry has no predecessor.
    for (Index = pDmaChannel->MaxReqList; Index < TotalReqSize; ++Index)
    {
        if (Index == pDmaChannel->MaxReqList)
            pTransReqList[pDmaChannel->MaxReqList].PrevIndex = DMA_NULL_INDEX;
        else
            pTransReqList[Index].PrevIndex = Index-1;

        pTransReqList[Index].NextIndex = Index + 1;
    }
    // Terminate the free list at the last new entry and publish the grown
    // list on the channel.
    pTransReqList[Index-1].NextIndex = DMA_NULL_INDEX;
    pDmaChannel->pTransReqList = pTransReqList;
    pDmaChannel->HeadFreeIndex = pDmaChannel->MaxReqList;
    pDmaChannel->MaxReqList += MoreListSize;
    return NvSuccess;
}

/**
 * Deinitialize the Apb dma channels. Frees the per-channel request lists,
 * unmaps the channel register banks, destroys the channel mutexes, and
 * finally frees the channel array itself.
 *
 * @param pDmaList     Channel array to tear down (may be NULL; then a no-op).
 * @param TotalChannel Number of entries in pDmaList.
 *
 * Thread Safety: Caller responsibility.
 */
static void DeInitDmaChannels(RmDmaChannel *pDmaList, NvU32 TotalChannel)
{
    NvU32 i;

    if (!pDmaList)
        return;

    for (i = 0; i < TotalChannel; i++)
    {
        // NOTE: the old `if (pDmaChannel)` guard was removed -- the address
        // of an array element is never NULL, so the check was dead code.
        RmDmaChannel *pDmaChannel = &pDmaList[i];

        // Free the queued-request list of this channel.
        NvOsFree(pDmaChannel->pTransReqList);
        pDmaChannel->MaxReqList = 0;

        // Free the dma channel's virtual register mapping.
        NvRmPhysicalMemUnmap(pDmaChannel->pVirtChannelAdd,
            pDmaChannel->ChannelAddBankSize);
        NvOsIntrMutexDestroy(pDmaChannel->hIntrMutex);
    }
    NvOsFree(pDmaList);
}

/**
 * Initialize the Apb dma channels. Builds the array of all available dma
 * channels, each starting in the Free state so it is available for
 * allocation.
 * Once a client asks for a dma channel, the allocator removes a free channel
 * from this list and attaches it to the dma handle; client data transfer
 * requests are then queued on the channel.
 *
 * On any failure, everything allocated so far is rolled back via
 * DeInitDmaChannels() and *pDmaChannelList is set to NULL.
 *
 * Thread Safety: Caller responsibility.
 */
static NvError
InitDmaChannels(
    NvRmDeviceHandle hDevice,
    RmDmaChannel **pDmaChannelList,
    NvU32 TotalChannel,

    NvRmModuleID DmaModuleId)
{
    NvU32 ChanIndex;
    NvError Error = NvSuccess;
    RmDmaChannel *pDmaChannel = NULL;
    NvRmModuleID ModuleId = 0;
    NvRmPhysAddr ChannelPhysAddr;
    RmDmaChannel *pDmaList = NULL;

    // Allocate the memory to store all the dma channel information.
    pDmaList = NvOsAlloc(TotalChannel * sizeof(RmDmaChannel));
    if (!pDmaList)
        return NvError_InsufficientMemory;

    // First pass: put every channel into a known default state so that the
    // cleanup path can safely run on a partially built list.
    for (ChanIndex = 0; ChanIndex < TotalChannel; ++ChanIndex)
    {
        pDmaChannel = &pDmaList[ChanIndex];

        // Initialize all channel members to the initial known states.
        pDmaChannel->ChannelState = RmDmaChannelState_Free;
        pDmaChannel->Priority = NvRmDmaPriority_High;
        pDmaChannel->pTransReqList = NULL;
        pDmaChannel->MaxReqList = 0;
        pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
        pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
        pDmaChannel->HeadFreeIndex = DMA_NULL_INDEX;
        pDmaChannel->hIntrMutex = NULL;
        pDmaChannel->pVirtChannelAdd = NULL;
        pDmaChannel->ChannelAddBankSize = 0;
        pDmaChannel->pHwInterface = &s_ApbDmaInterface;
    }

    // Second pass: allocate the resources and register mappings for each
    // channel.
    for (ChanIndex = 0; ChanIndex < TotalChannel; ++ChanIndex)
    {
        pDmaChannel = &pDmaList[ChanIndex];

        // Allocate the memory for logging the client request.
        Error = AllocateReqList(pDmaChannel, DMA_TRANSFER_REQ_DEPTH);

        // Create mutex for the channel access.
        if (!Error)
            Error = NvOsIntrMutexCreate(&pDmaChannel->hIntrMutex);

        // Map the register base address of the channel.
        if (!Error)
        {
            ModuleId = NVRM_MODULE_ID(DmaModuleId, ChanIndex);
            NvRmModuleGetBaseAddress(hDevice, ModuleId, &ChannelPhysAddr,
                                &pDmaChannel->ChannelAddBankSize);
            Error = NvRmPhysicalMemMap(ChannelPhysAddr,
                pDmaChannel->ChannelAddBankSize, NVOS_MEM_READ_WRITE,
                NvOsMemAttribute_Uncached,
                (void **)&pDmaChannel->pVirtChannelAdd);
        }
        if (Error)
            break;
    }

    if (!Error)
    {
        // Allocate last channel as a low priority request, others are
        // high priority channel
        *pDmaChannelList = (RmDmaChannel *)pDmaList;
    }
    else
    {
        // Roll back everything built so far on failure.
        DeInitDmaChannels(pDmaList, TotalChannel);
        *pDmaChannelList = (RmDmaChannel *)NULL;
    }
    return Error;
}

/**
 * Initialize the Apb dma channel list from the global dma info.
 * Thread Safety: Caller responsibility.
 */
static NvError InitAllDmaChannels(void)
{
    // Everything needed comes from the global dma info structure.
    return InitDmaChannels(s_DmaInfo.hDevice, &s_DmaInfo.pListApbDmaChannel,
               s_DmaInfo.NumApbDmaChannels, NvRmPrivModuleID_ApbDmaChannel);
}

/**
 * Deinitialize the Apb dma channels.
 * Thread Safety: Caller responsibility.
 */
static void DeInitAllDmaChannels(void)
{
    // Deinitialize the apb dma channels.
    DeInitDmaChannels(s_DmaInfo.pListApbDmaChannel, s_DmaInfo.NumApbDmaChannels);
    s_DmaInfo.pListApbDmaChannel = NULL;
}

/**
 * DeInitialize the dmas: globally disable the apb dma channels, gate the
 * apb dma module clock, and release the per-channel resources.
 *
 * Thread Safety: Caller responsibility.
 */
static void DeInitDmas(void)
{
    // Globally disable all apb dma channels before tearing anything down.
    s_ApbDmaInterface.DmaHwGlobalSetFxn(
        s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd, NV_FALSE);

    // Gate the apb dma module clock; the result is deliberately ignored
    // since teardown continues regardless.
    (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice,
        NvRmPrivModuleID_ApbDma, 0, NV_FALSE);

    // Release the per-channel resources and the channel list itself.
    DeInitAllDmaChannels();
}

/**
 * Initialize the dmas: build the channel lists, enable the apb dma module
 * clock, reset the controller, and enable it globally. On any failure the
 * partial setup is rolled back via DeInitDmas().
 *
 * @param hRmDevice Rm device handle used for clock and reset control.
 * @return NvSuccess on success, otherwise the first error encountered.
 *
 * Thread Safety: Caller responsibility.
 */
static NvError InitDmas(NvRmDeviceHandle hRmDevice)
{
    NvError Error;

    // Build the per-channel bookkeeping first.
    Error = InitAllDmaChannels();

    // Ungate the apb dma module clock.
    if (!Error)
        Error = NvRmPowerModuleClockControl(hRmDevice,
                    NvRmPrivModuleID_ApbDma, 0, NV_TRUE);

    if (!Error)
    {
        // Bring the controller to a known state, then enable it globally.
        NvRmModuleReset(hRmDevice, NVRM_MODULE_ID(NvRmPrivModuleID_ApbDma, 0));
        s_ApbDmaInterface.DmaHwGlobalSetFxn(
            s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd, NV_TRUE);
    }
    else
    {
        // Roll back whatever was brought up before the failure.
        DeInitDmas();
    }
    return Error;
}


/**
 * Continue the current transfer by sending the next chunk of the data from the
 * current dma transfer request. This may be called when requested size is
 * larger than the supported dma transfer size in single go by hw.
 *
 */
static void ApbDmaContinueRemainingTransfer(void *pDmaChan)
{
    NvU32 CurrProgSize;
    NvU32 LastTransferSize;
    DmaTransReq  *pCurrReq = NULL;
    NvBool IsDoubleBuff;
    NvBool IsContMode;
    RmDmaChannel *pDmaChannel = (RmDmaChannel *)pDmaChan;

    pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];

    // Get the last transfer size in bytes from the start of the source and
    // destination address
    LastTransferSize  = pCurrReq->BytesCurrProgram;

    // Calculate the possible transfer size based on remaining bytes and
    // maximum transfer size. Updates the remaining size, transfer size and
    // programmed size accordingly.
    CurrProgSize = NV_MIN(pCurrReq->BytesRemaining, DMA_MAX_TRANSFER_SIZE);

    IsDoubleBuff = (pCurrReq->TransferMode & RmDmaTransferMode_DoubleBuff)? NV_TRUE: NV_FALSE;
    IsContMode = (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)? NV_TRUE: NV_FALSE;

    // Program the transfer size.
    pDmaChannel->pHwInterface->DmaHwSetTransferSizeFxn(&pCurrReq->DmaChanRegs,
                                                CurrProgSize, IsDoubleBuff);
    pDmaChannel->pHwInterface->DmaHwStartTransferWithAddIncFxn(
                    &pCurrReq->DmaChanRegs, 0, LastTransferSize, IsContMode);

    // Update the parameter which will be used in future.
    pCurrReq->BytesRemaining  -= CurrProgSize;
    pCurrReq->BytesCurrProgram = CurrProgSize;
}


/**
 * Handle the dma complete interrupt in once mode.
 *
 * Thread Safety: Caller responsibility.
 */
static void
OnDmaCompleteInOnceMode(
    RmDmaChannel *pDmaChannel,
    DmaTransReq  *pCurrReq)
{
    NvOsSemaphoreHandle hSignalSema = NULL;
    NvU16 CurrHeadIndex;

    // Acknowledge and clear the channel interrupt first.
    pDmaChannel->pHwInterface->DmaHwAckNClearInterruptFxn(&pCurrReq->DmaChanRegs);

    // If the request was split into hardware-sized chunks and bytes
    // remain, program the next chunk and return; the request is not
    // complete yet.
    if (pCurrReq->BytesRemaining)
    {
        pDmaChannel->pHwInterface->DmaContinueRemainingTransferFxn(pDmaChannel);
        return;
    }

    pCurrReq->State = RmDmaRequestState_Completed;

    // Remember the semaphore to signal; signalling happens only after the
    // channel bookkeeping below is finished.
    hSignalSema = pCurrReq->hOnDmaCompleteSema;
    pDmaChannel->LastReqSize = pCurrReq->BytesRequested;

    // Move the completed request from the pending list to the free list.
    CurrHeadIndex = pDmaChannel->HeadReqIndex;
    pDmaChannel->HeadReqIndex = pDmaChannel->pTransReqList[CurrHeadIndex].NextIndex;
    pDmaChannel->pTransReqList[CurrHeadIndex].NextIndex = pDmaChannel->HeadFreeIndex;
    pDmaChannel->HeadFreeIndex = CurrHeadIndex;
    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
    {
        pDmaChannel->TailReqIndex = DMA_NULL_INDEX;

        // If the client already freed its handle, release the channel so
        // it becomes available for the next allocation.
        if (pDmaChannel->ChannelState == RmDmaChannelState_MarkedFree)
            pDmaChannel->ChannelState = RmDmaChannelState_Free;

        // Notify the client that the data transfer completed.
        if (hSignalSema)
            NvOsSemaphoreSignal(hSignalSema);
        return;
    }
    // More requests are queued: start the next one before signalling.
    pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
    pCurrReq->State = RmDmaRequestState_Running;
    pDmaChannel->pHwInterface->DmaHwStartTransferFxn(&pCurrReq->DmaChanRegs);

    // Notify the client that the retired transfer completed.
    if (hSignalSema)
        NvOsSemaphoreSignal(hSignalSema);
}

/**
 * Handle the dma complete interrupt in continuous (double-buffered) mode.
 * Interrupts alternate between the ping (first half) and pong (second
 * half) buffer completions, tracked via RmDmaTransferMode_PingIntMode.
 *
 * Thread Safety: Caller responsibility.
 */
static void
OnDmaCompleteInContinuousMode(
    RmDmaChannel *pDmaChannel,
    DmaTransReq  *pCurrReq)
{
    NvOsSemaphoreHandle hSignalSema = NULL;
    NvU16 NextHeadIndex;
    DmaTransReq  *pNextReq = NULL;

    // Acknowledge and clear the channel interrupt first.
    pDmaChannel->pHwInterface->DmaHwAckNClearInterruptFxn(&pCurrReq->DmaChanRegs);

    // Bytes remain from a split request: the next chunk is programmed
    // only on the pong interrupt; the ping interrupt just flips the flag.
    if (pCurrReq->BytesRemaining)
    {
        if (pCurrReq->TransferMode & RmDmaTransferMode_PingIntMode)
        {
            pCurrReq->TransferMode &= ~RmDmaTransferMode_PingIntMode;
            pDmaChannel->pHwInterface->DmaContinueRemainingTransferFxn(pDmaChannel);
        }
        else
        {
            pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
        }
        return;
    }

    // Peek at the next queued request, if any.
    NextHeadIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex;
    if (NextHeadIndex != DMA_NULL_INDEX)
        pNextReq = &pDmaChannel->pTransReqList[NextHeadIndex];

    // Ping (half-buffer) interrupt: chain the next request into the
    // hardware, signal the half-complete semaphore, and flip to pong.
    if (pCurrReq->TransferMode & RmDmaTransferMode_PingIntMode)
    {
        if (NextHeadIndex != DMA_NULL_INDEX)
        {
            pDmaChannel->pHwInterface->DmaHwContinueTransferFxn(&pNextReq->DmaChanRegs);
            pNextReq->State = RmDmaRequestState_Running;
            pNextReq->TransferMode |= RmDmaTransferMode_PingIntMode;
        }
        pDmaChannel->pHwInterface->DmaHwAddTransferCountFxn(&pCurrReq->DmaChanRegs);

        if (pCurrReq->hOnHalfDmaCompleteSema)
            NvOsSemaphoreSignal(pCurrReq->hOnHalfDmaCompleteSema);


        pCurrReq->TransferMode &= ~RmDmaTransferMode_PingIntMode;
        return;
    }

    pCurrReq->State = RmDmaRequestState_Completed;

    // Store the semaphore which needs to be signalled.
    hSignalSema = pCurrReq->hOnDmaCompleteSema;

    if (!pNextReq)
    {
        // Same-buffer continuous mode (e.g. UART receive): keep cycling
        // over the same buffer instead of retiring the request.
        if (pCurrReq->TransferMode & RmDmaTransferMode_SameBuff)
        {
            pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
            pCurrReq->State = RmDmaRequestState_Running;
            if (hSignalSema)
                NvOsSemaphoreSignal(pCurrReq->hOnDmaCompleteSema);
            pDmaChannel->pHwInterface->DmaHwAddTransferCountFxn(&pCurrReq->DmaChanRegs);
            return;
        }
        else
        {
            // No more work: stop the hardware and move the request from
            // the pending list to the free list.
            pDmaChannel->pHwInterface->DmaHwStopTransferFxn(&pCurrReq->DmaChanRegs);

            pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
            pDmaChannel->HeadFreeIndex = pDmaChannel->HeadReqIndex;
            pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
            pDmaChannel->TailReqIndex = DMA_NULL_INDEX;

            // If channel is marked as free then make this channel available
            // for next allocation.
            if (pDmaChannel->ChannelState == RmDmaChannelState_MarkedFree)
                pDmaChannel->ChannelState = RmDmaChannelState_Free;
        }
    }
    else
    {
        // Retire the completed head request and promote the next one.
        pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
        pDmaChannel->HeadFreeIndex = pDmaChannel->HeadReqIndex;
        pDmaChannel->HeadReqIndex = NextHeadIndex;

        // The next request may not have been chained by a preceding ping
        // interrupt; if not, start it now.
        if (pNextReq->State != RmDmaRequestState_Running)
        {
            // Start the next request transfer.
            pDmaChannel->pHwInterface->DmaHwContinueTransferFxn(&pNextReq->DmaChanRegs);
            pNextReq->State = RmDmaRequestState_Running;
            pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
        }
    }

    // Notify the client that the current transfer completed.
    if (hSignalSema)
        NvOsSemaphoreSignal(hSignalSema);
}



#if NVOS_IS_LINUX
/**
 * APB DMA interrupt handler (per-channel registration): each channel has
 * its own handler instance, so `args` is the interrupting channel.
 */
static void ApbDmaIsr(void *args)
{
    RmDmaChannel *pDmaChannel = (RmDmaChannel *)args;
    DmaTransReq *pCurrReq;
    NvBool IsTransferComplete;

    NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
    // Spurious interrupt or the request was already retired: nothing to do.
    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
        goto exit;

    pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
    if (pCurrReq->State != RmDmaRequestState_Running)
        goto exit;

    IsTransferComplete = pDmaChannel->pHwInterface->DmaHwIsTransferCompletedFxn(
                                            &pCurrReq->DmaChanRegs);
    if (IsTransferComplete) {
        // Dispatch to the continuous- or once-mode completion handler.
        if (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)
            OnDmaCompleteInContinuousMode(pDmaChannel, pCurrReq);
        else
            OnDmaCompleteInOnceMode(pDmaChannel, pCurrReq);
    }

exit:
    NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    NvRmInterruptDone(pDmaChannel->hIntrHandle);
}
#else
/**
 * APB DMA interrupt handler (single registration for the whole module):
 * scan every channel and service the ones whose transfer completed.
 */
static void ApbDmaIsr(void *args)
{
    RmDmaChannel *pDmaChannel;
    DmaTransReq *pCurrReq;
    NvU32 ChanIndex;
    NvBool IsTransferComplete;

    for (ChanIndex = 0; ChanIndex < s_DmaInfo.NumApbDmaChannels; ++ChanIndex)
    {
        pDmaChannel = &s_DmaInfo.pListApbDmaChannel[ChanIndex];
        // Cheap unlocked pre-check; rechecked below under the mutex.
        if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
            continue;

        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
        if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
            goto NextLoop;

        pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
        if (pCurrReq->State != RmDmaRequestState_Running)
            goto NextLoop;

        IsTransferComplete = pDmaChannel->pHwInterface->DmaHwIsTransferCompletedFxn(
                                                &pCurrReq->DmaChanRegs);
        if (!IsTransferComplete)
            goto NextLoop;

        // Dispatch to the continuous- or once-mode completion handler.
        if (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)
            OnDmaCompleteInContinuousMode(pDmaChannel, pCurrReq);
        else
            OnDmaCompleteInOnceMode(pDmaChannel, pCurrReq);

    NextLoop:
        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    }

    NvRmInterruptDone(s_ApbDmaInterruptHandle);
}
#endif


/**
 * Register apb Dma interrupt.
 */
/**
 * Register the APB DMA interrupt handler(s) and enable per-channel
 * interrupts.
 *
 * On Linux one handler instance is registered per unreserved channel;
 * otherwise a single handler is registered on the main IRQ of the APB DMA
 * sub-interrupt controller.
 *
 * @param hDevice RM device handle used for interrupt registration.
 * @return NvSuccess, or the first registration error encountered.
 */
static NvError RegisterAllDmaInterrupt(NvRmDeviceHandle hDevice)
{
    NvRmModuleID ModuleId = NvRmPrivModuleID_ApbDma;
    NvError Error = NvSuccess;
    NvOsInterruptHandler DmaIntHandler = ApbDmaIsr;
    NvU32 Irq = 0;
    NvU32 i;

    /* Disable interrupts for all channels while handlers are installed. */
    for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
    {
        NvRmPrivDmaInterruptEnable(hDevice, i, NV_FALSE);
    }

#if NVOS_IS_LINUX
    /* Register the same interrupt handler for all APB DMA channels.
     * Stop on the first failure: previously the loop kept going, so a
     * failed registration could be masked by a later successful one and
     * the function would wrongly report success. The caller cleans up via
     * DestroyDmaInfo()/UnregisterAllDmaInterrupt() on error. */
    for (i=0; i < NvRmDmaUnreservedChannels(); i++)
    {
        Irq = NvRmGetIrqForLogicalInterrupt(hDevice, ModuleId, i);
        Error = NvRmInterruptRegister(hDevice, 1, &Irq,
            &DmaIntHandler, &s_DmaInfo.pListApbDmaChannel[i],
            &(s_DmaInfo.pListApbDmaChannel[i].hIntrHandle), NV_TRUE);
        if (Error != NvSuccess)
            break;
    }
#else
    /* Register one interrupt handler for all APB DMA channels.
     * Pass index 0xFF to get the main IRQ of the APB DMA sub-interrupt
     * controller. */
    Irq = NvRmGetIrqForLogicalInterrupt(hDevice, ModuleId, 0xFF);
    Error = NvRmInterruptRegister(hDevice, 1, &Irq,
            &DmaIntHandler, hDevice, &s_ApbDmaInterruptHandle, NV_TRUE);

#endif

    if (Error != NvSuccess) return Error;

    /* Enable interrupts for all channels. */
    for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
    {
        NvRmPrivDmaInterruptEnable(hDevice, i, NV_TRUE);
    }
    return Error;
}

/**
 * Unregister apb Dma interrupts.
 */
/**
 * Unregister the APB DMA interrupt handler(s).
 *
 * @param hDevice RM device handle the interrupts were registered with.
 */
static void UnregisterAllDmaInterrupt(NvRmDeviceHandle hDevice)
{
#if NVOS_IS_LINUX
    NvU32 i;

    for (i=0; i < NvRmDmaUnreservedChannels(); i++)
    {
        NvRmInterruptUnregister(hDevice,
            s_DmaInfo.pListApbDmaChannel[i].hIntrHandle);
        /* Clear the stored handle so a repeated teardown cannot unregister
         * the same interrupt twice (matches the non-Linux branch below). */
        s_DmaInfo.pListApbDmaChannel[i].hIntrHandle = NULL;
    }
#else
    NvRmInterruptUnregister(hDevice, s_ApbDmaInterruptHandle);
    s_ApbDmaInterruptHandle = NULL;
#endif
}

/**
 * Destroy the DMA information. It releases all the memory and OS resources
 * which were allocated to create the DMA information.
 * PENDING: What happens if there is an outstanding data transfer request
 * when DeInit() is called?
 *
 */
static void DestroyDmaInfo(void)
{
    // Unregister the dma interrupts first so no ISR runs during teardown.
    UnregisterAllDmaInterrupt(s_DmaInfo.hDevice);

    // Disable the channels and clock, and free the channel lists.
    DeInitDmas();

    // Destroy the channel-allocation mutex.
    NvOsMutexDestroy(s_DmaInfo.hDmaAllocMutex);
    s_DmaInfo.hDmaAllocMutex = NULL;

    // Unmap the dma general hw register address.
    DeInitDmaGeneralHwRegsAddress();
}

/**
 * Create the DMA information and set up the DMA channels to their initial
 * state. It enables all DMA channels, builds the list of DMA channels,
 * initializes the register addresses, creates resources for channel
 * allocation, and brings the DMA driver into a known state.
 *
 * It creates all the mutexes which are used for the DMA channels, registers
 * the interrupts, enables the clock, and resets the DMA channels.
 *
 * Each step is verified; if a step fails, the resources created so far are
 * released and an error is returned.
 *
 */
static NvError CreateDmaInfo(NvRmDeviceHandle hDevice)
{
    NvError Error = NvSuccess;

    s_DmaInfo.hDevice = hDevice;
    s_DmaInfo.NumApbDmaChannels =
            NvRmModuleGetNumInstances(hDevice, NvRmPrivModuleID_ApbDmaChannel);

    NV_ASSERT(s_DmaInfo.NumApbDmaChannels > 0);
    NV_ASSERT(s_DmaInfo.NumApbDmaChannels <= MAX_APB_DMA_CHANNELS);

    // Map the dma general hw register addresses.
    Error = InitDmaGeneralHwRegsAddress();

    // Create the channel-allocation mutex.
    if (!Error)
        Error = NvOsMutexCreate(&s_DmaInfo.hDmaAllocMutex);

    // Initialize the dma channels (lists, clock, reset, global enable).
    if (!Error)
        Error = InitDmas(hDevice);

    // Register for the dma interrupts.
    if (!Error)
        Error = RegisterAllDmaInterrupt(hDevice);

    // NOTE(review): on failure DestroyDmaInfo() tears down every stage,
    // including ones that never ran here — presumably the DeInit/Unregister
    // routines tolerate partially-initialized state; verify.
    if (Error)
        DestroyDmaInfo();
    return Error;
}

/**
 * Start the dma transfer from the head request of the dma channels.
 * Thread Safety: Caller responsibilty.
 */
static void StartDmaTransfer(RmDmaChannel *pDmaChannel)
{
    // Fetch the request at the head of the channel's pending queue.
    DmaTransReq *pHeadReq =
                &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];

    // Mark it running and kick off the hardware transfer.
    pHeadReq->State = RmDmaRequestState_Running;
    pDmaChannel->pHwInterface->DmaHwStartTransferFxn(&pHeadReq->DmaChanRegs);
}

/**
 * Stop the current transfer on dma channel immediately.
 *
 * Thread Safety: It is caller responsibility.
 */
static void StopDmaTransfer(RmDmaChannel *pDmaChannel)
{
    DmaTransReq *pHeadReq;

    // Nothing queued on this channel: nothing to stop.
    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
        return;

    // Only a request that is actually running can be stopped.
    pHeadReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
    if (pHeadReq->State != RmDmaRequestState_Running)
        return;

    // Halt the hardware and record that the request was stopped.
    pDmaChannel->pHwInterface->DmaHwStopTransferFxn(&pHeadReq->DmaChanRegs);
    pHeadReq->State = RmDmaRequestState_Stopped;
}

/**
 * Set the mode of the data transfer whether this is once mode or continuous mode
 * or single buffering or double buffering mode.
 */
static void
SetApbDmaSpecialTransferMode(
    NvRmDmaHandle hDma,
    NvBool IsSourceAddPerip,
    DmaTransReq *pCurrReq)
{
    RmDmaChannel *pChannel = hDma->pDmaChannel;

    // Low priority channels never use the special (continuous) modes.
    if (pChannel->Priority == NvRmDmaPriority_Low)
        return;

    // I2S/SPDIF: continuous double-buffered transfer; no half-buffer
    // notification is used.
    if ((hDma->DmaReqModuleId == NvRmDmaModuleID_I2s) ||
        (hDma->DmaReqModuleId == NvRmDmaModuleID_Spdif))
    {
        pCurrReq->TransferMode |= (RmDmaTransferMode_Continuous |
                                   RmDmaTransferMode_DoubleBuff);
        pChannel->pHwInterface->DmaHwSetTransferModeFxn(
                            &pCurrReq->DmaChanRegs, NV_TRUE, NV_TRUE);
        pCurrReq->hOnHalfDmaCompleteSema = NULL;
        return;
    }

    // UART: only the receive direction (peripheral as source) supports
    // continuous double buffering over the same buffer; the client is
    // also signalled at every half-buffer completion.
    if ((hDma->DmaReqModuleId == NvRmDmaModuleID_Uart) && IsSourceAddPerip)
    {
        pCurrReq->TransferMode |= (RmDmaTransferMode_Continuous |
                                   RmDmaTransferMode_DoubleBuff |
                                   RmDmaTransferMode_SameBuff);
        pChannel->pHwInterface->DmaHwSetTransferModeFxn(
                            &pCurrReq->DmaChanRegs, NV_TRUE, NV_TRUE);
        pCurrReq->hOnHalfDmaCompleteSema = pCurrReq->hOnDmaCompleteSema;
    }
}

/**
 * Configure the current APB DMA transfer request into the request
 * structure.
 *
 * It validates the source and destination addresses for the DMA transfer.
 * It validates the address wrap and gets the address wrapping value.
 * It sets the AHB/APB address as per the DMA request.
 * It sets the direction of transfer and destination bit swap.
 *
 * It breaks the DMA transfer into multiple transfers if the requested
 * transfer size is more than the supported size of one DMA transfer.
 * Thread Safety: Not required as it does not access any shared information.
 *
 */
static NvError LogApbDmaTransferRequest(NvRmDmaHandle hDma, void *pCurrRequest)
{
    DmaTransReq *pReq = (DmaTransReq *)pCurrRequest;
    RmDmaChannel *pChannel = hDma->pDmaChannel;
    NvBool IsSrcPerip;
    NvBool IsDstPerip;
    NvBool IsDoubleBuff;

    // Classify each end of the transfer: exactly one side must be a
    // peripheral (APB) address for the APB dma to be usable.
    IsSrcPerip = NvRmPrivDmaHwIsValidPeripheralAddress(pReq->SourceAdd);
    IsDstPerip = NvRmPrivDmaHwIsValidPeripheralAddress(pReq->DestAdd);
    if (((IsSrcPerip == NV_TRUE) && (IsDstPerip == NV_TRUE)) ||
        ((IsSrcPerip == NV_FALSE) && (IsDstPerip == NV_FALSE)))
    {
        return NvError_NotSupported;
    }

    if (IsSrcPerip)
        pReq->TransferMode |= RmDmaTransferMode_SourcePeripheral;

    // Program the address wrapping for the source and destination.
    pChannel->pHwInterface->DmaHwSetAddressWrappingFxn(
                                &pReq->DmaChanRegs, pReq->SourceAddWrap,
                                pReq->DestAddWrap, pReq->BytesRequested,
                                IsSrcPerip);

    // Program the source and destination addresses.
    pChannel->pHwInterface->DmaHwConfigureAddressFxn(
                                &pReq->DmaChanRegs, pReq->SourceAdd,
                                pReq->DestAdd, IsSrcPerip);

    // The transfer direction follows which side is the peripheral.
    pChannel->pHwInterface->DmaHwSetDirectionFxn(&pReq->DmaChanRegs,
                        IsSrcPerip);

    // Asynchronous requests may select the continuous/double-buffered
    // special modes depending on the requestor module.
    if (pReq->TransferMode & RmDmaTransferMode_Asynch)
        SetApbDmaSpecialTransferMode(hDma, IsSrcPerip, pReq);

    // Byte swap, when enabled by the client, applies to the destination
    // side only.
    if (hDma->IsBitSwapEnable)
        pChannel->pHwInterface->DmaHwEnableDestBitSwapFxn(
                                &pReq->DmaChanRegs, IsDstPerip);

    // Burst size is derived from the requestor module and transfer size.
    pChannel->pHwInterface->DmaHwSetBurstSizeFxn(&pReq->DmaChanRegs,
                        hDma->DmaReqModuleId, pReq->BytesRequested);

    // Split the request into hardware-sized chunks: program the first
    // chunk now and track the remainder for later continuation.
    pReq->BytesCurrProgram = NV_MIN(pReq->BytesRequested, DMA_MAX_TRANSFER_SIZE);
    pReq->BytesRemaining = pReq->BytesRequested - pReq->BytesCurrProgram;

    IsDoubleBuff = (pReq->TransferMode & RmDmaTransferMode_DoubleBuff) ?
                                                        NV_TRUE : NV_FALSE;
    pChannel->pHwInterface->DmaHwSetTransferSizeFxn(&pReq->DmaChanRegs,
                pReq->BytesCurrProgram, IsDoubleBuff);
    return NvSuccess;
}


/**
 * Initialize the NvRm dma informations and allocates all resources.
 */
NvError NvRmPrivDmaInit(NvRmDeviceHandle hDevice)
{

    // Hook up the software-level entry points of the APB dma interface;
    // the hardware-level function pointers are filled in just below.
    s_ApbDmaInterface.DmaContinueRemainingTransferFxn = ApbDmaContinueRemainingTransfer;
    s_ApbDmaInterface.LogDmaTransferRequestFxn = LogApbDmaTransferRequest;

    NvRmPrivDmaInitDmaHwInterfaces(&s_ApbDmaInterface);

    // Create the dma information (channels, clocks, interrupts).
    return CreateDmaInfo(hDevice);
}

/**
 * Deinitialize the NvRm dma informations and frees all resources.
 */
void NvRmPrivDmaDeInit(void)
{
    // Release all dma resources created by NvRmPrivDmaInit().
    DestroyDmaInfo();
}


/**
 * Get the RmDma capabilities.
 */
NvError
NvRmDmaGetCapabilities(
    NvRmDeviceHandle hDevice,
    NvRmDmaCapabilities *pRmDmaCaps)
{
    NV_ASSERT(hDevice);
    NV_ASSERT(pRmDmaCaps);

    // Report the transfer-size granularity and address alignment the dma
    // hardware requires.
    pRmDmaCaps->DmaGranularitySize = DMA_TRANSFER_SIZE_ALIGNMENT;
    pRmDmaCaps->DmaAddressAlignmentSize = DMA_ADDRESS_ALIGNMENT;
    return NvSuccess;
}

/**
 * Allocate the dma handles.
 *
 * Implementation Details:
 * A high priority dma handle is allocated from the available free channels.
 * If there is no free channel then it returns an error. The high priority
 * dma requestor client owns the dma channel; such a channel is not shared
 * with other clients.
 *
 * A low priority dma handle is allocated from the low priority channels.
 * The allocation of the handle only fails if there is insufficient memory
 * to allocate it. A low priority dma requestor client shares the channel
 * with other low priority clients and so may see delayed responses.
 *
 * Validation of the parameters:
 * It allocates the memory for the dma handle; if memory allocation fails
 * then it returns an error.
 *
 * Thread safety: Thread safety is provided by locking the mutex for the dma
 * data. This prevents other threads from accessing the dma data, which is
 * required because a channel is allocated for high priority requests.
 *
 */
NvError
NvRmDmaAllocate(
    NvRmDeviceHandle  hRmDevice,
    NvRmDmaHandle    *phDma,
    NvBool            Enable32bitSwap,
    NvRmDmaPriority   Priority,
    NvRmDmaModuleID   DmaRequestorModuleId,
    NvU32             DmaRequestorInstanceId)
{
    NvError Error = NvSuccess;

    NvU32 UniqueId;
    RmDmaChannel *pDmaChannel = NULL;
    NvRmDmaHandle hNewDma = NULL;
    RmDmaChannel *pChannelList = NULL;
    NvU32 ChanIndex;

    NV_ASSERT(hRmDevice);
    NV_ASSERT(phDma);

    // Do not allow mem->mem DMAs, which use AHB DMA;
    NV_ASSERT(DmaRequestorModuleId != NvRmDmaModuleID_Memory);

    *phDma = NULL;

    // Reject requestor module ids outside the valid range.
    if ((DmaRequestorModuleId == NvRmDmaModuleID_Invalid) ||
        (DmaRequestorModuleId >= NvRmDmaModuleID_Max))
    {
        return NvError_InvalidSourceId;
    }

    // Create a unique Id for each allocation from the requestor module
    // and instance.
    UniqueId = ((DmaRequestorModuleId << 24) | (DmaRequestorInstanceId << 16) |
                    (NvRmDmaModuleID_Memory << 8));

    // Allocate the memory for the new dma handle.
    hNewDma = NvOsAlloc(sizeof(*hNewDma));

    // If memory allocation fails then return an error.
    if (!hNewDma)
        return NvError_InsufficientMemory;

    // Initialize the allocated memory area with 0.
    NvOsMemset(hNewDma, 0, sizeof(*hNewDma));

    // Log all requestor information in the dma handle for future reference.
    hNewDma->DmaReqModuleId = DmaRequestorModuleId;
    hNewDma->DmaReqInstId = DmaRequestorInstanceId;
    hNewDma->IsBitSwapEnable = Enable32bitSwap;
    hNewDma->hRmDevice = hRmDevice;
    hNewDma->UniqueId = UniqueId;
    hNewDma->pDmaChannel = NULL;
    hNewDma->hSyncSema = NULL;

    // Create the semaphore used to wait for synchronous transfers.
    Error = NvOsSemaphoreCreate(&hNewDma->hSyncSema, 0);

    // On error free the allocation and return.
    if (Error)
        goto ErrorExit;

    // Configure the dma channel configuration registers as per requestor.
    s_ApbDmaInterface.DmaHwInitRegistersFxn(&hNewDma->DmaChRegs,
                            DmaRequestorModuleId, DmaRequestorInstanceId);

    // For a high priority request, allocate an exclusive channel from the
    // free channels. For low priority the channel is shared across low
    // priority clients (it stays in the Free state below, so subsequent
    // low priority allocations can find it again).

    // Search the channel list for a free channel of the requested
    // priority.
    pChannelList = s_DmaInfo.pListApbDmaChannel;

    // Going to access data which is shared across different threads.
    NvOsMutexLock(s_DmaInfo.hDmaAllocMutex);

    for (ChanIndex = 0; ChanIndex < NvRmDmaUnreservedChannels(); ++ChanIndex)
    {
        pDmaChannel = &pChannelList[ChanIndex];
        if ((Priority == pDmaChannel->Priority) && (pDmaChannel->ChannelState == RmDmaChannelState_Free))
            break;
        pDmaChannel = NULL;
    }

    // No matching free channel was found.
    if (!pDmaChannel)
    {
        NvOsMutexUnlock(s_DmaInfo.hDmaAllocMutex);
        Error = NvError_DmaChannelNotAvailable;
        goto ErrorExit;
    }

    // A high priority client takes exclusive ownership of the channel.
    if (NvRmDmaPriority_High == Priority)
        pDmaChannel->ChannelState = RmDmaChannelState_Used;

    NvOsMutexUnlock(s_DmaInfo.hDmaAllocMutex);

    // Attach the dma channel to the dma handle.
    hNewDma->pDmaChannel = pDmaChannel;
    hNewDma->DmaChRegs.pHwDmaChanReg = pDmaChannel->pVirtChannelAdd;

    *phDma = hNewDma;
    return Error;

ErrorExit:
    // NvOsSemaphoreDestroy is a no-op for a NULL handle, so this is safe
    // even when semaphore creation itself failed.
    NvOsSemaphoreDestroy(hNewDma->hSyncSema);
    NvOsFree(hNewDma);
    return Error;
}


/**
 * Free the dma handle which was allocated to the user.
 * Implementation Details:
 * For a high priority dma handle, mark the channel free if it has pending
 * transfer requests. If there is no pending request then release the channel
 * and add it to the free list so that it can be allocated to other clients.
 *
 * For a low priority dma handle, it deletes the handle only. The low
 * priority dma requestor does not own the channel, so the channel is not
 * added to the free list.
 *
 * Thread safety: Done inside the function.
 *
 */
void NvRmDmaFree(NvRmDmaHandle hDma)
{
    RmDmaChannel *pDmaChannel = NULL;

    // NULL handle: nothing to free.
    if (!hDma)
        return;

    // Get the dma channel attached to this handle.
    pDmaChannel = hDma->pDmaChannel;

    // High priority handles own their channel, so it is released here.
    // Low priority handles only delete the handle; the shared channel is
    // not owned by the requestor.

    if (NvRmDmaPriority_High == pDmaChannel->Priority)
    {
        // Thread safety: block the ISR and other requests for this channel.
        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);

        // If transfers are still queued, only mark the channel; the
        // completion path frees it after the last transfer is done.
        // Otherwise free the channel immediately so it is available for
        // the next allocation.
        if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
            pDmaChannel->ChannelState = RmDmaChannelState_MarkedFree;
        else
        {
            // No pending request: the channel can be handed out again
            // right away.
            pDmaChannel->ChannelState = RmDmaChannelState_Free;
        }
        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    }

    // Release the semaphore created for synchronous operation.
    NvOsSemaphoreDestroy(hDma->hSyncSema);

    // Free the dma handle memory.
    NvOsFree(hDma);
}


/**
 * Start the dma transfer. The request is queued if there is already a
 * request outstanding on the dma channel.
 * It supports both synchronous and asynchronous requests.
 *
 * For a sync operation, it waits until timeout or until the data transfer
 * completes, whichever happens first.
 *
 * For an async operation it queues the request, starts it if no other data
 * transfer is in progress on the channel, and returns to the caller. It is
 * the caller's responsibility to synchronize the request. On completion,
 * the semaphore passed along with the request is signalled.
 * If no semaphore is passed, the request is still queued, but no semaphore
 * is signalled on completion.
 *
 * Thread safety: The thread safety is provided inside the function.
 *
 */

NvError
NvRmDmaStartDmaTransfer(
    NvRmDmaHandle       hDma,
    NvRmDmaClientBuffer *pClientBuffer,
    NvRmDmaDirection    DmaDirection,
    NvU32               WaitTimeoutInMS,
    NvOsSemaphoreHandle       AsynchSemaphoreId)
{
    DmaTransReq  *pCurrReq = NULL;
    RmDmaChannel *pDmaChannel = NULL;
    NvOsSemaphoreHandle hOnCompleteSema = NULL;
    NvOsSemaphoreHandle hClonedSemaphore = NULL;
    NvError Error = NvSuccess;
    NvU16 FreeIndex;
    NvU16 PrevIndex;
    NvU16 NextIndex;

    NV_ASSERT(hDma);
    NV_ASSERT(pClientBuffer);

    // Get the dma channel attached to this handle.
    pDmaChannel = hDma->pDmaChannel;

    // Validate the source and destination address alignment.
    NV_ASSERT(!(pClientBuffer->SourceBufferPhyAddress & (DMA_ADDRESS_ALIGNMENT-1)));
    NV_ASSERT(!(pClientBuffer->DestinationBufferPhyAddress & (DMA_ADDRESS_ALIGNMENT-1)));

    // Validate the transfer size granularity.
    NV_ASSERT(!(pClientBuffer->TransferSize & (DMA_TRANSFER_SIZE_ALIGNMENT-1)));

    // Select the completion semaphore: the handle's private semaphore for
    // a synchronous request (WaitTimeoutInMS != 0), or a clone of the
    // client's semaphore for an asynchronous one (cloned so the client
    // may safely destroy its copy).
    if (WaitTimeoutInMS)
    {
         hOnCompleteSema = hDma->hSyncSema;
    }
    else
    {
        if (AsynchSemaphoreId)
        {
            Error = NvOsSemaphoreClone(AsynchSemaphoreId, &hClonedSemaphore);
            if (Error)
                return Error;
            hOnCompleteSema = hClonedSemaphore;
        }
    }

    NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
    // Grow the request pool when the free list is exhausted.
    if (pDmaChannel->HeadFreeIndex == DMA_NULL_INDEX)
    {
        Error = AllocateReqList(pDmaChannel, pDmaChannel->MaxReqList);
        if (Error)
            goto Exit;
    }

    pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadFreeIndex];

    // Delete the semaphore which was cloned by the last request that used
    // this list entry.
    NvOsSemaphoreDestroy(pCurrReq->hLastReqSema);
    pCurrReq->hLastReqSema = NULL;


    // Fill in the request information.
    pCurrReq->UniqueId = hDma->UniqueId;
    pCurrReq->TransferMode = RmDmaTransferMode_PingIntMode;
    pCurrReq->State = RmDmaRequestState_NotStarted;
    pCurrReq->hOnDmaCompleteSema = hOnCompleteSema;
    pCurrReq->hOnHalfDmaCompleteSema = NULL;

    if (!WaitTimeoutInMS)
        pCurrReq->TransferMode |= RmDmaTransferMode_Asynch;

    // Forward direction uses source -> destination as given; reverse
    // swaps the two (addresses and wrap sizes).
    if (DmaDirection ==  NvRmDmaDirection_Forward)
    {
       pCurrReq->SourceAdd = pClientBuffer->SourceBufferPhyAddress;
       pCurrReq->DestAdd = pClientBuffer->DestinationBufferPhyAddress;
       pCurrReq->SourceAddWrap = pClientBuffer->SourceAddressWrapSize;
       pCurrReq->DestAddWrap = pClientBuffer->DestinationAddressWrapSize;
    }
    else
    {
       pCurrReq->SourceAdd = pClientBuffer->DestinationBufferPhyAddress;
       pCurrReq->DestAdd = pClientBuffer->SourceBufferPhyAddress;;
       pCurrReq->SourceAddWrap = pClientBuffer->DestinationAddressWrapSize;
       pCurrReq->DestAddWrap = pClientBuffer->SourceAddressWrapSize;
    }

    pCurrReq->BytesRequested = pClientBuffer->TransferSize;
    pCurrReq->BytesCurrProgram = 0;
    pCurrReq->BytesRemaining = 0;

    // Copy the client-specific register configuration from the handle
    // into the current request.
    pCurrReq->DmaChanRegs.ControlReg = hDma->DmaChRegs.ControlReg;
    pCurrReq->DmaChanRegs.AhbSequenceReg = hDma->DmaChRegs.AhbSequenceReg;
    pCurrReq->DmaChanRegs.ApbSequenceReg = hDma->DmaChRegs.ApbSequenceReg;
    pCurrReq->DmaChanRegs.XmbSequenceReg = hDma->DmaChRegs.XmbSequenceReg;
    pCurrReq->DmaChanRegs.pHwDmaChanReg = hDma->pDmaChannel->pVirtChannelAdd;


    // Configure the registers as per the current data request.
    Error = hDma->pDmaChannel->pHwInterface->LogDmaTransferRequestFxn(hDma, pCurrReq);
    if (Error)
        goto Exit;

    // Move the entry from the free list to the tail of the pending list;
    // start the transfer immediately if the channel was idle.
    FreeIndex = pDmaChannel->HeadFreeIndex;
    pDmaChannel->HeadFreeIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadFreeIndex].NextIndex;

    PrevIndex = pDmaChannel->TailReqIndex;
    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
    {
        pDmaChannel->HeadReqIndex = FreeIndex;
        pDmaChannel->TailReqIndex = FreeIndex;
        pDmaChannel->pTransReqList[FreeIndex].NextIndex = DMA_NULL_INDEX;
        StartDmaTransfer(pDmaChannel);
    }
    else
    {
        pDmaChannel->pTransReqList[pDmaChannel->TailReqIndex].NextIndex = FreeIndex;
        pDmaChannel->pTransReqList[FreeIndex].NextIndex = DMA_NULL_INDEX;
        pDmaChannel->pTransReqList[FreeIndex].PrevIndex = pDmaChannel->TailReqIndex;
        pDmaChannel->TailReqIndex = FreeIndex;
    }

    // Asynchronous request: keep the cloned semaphore with the request
    // (it is destroyed when this list entry is reused) and return.
    if (!WaitTimeoutInMS)
    {
        pCurrReq->hLastReqSema = hClonedSemaphore;
        goto Exit;
    }
    NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);

    // Not worrying about the wait error as the state of the request will
    // decide the status of the transfer.
    (void)NvOsSemaphoreWaitTimeout(hOnCompleteSema, WaitTimeoutInMS);

    // Lock the channel to access the request.
    NvOsIntrMutexLock(pDmaChannel->hIntrMutex);

    // Check the state of the current transfer after the wait.
    switch (pCurrReq->State)
    {
        case RmDmaRequestState_NotStarted :
            // Timed out before the request ever started: unlink it from
            // the pending list and return it to the free list.
            NextIndex = pDmaChannel->pTransReqList[FreeIndex].NextIndex;
            pDmaChannel->pTransReqList[FreeIndex].NextIndex = pDmaChannel->HeadFreeIndex;
            pDmaChannel->HeadFreeIndex = FreeIndex;
            if (PrevIndex == DMA_NULL_INDEX)
            {
                pDmaChannel->HeadReqIndex = NextIndex;
                if (NextIndex == DMA_NULL_INDEX)
                    pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
            }
            else
            {
                pDmaChannel->pTransReqList[PrevIndex].NextIndex = NextIndex;
                if (NextIndex != DMA_NULL_INDEX)
                    pDmaChannel->pTransReqList[NextIndex].PrevIndex = PrevIndex;
            }
            Error =  NvError_Timeout;
            break;

        case RmDmaRequestState_Running:
            // Timed out while running: stop the hardware, retire this
            // request, and start the next queued one (if any).
            StopDmaTransfer(pDmaChannel);
            if (pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex
                            == DMA_NULL_INDEX)
            {
                pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
                pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
            }
            else
            {
                pDmaChannel->HeadReqIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex;
            }
            pDmaChannel->pTransReqList[FreeIndex].NextIndex = pDmaChannel->HeadFreeIndex;
            pDmaChannel->HeadFreeIndex = FreeIndex;

            // If there are more requests then start the next transfer now.
            if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
                StartDmaTransfer(pDmaChannel);
            Error =  NvError_Timeout;
            break;


        case RmDmaRequestState_Completed:
            // Transfer completed within the timeout.
            Error =  NvSuccess;
            break;

        default:
            NV_ASSERT(!"Client Request is in the invalid state");
            break;
    }

Exit:
    NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    // On failure the cloned semaphore (if any) was not kept by a request,
    // so destroy it here to avoid a leak.
    if (Error)
        NvOsSemaphoreDestroy(hClonedSemaphore);

    return Error;
}

/**
 * It Immediately stop the dma transfer in the channel, delete all the request
 * from the queue,
 * Free all the memory of requests.
 *
 * Thread safety: During killing of all request, the channel specific data
 * access is locked to avoid the access of these data by the other thread.
 * This provide the thread safety.
 *
 * For async queued request, the semaphore Id which was passed with start
 * transfer request are not destroyed. This is the caller responsibility to
 * destroy all the semaphore which was passed.
 *
 */
void NvRmDmaAbort(NvRmDmaHandle hDma)
{
    NvU16 ReqIndex;
    NvU16 NextIndex;
    NvU16 PrevIndex;
    RmDmaChannel *pDmaChannel = NULL;
    NvBool IsRequireToStart = NV_FALSE;

    // If null dma handle then return.
    if (!hDma)
        return;

    // Get the dma channel pointer.
    pDmaChannel = hDma->pDmaChannel;

    // The way the requests are killed depends on the priority of the dma:
    // a high-priority channel is owned exclusively by this client, so the
    // whole queue is flushed; a low-priority channel is shared, so only the
    // requests carrying this client's UniqueId are removed.
    if (NvRmDmaPriority_High == pDmaChannel->Priority)
    {
        // Stop the dma transfer.
        StopDmaTransfer(pDmaChannel);

        // Kill all requests.
        // Lock the channel related database to avoid access by other clients.
        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);

        ReqIndex = pDmaChannel->HeadReqIndex;
        while (ReqIndex != DMA_NULL_INDEX)
        {
            NextIndex = pDmaChannel->pTransReqList[ReqIndex].NextIndex;
            if (pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema)
            {
                NvOsSemaphoreDestroy(pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema);
                pDmaChannel->pTransReqList[ReqIndex].hLastReqSema = NULL;
                pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema = NULL;
            }

            // Push the entry onto the free list. This must be done
            // unconditionally: the old code skipped the link when the free
            // list was empty, leaving a stale NextIndex on the new free-list
            // head (an unterminated free list).
            pDmaChannel->pTransReqList[ReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
            pDmaChannel->HeadFreeIndex = ReqIndex;
            ReqIndex = NextIndex;
        }
        pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
        pDmaChannel->TailReqIndex = DMA_NULL_INDEX;

        // Unlock the channel related database so that it can be accessed by
        // other clients.
        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    }
    else
    {
        // Lock the channel access mutex.
        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);

        // Check whether the abort request is for the currently running
        // transfer or not. Identification is done via the unique Id.
        // Guard against an empty queue before dereferencing the head entry
        // (HeadReqIndex may be DMA_NULL_INDEX).
        IsRequireToStart = NV_FALSE;
        if ((pDmaChannel->HeadReqIndex != DMA_NULL_INDEX) &&
            (pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].UniqueId ==
                                                            hDma->UniqueId))
        {
            // The running request must be aborted, so stop the dma channel.
            StopDmaTransfer(pDmaChannel);
            IsRequireToStart = NV_TRUE;
        }

        // Walk the queue and unlink every request owned by this client.
        ReqIndex = pDmaChannel->HeadReqIndex;
        PrevIndex = DMA_NULL_INDEX;
        while (ReqIndex != DMA_NULL_INDEX)
        {
            NextIndex = pDmaChannel->pTransReqList[ReqIndex].NextIndex;
            if (pDmaChannel->pTransReqList[ReqIndex].UniqueId == hDma->UniqueId)
            {
                if (pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema)
                {
                    NvOsSemaphoreDestroy(pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema);
                    pDmaChannel->pTransReqList[ReqIndex].hLastReqSema = NULL;
                    pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema = NULL;
                }
                // Unlink the node. Only advance the head when the removed
                // node actually is the head; the old code advanced the head
                // on the first iteration unconditionally, dropping another
                // client's request.
                if (PrevIndex != DMA_NULL_INDEX)
                    pDmaChannel->pTransReqList[PrevIndex].NextIndex = NextIndex;
                else
                    pDmaChannel->HeadReqIndex = NextIndex;

                if (NextIndex == DMA_NULL_INDEX)
                    pDmaChannel->TailReqIndex = PrevIndex;
                else
                    pDmaChannel->pTransReqList[NextIndex].PrevIndex = PrevIndex;

                // Return the node to the free list.
                pDmaChannel->pTransReqList[ReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
                pDmaChannel->HeadFreeIndex = ReqIndex;
                // Do NOT advance PrevIndex over a removed node: the previous
                // surviving node is still the predecessor of whatever comes
                // next. (The old code advanced it and later patched links
                // through the freed entry.)
            }
            else
            {
                PrevIndex = ReqIndex;
            }
            ReqIndex = NextIndex;
        }

        // If requests remain and the running one was stopped, restart the
        // channel on the new head request.
        if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
        {
            if (IsRequireToStart)
                StartDmaTransfer(pDmaChannel);
        }
        // Unlock the channel access mutex.
        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
    }
}

#define DEBUG_GET_COUNT 0
NvError NvRmDmaGetTransferredCount(
    NvRmDmaHandle hDma,
    NvU32 *pTransferCount,
    NvBool IsTransferStop )
{
    RmDmaChannel *pChannel;
    DmaTransReq *pHeadReq = NULL;
    NvError Error = NvSuccess;
#if DEBUG_GET_COUNT
    NvBool IsPrint = NV_TRUE;
#endif

    NV_ASSERT(hDma);
    NV_ASSERT(pTransferCount);

    pChannel = hDma->pDmaChannel;
    NvOsIntrMutexLock(pChannel->hIntrMutex);

    // Empty queue: nothing is in flight, report the size of the last
    // completed request and return success.
    if (pChannel->HeadReqIndex == DMA_NULL_INDEX)
    {
        *pTransferCount = pChannel->LastReqSize;
#if DEBUG_GET_COUNT
        NvOsDebugPrintf("RmDmaGetTransCount ERROR1\n");
#endif
        goto ErrorExit;
    }

    // The count can only be read while the head request is either running
    // or stopped; any other state is invalid for this query.
    pHeadReq = &pChannel->pTransReqList[pChannel->HeadReqIndex];
    if ((pHeadReq->State != RmDmaRequestState_Running) &&
            (pHeadReq->State != RmDmaRequestState_Stopped))
    {
        Error = NvError_InvalidState;
#if DEBUG_GET_COUNT
        NvOsDebugPrintf("RmDmaGetTransCount ERROR\n");
#endif
        goto ErrorExit;
    }

    if (IsTransferStop && (pHeadReq->State == RmDmaRequestState_Running))
    {
        // Caller wants the transfer halted: read the count while stopping,
        // record the new state, then stop the hardware channel.
        *pTransferCount = pChannel->pHwInterface->DmaHwGetTransferredCountWithStopFxn(
                            &pHeadReq->DmaChanRegs, NV_TRUE);
        pHeadReq->State = RmDmaRequestState_Stopped;
        pChannel->pHwInterface->DmaHwStopTransferFxn(&pHeadReq->DmaChanRegs);
    }
    else if ((!IsTransferStop) && (pHeadReq->State == RmDmaRequestState_Stopped))
    {
        // Caller wants the transfer running again: restart the hardware and
        // report zero bytes transferred since the restart.
        pHeadReq->State = RmDmaRequestState_Running;
        pChannel->pHwInterface->DmaHwStartTransferFxn(&pHeadReq->DmaChanRegs);
        *pTransferCount = 0;
#if DEBUG_GET_COUNT
        IsPrint = NV_FALSE;
#endif
    }
    else
    {
        // State already matches the request (stopped+stop or running+run):
        // just sample the current transferred count.
        *pTransferCount = pChannel->pHwInterface->DmaHwGetTransferredCountFxn(
                                    &pHeadReq->DmaChanRegs);
    }

#if DEBUG_GET_COUNT
          NvOsDebugPrintf("RmDmaGetTransCount() TransferCount 0x%08x \n", *pTransferCount);
#endif

ErrorExit:
    NvOsIntrMutexUnlock(pChannel->hIntrMutex);
    return Error;
}

NvBool NvRmDmaIsDmaTransferCompletes(
    NvRmDmaHandle hDma,
    NvBool IsFirstHalfBuffer)
{
    // Unsupported in the OS-level driver: always trips the assert on debug
    // builds and reports "not complete" on release builds.
    NV_ASSERT(!"NvRmDmaIsDmaTransferCompletes is not supported in the os level driver");
    return NV_FALSE;
}


/**
 * Power-suspend hook: globally disables all APB DMA channels and then gates
 * the module clock. The clock-control result is intentionally ignored
 * (best-effort during suspend).
 *
 * Returns NvSuccess always.
 */
NvError NvRmPrivDmaSuspend(void)
{
    // Globally disable the dma channels. NOTE: "(void)" replaces the
    // obsolescent empty parameter list "()"; callers are unaffected.
    s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
                                                        NV_FALSE);
    // Disable clocks.
    (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice, NvRmPrivModuleID_ApbDma,
                                                        0, NV_FALSE);
    return NvSuccess;
}

/**
 * Power-resume hook: globally re-enables all APB DMA channels and ungates
 * the module clock. The clock-control result is intentionally ignored
 * (best-effort during resume).
 *
 * Returns NvSuccess always.
 */
NvError NvRmPrivDmaResume(void)
{
    // Globally enable the dma channels. NOTE: "(void)" replaces the
    // obsolescent empty parameter list "()"; callers are unaffected.
    s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
                                                        NV_TRUE);
    // Enable clocks.
    (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice, NvRmPrivModuleID_ApbDma,
                                                        0, NV_TRUE);
    return NvSuccess;
}