From f79f0faf65fedc6d8ef44c5aff3b673ae0d1793c Mon Sep 17 00:00:00 2001
From: Gabriel Craciunescu <crazy@frugalware.org>
Date: Mon, 9 Apr 2018 20:57:38 +0200
Subject: [PATCH] Revert "swiotlb: remove various exports"

This reverts commit 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec.

I know NVIDIA ... but let's be nice.
Until this is sorted out, revert the whole commit.
See: https://bugzilla.kernel.org/show_bug.cgi?id=198997
---
lib/swiotlb.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..0039b7c5e690 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -605,6 +605,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
return tlb_addr;
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
/*
* Allocates bounce buffer and returns its kernel virtual address.
@@ -674,6 +675,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
@@ -705,6 +707,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
BUG();
}
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
size_t size)
@@ -881,6 +884,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
+EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -921,6 +925,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
{
unmap_single(hwdev, dev_addr, size, dir, attrs);
}
+EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -958,6 +963,7 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -965,6 +971,7 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -1016,6 +1023,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
return nelems;
}
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -1035,6 +1043,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -1062,6 +1071,7 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -1069,12 +1079,14 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
@@ -1087,6 +1099,7 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
+EXPORT_SYMBOL(swiotlb_dma_supported);
#ifdef CONFIG_DMA_DIRECT_OPS
void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
--
2.17.0
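
Why the exports matter: a function compiled into the core kernel can only be referenced from a loadable module if it is exported with EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL(); otherwise modpost reports the reference as undefined and the module fails to load with "Unknown symbol". The following hypothetical out-of-tree module is not part of the patch — the file and function names are invented for illustration, written against a ~4.16 tree — and merely sketches that dependency by taking the address of swiotlb_dma_supported(), which only resolves once this revert is applied.

/* swiotlb_export_demo.c - hypothetical out-of-tree module, for illustration
 * only. It just takes the address of swiotlb_dma_supported(); resolving that
 * reference at load time requires the EXPORT_SYMBOL() restored above. */
#include <linux/module.h>
#include <linux/swiotlb.h>

static int __init swiotlb_export_demo_init(void)
{
	/* %ps prints the resolved symbol name; no device is touched here. */
	pr_info("swiotlb_export_demo: resolved %ps\n", swiotlb_dma_supported);
	return 0;
}

static void __exit swiotlb_export_demo_exit(void)
{
}

module_init(swiotlb_export_demo_init);
module_exit(swiotlb_export_demo_exit);

/* Licensing also matters: the swiotlb_tbl_* and (un)map_page helpers come
 * back above as EXPORT_SYMBOL_GPL(), so only GPL-compatible modules may use
 * them, while the plain EXPORT_SYMBOL() entries are usable by any module. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustration of the swiotlb export dependency");

Such a module builds with the usual external-module Makefile (obj-m += swiotlb_export_demo.o) against the patched tree; in-tree users of swiotlb are unaffected either way, since they are linked directly rather than resolved through the module symbol table.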