// Source: BufferAllocator.cpp — OpenGrok cross reference for /system/memory/libdmabufheap/BufferAllocator.cpp
/* 2 * Copyright (C) 2020 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define LOG_TAG "DMABUFHEAPS" 18 19 #include <BufferAllocator/BufferAllocator.h> 20 21 #include <errno.h> 22 #include <fcntl.h> 23 #include <ion/ion.h> 24 #include <linux/dma-buf.h> 25 #include <linux/dma-heap.h> 26 #include <linux/ion_4.12.h> 27 #include <stdlib.h> 28 #include <sys/types.h> 29 #include <unistd.h> 30 31 #include <shared_mutex> 32 #include <string> 33 #include <unordered_set> 34 35 #include <android-base/logging.h> 36 #include <android-base/unique_fd.h> 37 38 static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/"; 39 static constexpr char kIonDevice[] = "/dev/ion"; 40 static constexpr char kIonSystemHeapName[] = "ion_system_heap"; 41 42 void BufferAllocator::LogInterface(const std::string& interface) { 43 if (!logged_interface_) { 44 LOG(INFO) << "Using : " << interface; 45 logged_interface_ = true; 46 } 47 } 48 49 int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) { 50 std::shared_lock<std::shared_mutex> slock(dmabuf_heap_fd_mutex_); 51 52 /* Check if heap has already been opened. */ 53 auto it = dmabuf_heap_fds_.find(heap_name); 54 if (it != dmabuf_heap_fds_.end()) 55 return it->second; 56 57 slock.unlock(); 58 59 /* 60 * Heap device needs to be opened, use a unique_lock since dmabuf_heap_fd_ 61 * needs to be modified. 
62 */ 63 std::unique_lock<std::shared_mutex> ulock(dmabuf_heap_fd_mutex_); 64 65 /* 66 * Check if we already opened this heap again to prevent racing threads from 67 * opening the heap device multiple times. 68 */ 69 it = dmabuf_heap_fds_.find(heap_name); 70 if (it != dmabuf_heap_fds_.end()) return it->second; 71 72 std::string heap_path = kDmaHeapRoot + heap_name; 73 int fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC)); 74 if (fd < 0) return -errno; 75 76 LOG(INFO) << "Using DMA-BUF heap named: " << heap_name; 77 78 auto ret = dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)}); 79 CHECK(ret.second); 80 return fd; 81 } 82 83 void BufferAllocator::QueryIonHeaps() { 84 uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_); 85 if (uses_legacy_ion_iface_) { 86 LogInterface("Legacy ion heaps"); 87 MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED); 88 MapNameToIonMask(kDmabufSystemUncachedHeapName, ION_HEAP_SYSTEM_MASK); 89 return; 90 } 91 92 int heap_count; 93 int ret = ion_query_heap_cnt(ion_fd_, &heap_count); 94 if (ret == 0) { 95 ion_heap_info_.resize(heap_count, {}); 96 ret = ion_query_get_heaps(ion_fd_, heap_count, ion_heap_info_.data()); 97 } 98 99 // Abort if heap query fails 100 CHECK(ret == 0) 101 << "Non-legacy ION implementation must support heap information queries"; 102 LogInterface("Non-legacy ION heaps"); 103 104 /* 105 * No error checking here, it is possible that devices may have used another name for 106 * the ion system heap. 
107 */ 108 MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName, ION_FLAG_CACHED); 109 MapNameToIonName(kDmabufSystemUncachedHeapName, kIonSystemHeapName); 110 } 111 112 BufferAllocator::BufferAllocator() { 113 ion_fd_.reset(TEMP_FAILURE_RETRY(open(kIonDevice, O_RDONLY| O_CLOEXEC))); 114 if (ion_fd_ >= 0) 115 QueryIonHeaps(); 116 } 117 118 int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask, 119 unsigned int ion_heap_flags) { 120 if (!ion_heap_mask) 121 return -EINVAL; 122 IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags }; 123 124 std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_); 125 heap_name_to_config_[heap_name] = heap_config; 126 return 0; 127 } 128 129 int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) { 130 for (auto& it : ion_heap_info_) { 131 if (heap_name == it.name) { 132 *heap_id = it.heap_id; 133 return 0; 134 } 135 } 136 137 LOG(ERROR) << "No ion heap of name " << heap_name << " exists"; 138 return -EINVAL; 139 } 140 141 int BufferAllocator::MapNameToIonName(const std::string& heap_name, 142 const std::string& ion_heap_name, 143 unsigned int ion_heap_flags) { 144 unsigned int ion_heap_id = 0; 145 auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id); 146 if (ret < 0) 147 return ret; 148 149 unsigned int ion_heap_mask = 1 << ion_heap_id; 150 IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags }; 151 152 std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_); 153 heap_name_to_config_[heap_name] = heap_config; 154 155 return 0; 156 } 157 158 int BufferAllocator::MapNameToIonHeap(const std::string& heap_name, 159 const std::string& ion_heap_name, 160 unsigned int ion_heap_flags, 161 unsigned int legacy_ion_heap_mask, 162 unsigned int legacy_ion_heap_flags) { 163 /* if the DMA-BUF Heap exists, we can ignore ion mappings */ 164 int ret = OpenDmabufHeap(heap_name); 165 if (ret >= 0) 166 return 0; 167 168 if 
(uses_legacy_ion_iface_ || ion_heap_name == "") { 169 ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags); 170 } else if (!ion_heap_name.empty()) { 171 ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags); 172 } 173 174 return ret; 175 } 176 177 int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) { 178 int ret = 0; 179 180 std::shared_lock<std::shared_mutex> slock(heap_name_to_config_mutex_); 181 182 auto it = heap_name_to_config_.find(heap_name); 183 if (it != heap_name_to_config_.end()) { 184 heap_config = it->second; 185 return ret; 186 } 187 188 slock.unlock(); 189 190 if (uses_legacy_ion_iface_) { 191 ret = -EINVAL; 192 } else { 193 unsigned int heap_id; 194 ret = GetIonHeapIdByName(heap_name, &heap_id); 195 if (ret == 0) { 196 heap_config.mask = 1 << heap_id; 197 heap_config.flags = 0; 198 /* save it so that this lookup does not need to happen again */ 199 std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_); 200 heap_name_to_config_[heap_name] = heap_config; 201 } 202 } 203 204 if (ret) 205 LOG(ERROR) << "No ion heap of name " << heap_name << " exists"; 206 return ret; 207 } 208 209 int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) { 210 int fd = OpenDmabufHeap(heap_name); 211 if (fd < 0) return fd; 212 213 struct dma_heap_allocation_data heap_data{ 214 .len = len, // length of data to be allocated in bytes 215 .fd_flags = O_RDWR | O_CLOEXEC, // permissions for the memory to be allocated 216 }; 217 218 auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data)); 219 if (ret < 0) { 220 PLOG(ERROR) << "Unable to allocate from DMA-BUF heap: " << heap_name; 221 return ret; 222 } 223 224 return heap_data.fd; 225 } 226 227 int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len, 228 unsigned int heap_flags, size_t legacy_align) { 229 IonHeapConfig heap_config; 230 auto ret = GetIonConfig(heap_name, heap_config); 231 
if (ret) 232 return ret; 233 234 int alloc_fd = -1; 235 unsigned int flags = heap_config.flags | heap_flags; 236 ret = ion_alloc_fd(ion_fd_, len, legacy_align, heap_config.mask, flags, &alloc_fd); 237 if (ret) { 238 PLOG(ERROR) << "allocation fails for ion heap with mask: " << heap_config.mask 239 << " and flags: " << flags; 240 return ret; 241 } 242 return alloc_fd; 243 } 244 245 int BufferAllocator::Alloc(const std::string& heap_name, size_t len, 246 unsigned int heap_flags, size_t legacy_align) { 247 int fd = DmabufAlloc(heap_name, len); 248 249 if (fd < 0) 250 fd = IonAlloc(heap_name, len, heap_flags, legacy_align); 251 252 return fd; 253 } 254 255 int BufferAllocator::AllocSystem(bool cpu_access_needed, size_t len, unsigned int heap_flags, 256 size_t legacy_align) { 257 if (!cpu_access_needed) { 258 /* 259 * CPU does not need to access allocated buffer so we try to allocate in 260 * the 'system-uncached' heap after querying for its existence. 261 */ 262 static bool uncached_dmabuf_system_heap_support = [this]() -> bool { 263 auto dmabuf_heap_list = this->GetDmabufHeapList(); 264 return (dmabuf_heap_list.find(kDmabufSystemUncachedHeapName) != dmabuf_heap_list.end()); 265 }(); 266 267 if (uncached_dmabuf_system_heap_support) 268 return DmabufAlloc(kDmabufSystemUncachedHeapName, len); 269 270 static bool uncached_ion_system_heap_support = [this]() -> bool { 271 IonHeapConfig heap_config; 272 auto ret = this->GetIonConfig(kDmabufSystemUncachedHeapName, heap_config); 273 return (ret == 0); 274 }(); 275 276 if (uncached_ion_system_heap_support) 277 return IonAlloc(kDmabufSystemUncachedHeapName, len, heap_flags, legacy_align); 278 } 279 280 /* 281 * Either 1) CPU needs to access allocated buffer OR 2) CPU does not need to 282 * access allocated buffer but the "system-uncached" heap is unsupported. 
283 */ 284 return Alloc(kDmabufSystemHeapName, len, heap_flags, legacy_align); 285 } 286 287 int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd, 288 const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom, 289 void *legacy_ion_custom_data) { 290 if (!legacy_ion_cpu_sync_custom) 291 return ion_sync_fd(ion_fd_, dmabuf_fd); 292 293 // dup ion_fd_ so that we retain its ownership. 294 int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get())); 295 if (new_ion_fd < 0) { 296 PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd; 297 return new_ion_fd; 298 } 299 300 int ret = legacy_ion_cpu_sync_custom(new_ion_fd, dmabuf_fd, legacy_ion_custom_data); 301 302 close(new_ion_fd); 303 return ret; 304 } 305 306 int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type, 307 const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom, 308 void *legacy_ion_custom_data) { 309 if (uses_legacy_ion_iface_) { 310 return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom, 311 legacy_ion_custom_data); 312 } 313 314 struct dma_buf_sync sync = { 315 .flags = (start ? 
DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) | 316 static_cast<uint64_t>(sync_type), 317 }; 318 return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync)); 319 } 320 321 int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type, 322 const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom, 323 void *legacy_ion_custom_data) { 324 int ret = DoSync(dmabuf_fd, true /* start */, sync_type, legacy_ion_cpu_sync_custom, 325 legacy_ion_custom_data); 326 327 if (ret) PLOG(ERROR) << "CpuSyncStart() failure"; 328 return ret; 329 } 330 331 int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd, SyncType sync_type, 332 const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom, 333 void* legacy_ion_custom_data) { 334 int ret = DoSync(dmabuf_fd, false /* start */, sync_type, legacy_ion_cpu_sync_custom, 335 legacy_ion_custom_data); 336 if (ret) PLOG(ERROR) << "CpuSyncEnd() failure"; 337 338 return ret; 339 } 340 341 std::unordered_set<std::string> BufferAllocator::GetDmabufHeapList() { 342 std::unordered_set<std::string> heap_list; 343 std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(kDmaHeapRoot), closedir); 344 345 if (dir) { 346 struct dirent* dent; 347 while ((dent = readdir(dir.get()))) { 348 if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) continue; 349 350 heap_list.insert(dent->d_name); 351 } 352 } 353 354 return heap_list; 355 } 356 357 bool BufferAllocator::CheckIonSupport() { 358 static bool ion_support = (access(kIonDevice, R_OK) == 0); 359 360 return ion_support; 361 }