Android 10 Automotive Audio Architecture: AudioTrack

xref: /frameworks/base/media/java/android/media/AudioTrack.java

598      private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,

599              int mode, int sessionId, boolean offload)

600                      throws IllegalArgumentException {

601          super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);

602          // mState already == STATE_UNINITIALIZED

603  

604          mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

605  

606          if (format == null) {

607              throw new IllegalArgumentException("Illegal null AudioFormat");

608          }

609  

610          // Check if we should enable deep buffer mode

611          if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {

612              mAttributes = new AudioAttributes.Builder(mAttributes)

613                  .replaceFlags((mAttributes.getAllFlags()

614                          | AudioAttributes.FLAG_DEEP_BUFFER)

615                          & ~AudioAttributes.FLAG_LOW_LATENCY)

616                  .build();

617          }

618  

619          // remember which looper is associated with the AudioTrack instantiation

620          Looper looper;

621          if ((looper = Looper.myLooper()) == null) {

622              looper = Looper.getMainLooper();

623          }

624  

625          int rate = format.getSampleRate();

626          if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {

627              rate = 0;

628          }

629  

630          int channelIndexMask = 0;

631          if ((format.getPropertySetMask()

632                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {

633              channelIndexMask = format.getChannelIndexMask();

634          }

635          int channelMask = 0;

636          if ((format.getPropertySetMask()

637                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {

638              channelMask = format.getChannelMask();

639          } else if (channelIndexMask == 0) { // if no masks at all, use stereo

640              channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT

641                      | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;

642          }

643          int encoding = AudioFormat.ENCODING_DEFAULT;

644          if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {

645              encoding = format.getEncoding();

646          }

647          audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);

648          mOffloaded = offload;

649          mStreamType = AudioSystem.STREAM_DEFAULT;

650  

651          audioBuffSizeCheck(bufferSizeInBytes);

652  

653          mInitializationLooper = looper;

654  

655          if (sessionId < 0) {

656              throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);

657          }

658  

659          int[] sampleRate = new int[] {mSampleRate};

660          int[] session = new int[1];

661          session[0] = sessionId;

662          // native initialization

663          int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,

664                  sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,

665                  mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,

666                  offload);

667          if (initResult != SUCCESS) {

668              loge("Error code "+initResult+" when initializing AudioTrack.");

669              return; // with mState == STATE_UNINITIALIZED

670          }

671  

672          mSampleRate = sampleRate[0];

673          mSessionId = session[0];

674  

675          if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {

676              int frameSizeInBytes;

677              if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {

678                  frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);

679              } else {

680                  frameSizeInBytes = 1;

681              }

682              mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;

683          }

684  

685          if (mDataLoadMode == MODE_STATIC) {

686              mState = STATE_NO_STATIC_DATA;

687          } else {

688              mState = STATE_INITIALIZED;

689          }

690  

691          baseRegisterPlayer();

692      }

693  

694      /**

695       * A constructor which explicitly connects a Native (C++) AudioTrack. For use by

696       * the AudioTrackRoutingProxy subclass.

697       * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack

698       * (associated with an OpenSL ES player).

699       * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,

700       * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj

701       * it means that the OpenSL player interface hasn't been realized, so there is no native

702       * Audiotrack to connect to. In this case wait to call deferred_connect() until the

703       * OpenSLES interface is realized.

704       */

705      /*package*/ AudioTrack(long nativeTrackInJavaObj) {

706          super(new AudioAttributes.Builder().build(),

707                  AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);

708          // "final"s

709          mNativeTrackInJavaObj = 0;

710          mJniData = 0;

711  

712          // remember which looper is associated with the AudioTrack instantiation

713          Looper looper;

714          if ((looper = Looper.myLooper()) == null) {

715              looper = Looper.getMainLooper();

716          }

717          mInitializationLooper = looper;

718  

719          // other initialization...

720          if (nativeTrackInJavaObj != 0) {

721              baseRegisterPlayer();

722              deferred_connect(nativeTrackInJavaObj);

723          } else {

724              mState = STATE_UNINITIALIZED;

725          }

726      }

xref: /frameworks/base/core/jni/android_media_AudioTrack.cpp

216  static jint

217  android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,

218          jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,

219          jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,

220          jlong nativeAudioTrack, jboolean offload) {

221  

222      ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"

223          " nativeAudioTrack=0x%" PRIX64 ", offload=%d",

224          jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,

225          nativeAudioTrack, offload);

226  

227      sp<AudioTrack> lpTrack = 0;

228  

229      if (jSession == NULL) {

230          ALOGE("Error creating AudioTrack: invalid session ID pointer");

231          return (jint) AUDIO_JAVA_ERROR;

232      }

233  

234      jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);

235      if (nSession == NULL) {

236          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");

237          return (jint) AUDIO_JAVA_ERROR;

238      }

239      audio_session_t sessionId = (audio_session_t) nSession[0];

240      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

241      nSession = NULL;

242  

243      AudioTrackJniStorage* lpJniStorage = NULL;

244  

245      jclass clazz = env->GetObjectClass(thiz);

246      if (clazz == NULL) {

247          ALOGE("Can't find %s when setting up callback.", kClassPathName);

248          return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

249      }

250  

251      // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.

252      if (nativeAudioTrack == 0) {

253          if (jaa == 0) {

254              ALOGE("Error creating AudioTrack: invalid audio attributes");

255              return (jint) AUDIO_JAVA_ERROR;

256          }

257  

258          if (jSampleRate == 0) {

259              ALOGE("Error creating AudioTrack: invalid sample rates");

260              return (jint) AUDIO_JAVA_ERROR;

261          }

262  

263          int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);

264          int sampleRateInHertz = sampleRates[0];

265          env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

266  

267          // Invalid channel representations are caught by !audio_is_output_channel() below.

268          audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(

269                  channelPositionMask, channelIndexMask);

270          if (!audio_is_output_channel(nativeChannelMask)) {

271              ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);

272              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;

273          }

274  

275          uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

276  

277          // check the format.

278          // This function was called from Java, so we compare the format against the Java constants

279          audio_format_t format = audioFormatToNative(audioFormat);

280          if (format == AUDIO_FORMAT_INVALID) {

281              ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);

282              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;

283          }

284  

285          // compute the frame count

286          size_t frameCount;

287          if (audio_has_proportional_frames(format)) {

288              const size_t bytesPerSample = audio_bytes_per_sample(format);

289              frameCount = buffSizeInBytes / (channelCount * bytesPerSample);

290          } else {

291              frameCount = buffSizeInBytes;

292          }

293  

294          // create the native AudioTrack object

295          lpTrack = new AudioTrack();

296  

297          // read the AudioAttributes values

298          auto paa = JNIAudioAttributeHelper::makeUnique();

299          jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());

300          if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {

301              return jStatus;

302          }

303          ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",

304                  paa->usage, paa->content_type, paa->flags, paa->tags);

305  

306          // initialize the callback information:

307          // this data will be passed with every AudioTrack callback

308          lpJniStorage = new AudioTrackJniStorage();

309          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);

310          // we use a weak reference so the AudioTrack object can be garbage collected.

311          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);

312          lpJniStorage->mCallbackData.isOffload = offload;

313          lpJniStorage->mCallbackData.busy = false;

314  

315          audio_offload_info_t offloadInfo;

316          if (offload == JNI_TRUE) {

317              offloadInfo = AUDIO_INFO_INITIALIZER;

318              offloadInfo.format = format;

319              offloadInfo.sample_rate = sampleRateInHertz;

320              offloadInfo.channel_mask = nativeChannelMask;

321              offloadInfo.has_video = false;

322              offloadInfo.stream_type = AUDIO_STREAM_MUSIC; //required for offload

323          }

324  

325          // initialize the native AudioTrack object

326          status_t status = NO_ERROR;

327          switch (memoryMode) {

328          case MODE_STREAM:

329              status = lpTrack->set(

330                      AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)

331                      sampleRateInHertz,

332                      format,// word length, PCM

333                      nativeChannelMask,

334                      offload ? 0 : frameCount,

335                      offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_NONE,

336                      audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)

337                      0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack

338                      0,// shared mem

339                      true,// thread can call Java

340                      sessionId,// audio session ID

341                      offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK : AudioTrack::TRANSFER_SYNC,

342                      offload ? &offloadInfo : NULL,

343                      -1, -1,                       // default uid, pid values

344                      paa.get());

345  

346              break;

347  

348          case MODE_STATIC:

349              // AudioTrack is using shared memory

350  

351              if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {

352                  ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");

353                  goto native_init_failure;

354              }

355  

356              status = lpTrack->set(

357                      AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)

358                      sampleRateInHertz,

359                      format,// word length, PCM

360                      nativeChannelMask,

361                      frameCount,

362                      AUDIO_OUTPUT_FLAG_NONE,

363                      audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));

364                      0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack

365                      lpJniStorage->mMemBase,// shared mem

366                      true,// thread can call Java

367                      sessionId,// audio session ID

368                      AudioTrack::TRANSFER_SHARED,

369                      NULL,                         // default offloadInfo

370                      -1, -1,                       // default uid, pid values

371                      paa.get());

372              break;

373  

374          default:

375              ALOGE("Unknown mode %d", memoryMode);

376              goto native_init_failure;

377          }

378  

379          if (status != NO_ERROR) {

380              ALOGE("Error %d initializing AudioTrack", status);

381              goto native_init_failure;

382          }

383      } else {  // end if (nativeAudioTrack == 0)

384          lpTrack = (AudioTrack*)nativeAudioTrack;

385          // TODO: We need to find out which members of the Java AudioTrack might

386          // need to be initialized from the Native AudioTrack

387          // these are directly returned from getters:

388          //  mSampleRate

389          //  mAudioFormat

390          //  mStreamType

391          //  mChannelConfiguration

392          //  mChannelCount

393          //  mState (?)

394          //  mPlayState (?)

395          // these may be used internally (Java AudioTrack.audioParamCheck():

396          //  mChannelMask

397          //  mChannelIndexMask

398          //  mDataLoadMode

399  

400          // initialize the callback information:

401          // this data will be passed with every AudioTrack callback

402          lpJniStorage = new AudioTrackJniStorage();

403          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);

404          // we use a weak reference so the AudioTrack object can be garbage collected.

405          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);

406          lpJniStorage->mCallbackData.busy = false;

407      }

408  

409      nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);

410      if (nSession == NULL) {

411          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");

412          goto native_init_failure;

413      }

414      // read the audio session ID back from AudioTrack in case we create a new session

415      nSession[0] = lpTrack->getSessionId();

416      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

417      nSession = NULL;

418  

419      {

420          const jint elements[1] = { (jint) lpTrack->getSampleRate() };

421          env->SetIntArrayRegion(jSampleRate, 0, 1, elements);

422      }

423  

424      {   // scope for the lock

425          Mutex::Autolock l(sLock);

426          sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);

427      }

428      // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field

429      // of the Java object (in mNativeTrackInJavaObj)

430      setAudioTrack(env, thiz, lpTrack);

431  

432      // save the JNI resources so we can free them later

433      //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);

434      env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

435  

436      // since we had audio attributes, the stream type was derived from them during the

437      // creation of the native AudioTrack: push the same value to the Java object

438      env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());

439  

440      return (jint) AUDIO_JAVA_SUCCESS;

441  

442      // failures:

443  native_init_failure:

444      if (nSession != NULL) {

445          env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

446      }

447      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);

448      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);

449      delete lpJniStorage;

450      env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

451  

452      // lpTrack goes out of scope, so reference count drops to zero

453      return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

454  }

1. The mode parameter:

       The constructor takes a mode parameter. MODE_STATIC means the application hands over all of the audio data in one shot; MODE_STREAM means the data is written incrementally rather than all at once. In android_media_AudioTrack_setup() in android_media_AudioTrack.cpp, the native track is first created with the no-argument constructor, lpTrack = new AudioTrack();

For MODE_STATIC, shared memory is then allocated via allocSharedMem(); for MODE_STREAM, no shared memory is allocated here.
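
To make the two transfer modes concrete, here is a minimal application-side sketch. It is only an illustration: the sample rate, channel layout, headroom factor and class name are my own assumptions, not values taken from the AOSP code above.

import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioTrack;

public class AudioTrackModes {
    // Illustrative parameter, not taken from the article.
    private static final int SAMPLE_RATE = 44100;

    static AudioTrack createStreamingTrack() {
        int minBuf = AudioTrack.getMinBufferSize(SAMPLE_RATE,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        // MODE_STREAM: no shared memory is allocated in the JNI layer;
        // data is pushed later with write().
        return new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build())
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(SAMPLE_RATE)
                        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        .build())
                .setTransferMode(AudioTrack.MODE_STREAM)
                .setBufferSizeInBytes(minBuf * 2)   // some headroom over the minimum
                .build();
    }

    static AudioTrack createStaticTrack(byte[] clip) {
        // MODE_STATIC: bufferSizeInBytes must hold the whole clip; the JNI
        // layer backs it with shared memory via allocSharedMem().
        AudioTrack track = new AudioTrack.Builder()
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(SAMPLE_RATE)
                        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                        .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                        .build())
                .setTransferMode(AudioTrack.MODE_STATIC)
                .setBufferSizeInBytes(clip.length)
                .build();
        track.write(clip, 0, clip.length);  // one-shot upload of the clip
        return track;
    }
}

Whichever transfer mode is chosen here is what arrives in android_media_AudioTrack_setup() as the memoryMode argument.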

xref: /frameworks/av/services/audioflinger/Tracks.cpp

496  // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held

497  AudioFlinger::PlaybackThread::Track::Track(

498              PlaybackThread *thread,

499              const sp<Client>& client,

500              audio_stream_type_t streamType,

501              const audio_attributes_t& attr,

502              uint32_t sampleRate,

503              audio_format_t format,

504              audio_channel_mask_t channelMask,

505              size_t frameCount,

506              void *buffer,

507              size_t bufferSize,

508              const sp<IMemory>& sharedBuffer,

509              audio_session_t sessionId,

510              pid_t creatorPid,

511              uid_t uid,

512              audio_output_flags_t flags,

513              track_type type,

514              audio_port_handle_t portId)

515      :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,

516                    (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,

517                    (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,

518                    sessionId, creatorPid, uid, true /*isOut*/,

519                    (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,

520                    type, portId),

521      mFillingUpStatus(FS_INVALID),

522      // mRetryCount initialized later when needed

523      mSharedBuffer(sharedBuffer),

524      mStreamType(streamType),

525      mMainBuffer(thread->sinkBuffer()),

526      mAuxBuffer(NULL),

527      mAuxEffectId(0), mHasVolumeController(false),

528      mPresentationCompleteFrames(0),

529      mFrameMap(16 /* sink-frame-to-track-frame map memory */),

530      mVolumeHandler(new media::VolumeHandler(sampleRate)),

531      mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(uid, attr, id(), streamType)),

532      // mSinkTimestamp

533      mFastIndex(-1),

534      mCachedVolume(1.0),

535      /* The track might not play immediately after being active, similarly as if its volume was 0.

536       * When the track starts playing, its volume will be computed. */

537      mFinalVolume(0.f),

538      mResumeToStopping(false),

539      mFlushHwPending(false),

540      mFlags(flags)

541  {

542      // client == 0 implies sharedBuffer == 0

543      ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));

544  

545      ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",

546              __func__, mId, sharedBuffer->pointer(), sharedBuffer->size());

547  

548      if (mCblk == NULL) {

549          return;

550      }

551  

552      if (sharedBuffer == 0) {

553          mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,

554                  mFrameSize, !isExternalTrack(), sampleRate);

555      } else {

556          mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,

557                  mFrameSize);

558      }

559      mServerProxy = mAudioTrackServerProxy;

560  

561      if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {

562          ALOGE("%s(%d): no more tracks available", __func__, mId);

563          return;

564      }

565      // only allocate a fast track index if we were able to allocate a normal track name

566      if (flags & AUDIO_OUTPUT_FLAG_FAST) {

567          // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential

568          // race with setSyncEvent(). However, if we call it, we cannot properly start

569          // static fast tracks (SoundPool) immediately after stopping.

570          //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();

571          ALOG_ASSERT(thread->mFastTrackAvailMask != 0);

572          int i = __builtin_ctz(thread->mFastTrackAvailMask);

573          ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);

574          // FIXME This is too eager.  We allocate a fast track index before the

575          //       fast track becomes active.  Since fast tracks are a scarce resource,

576          //       this means we are potentially denying other more important fast tracks from

577          //       being created.  It would be better to allocate the index dynamically.

578          mFastIndex = i;

579          thread->mFastTrackAvailMask &= ~(1 << i);

580      }

581  

582      mServerLatencySupported = thread->type() == ThreadBase::MIXER

583              || thread->type() == ThreadBase::DUPLICATING;

584  #ifdef TEE_SINK

585      mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)

586              + "_" + std::to_string(mId) + "_T");

587  #endif

588  

589      if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {

590          mAudioVibrationController = new AudioVibrationController(this);

591          mExternalVibration = new os::ExternalVibration(

592                  mUid, "" /* pkg */, mAttr, mAudioVibrationController);

593      }

594  }

xref: /frameworks/av/services/audioflinger/Tracks.cpp


64  // TrackBase constructor must be called with AudioFlinger::mLock held
65  AudioFlinger::ThreadBase::TrackBase::TrackBase(
66              ThreadBase *thread,
67              const sp<Client>& client,
68              const audio_attributes_t& attr,
69              uint32_t sampleRate,
70              audio_format_t format,
71              audio_channel_mask_t channelMask,
72              size_t frameCount,
73              void *buffer,
74              size_t bufferSize,
75              audio_session_t sessionId,
76              pid_t creatorPid,
77              uid_t clientUid,
78              bool isOut,
79              alloc_type alloc,
80              track_type type,
81              audio_port_handle_t portId)
82      :   RefBase(),
83          mThread(thread),
84          mClient(client),
85          mCblk(NULL),
86          // mBuffer, mBufferSize
87          mState(IDLE),
88          mAttr(attr),
89          mSampleRate(sampleRate),
90          mFormat(format),
91          mChannelMask(channelMask),
92          mChannelCount(isOut ?
93                  audio_channel_count_from_out_mask(channelMask) :
94                  audio_channel_count_from_in_mask(channelMask)),
95          mFrameSize(audio_has_proportional_frames(format) ?
96                  mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
97          mFrameCount(frameCount),
98          mSessionId(sessionId),
99          mIsOut(isOut),
100          mId(android_atomic_inc(&nextTrackId)),
101          mTerminated(false),
102          mType(type),
103          mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
104          mPortId(portId),
105          mIsInvalid(false),
106          mCreatorPid(creatorPid)
107  {
108      const uid_t callingUid = IPCThreadState::self()->getCallingUid();
109      if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
110          ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
111                  "%s(%d): uid %d tried to pass itself off as %d",
112                   __func__, mId, callingUid, clientUid);
113          clientUid = callingUid;
114      }
115      // clientUid contains the uid of the app that is responsible for this track, so we can blame
116      // battery usage on it.
117      mUid = clientUid;
118  
119      // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
120  
121      size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
122      // check overflow when computing bufferSize due to multiplication by mFrameSize.
123      if (minBufferSize < frameCount  // roundup rounds down for values above UINT_MAX / 2
124              || mFrameSize == 0   // format needs to be correct
125              || minBufferSize > SIZE_MAX / mFrameSize) {
126          android_errorWriteLog(0x534e4554, "34749571");
127          return;
128      }
129      minBufferSize *= mFrameSize;
130  
131      if (buffer == nullptr) {
132          bufferSize = minBufferSize; // allocated here.
133      } else if (minBufferSize > bufferSize) {
134          android_errorWriteLog(0x534e4554, "38340117");
135          return;
136      }
137  
138      size_t size = sizeof(audio_track_cblk_t);
139      if (buffer == NULL && alloc == ALLOC_CBLK) {
140          // check overflow when computing allocation size for streaming tracks.
141          if (size > SIZE_MAX - bufferSize) {
142              android_errorWriteLog(0x534e4554, "34749571");
143              return;
144          }
145          size += bufferSize;
146      }
147  
148      if (client != 0) {
149          mCblkMemory = client->heap()->allocate(size);
150          if (mCblkMemory == 0 ||
151                  (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
152              ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
153              client->heap()->dump("AudioTrack");
154              mCblkMemory.clear();
155              return;
156          }
157      } else {
158          mCblk = (audio_track_cblk_t *) malloc(size);
159          if (mCblk == NULL) {
160              ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
161              return;
162          }
163      }
164  
165      // construct the shared structure in-place.
166      if (mCblk != NULL) {
167          new(mCblk) audio_track_cblk_t();
168          switch (alloc) {
169          case ALLOC_READONLY: {
170              const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
171              if (roHeap == 0 ||
172                      (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
173                      (mBuffer = mBufferMemory->pointer()) == NULL) {
174                  ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
175                          __func__, mId, bufferSize);
176                  if (roHeap != 0) {
177                      roHeap->dump("buffer");
178                  }
179                  mCblkMemory.clear();
180                  mBufferMemory.clear();
181                  return;
182              }
183              memset(mBuffer, 0, bufferSize);
184              } break;
185          case ALLOC_PIPE:
186              mBufferMemory = thread->pipeMemory();
187              // mBuffer is the virtual address as seen from current process (mediaserver),
188              // and should normally be coming from mBufferMemory->pointer().
189              // However in this case the TrackBase does not reference the buffer directly.
190              // It should references the buffer via the pipe.
191              // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
192              mBuffer = NULL;
193              bufferSize = 0;
194              break;
195          case ALLOC_CBLK:
196              // clear all buffers
197              if (buffer == NULL) {
198                  mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
199                  memset(mBuffer, 0, bufferSize);
200              } else {
201                  mBuffer = buffer;
202  #if 0
203                  mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
204  #endif
205              }
206              break;
207          case ALLOC_LOCAL:
208              mBuffer = calloc(1, bufferSize);
209              break;
210          case ALLOC_NONE:
211              mBuffer = buffer;
212              break;
213          default:
214              LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
215          }
216          mBufferSize = bufferSize;
217  
218  #ifdef TEE_SINK
219          mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
220  #endif
221  
222      }
223  }

2. Sizing the shared memory

   Every application AudioTrack corresponds to one track in the PlaybackThread's mTracks list; sharedBuffer is the shared memory passed down from the application. If sharedBuffer is null, the track's base class TrackBase allocates the shared memory itself.
    If the application supplies its own buffer, only the control block is allocated, i.e. sizeof(audio_track_cblk_t); if it does not, the allocation is sizeof(audio_track_cblk_t) + bufferSize, so the control block and the data buffer share one region.
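
As a back-of-the-envelope companion to the TrackBase code above, a small sketch of the two sizing cases. The class and method names are hypothetical, and the real TrackBase additionally rounds frameCount up (roundup()) before multiplying; treat this as arithmetic only, not the actual implementation.

final class SharedMemSizing {
    // Streaming case: the app did not supply a buffer, so the server carves the
    // control block and the PCM buffer out of a single shared-memory allocation.
    static long streamingAllocationBytes(int bufferSizeInBytes, int channelCount,
                                         int bytesPerSample,
                                         int cblkSizeBytes /* sizeof(audio_track_cblk_t) */) {
        int frameSize = channelCount * bytesPerSample;    // mFrameSize
        long frameCount = bufferSizeInBytes / frameSize;  // computed in the JNI layer
        // (the real code also applies roundup() to frameCount first)
        return cblkSizeBytes + frameCount * frameSize;
    }

    // Static case: the app-provided shared memory already holds the data, so
    // the server only needs the control block.
    static long staticAllocationBytes(int cblkSizeBytes) {
        return cblkSizeBytes;
    }
}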
552      if (sharedBuffer == 0) {
553          mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
554                  mFrameSize, !isExternalTrack(), sampleRate);
555      } else {
556          mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
557                  mFrameSize);
558      }
559      mServerProxy = mAudioTrackServerProxy;
3. Managing the buffer
    If the application did not create a sharedBuffer (MODE_STREAM), the buffer is managed on the server side by an AudioTrackServerProxy; if the application did create a sharedBuffer (MODE_STATIC), it is managed by a StaticAudioTrackServerProxy.
xref: /frameworks/base/core/jni/android_media_AudioTrack.cpp
634  template <typename T>
635  static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
636                           jint offsetInSamples, jint sizeInSamples, bool blocking) {
637      // give the data to the native AudioTrack object (the data starts at the offset)
638      ssize_t written = 0;
639      // regular write() or copy the data to the AudioTrack's shared memory?
640      size_t sizeInBytes = sizeInSamples * sizeof(T);
641      if (track->sharedBuffer() == 0) {
642          written = track->write(data + offsetInSamples, sizeInBytes, blocking);
643          // for compatibility with earlier behavior of write(), return 0 in this case
644          if (written == (ssize_t) WOULD_BLOCK) {
645              written = 0;
646          }
647      } else {
648          // writing to shared memory, check for capacity
649          if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
650              sizeInBytes = track->sharedBuffer()->size();
651          }
652          memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
653          written = sizeInBytes;
654      }
655      if (written >= 0) {
656          return written / sizeof(T);
657      }
658      return interpretWriteSizeError(written);
659  }
xref: /frameworks/av/services/audioflinger/Tracks.cpp
786  // AudioBufferProvider interface
787  status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
788  {
789      ServerProxy::Buffer buf;
790      size_t desiredFrames = buffer->frameCount;
791      buf.mFrameCount = desiredFrames;
792      status_t status = mServerProxy->obtainBuffer(&buf);
793      buffer->frameCount = buf.mFrameCount;
794      buffer->raw = buf.mRaw;
795      if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused()) {
796          ALOGV("%s(%d): underrun,  framesReady(%zu) < framesDesired(%zd), state: %d",
797                  __func__, mId, buf.mFrameCount, desiredFrames, mState);
798          mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
799      } else {
800          mAudioTrackServerProxy->tallyUnderrunFrames(0);
801      }
802      return status;
803  }
4. Passing the buffer
    The application writes data toward the PlaybackThread through writeToTrack(); on the AudioFlinger side, AudioFlinger::PlaybackThread::Track::getNextBuffer() fetches that data via mServerProxy->obtainBuffer().
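
On the Java side, writeToTrack() is reached through AudioTrack.write(). A minimal streaming-write sketch follows; the chunk size and the PcmSource interface are hypothetical placeholders, not part of the framework code.

import android.media.AudioTrack;

final class StreamWriter {
    // App-side half of section 4: each write() call goes through writeToTrack()
    // into the native AudioTrack, and the mixer thread drains the data via
    // Track::getNextBuffer() / obtainBuffer().
    static void pump(AudioTrack track, PcmSource source) {
        byte[] chunk = new byte[4096];            // chunk size is an assumption
        track.play();
        int n;
        while ((n = source.read(chunk)) > 0) {
            int written = 0;
            while (written < n) {
                // WRITE_BLOCKING: blocks until the server has freed space.
                int w = track.write(chunk, written, n - written,
                        AudioTrack.WRITE_BLOCKING);
                if (w < 0) return;                // negative values are error codes
                written += w;
            }
        }
        track.stop();
    }

    // Placeholder for any PCM source (file, decoder, network); hypothetical.
    interface PcmSource {
        int read(byte[] out);                     // bytes read, <= 0 at end of stream
    }
}

Each write() ends up in one of the two branches of writeToTrack() above: a regular ring-buffer write for MODE_STREAM, or a memcpy into the shared memory for MODE_STATIC.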
xref: /frameworks/av/media/libaudioclient/AudioTrackShared.cpp
1109  __attribute__((no_sanitize("integer")))
1110  status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
1111  {
1112      if (mIsShutdown) {
1113          buffer->mFrameCount = 0;
1114          buffer->mRaw = NULL;
1115          buffer->mNonContig = 0;
1116          mUnreleased = 0;
1117          return NO_INIT;
1118      }
1119      ssize_t positionOrStatus = pollPosition();
1120      if (positionOrStatus < 0) {
1121          buffer->mFrameCount = 0;
1122          buffer->mRaw = NULL;
1123          buffer->mNonContig = 0;
1124          mUnreleased = 0;
1125          return (status_t) positionOrStatus;
1126      }
1127      size_t position = (size_t) positionOrStatus;
1128      size_t end = mState.mLoopCount != 0 ? mState.mLoopEnd : mFrameCount;
1129      size_t avail;
1130      if (position < end) {
1131          avail = end - position;
1132          size_t wanted = buffer->mFrameCount;
1133          if (avail < wanted) {
1134              buffer->mFrameCount = avail;
1135          } else {
1136              avail = wanted;
1137          }
1138          buffer->mRaw = &((char *) mBuffers)[position * mFrameSize];
1139      } else {
1140          avail = 0;
1141          buffer->mFrameCount = 0;
1142          buffer->mRaw = NULL;
1143      }
1144      // As mFramesReady is the total remaining frames in the static audio track,
1145      // it is always larger or equal to avail.
1146      LOG_ALWAYS_FATAL_IF(mFramesReady < (int64_t) avail,
1147              "%s: mFramesReady out of range, mFramesReady:%lld < avail:%zu",
1148              __func__, (long long)mFramesReady, avail);
1149      buffer->mNonContig = mFramesReady == INT64_MAX ? SIZE_MAX : clampToSize(mFramesReady - avail);
1150      if (!ackFlush) {
1151          mUnreleased = avail;
1152      }
1153      return NO_ERROR;
1154  }
5. Obtaining the buffer
    Depending on mSharedBuffer, different client/server proxy objects are created; here we take the static case (StaticAudioTrackClientProxy on the client side, StaticAudioTrackServerProxy on the server side) as the example. StaticAudioTrackServerProxy::obtainBuffer() is what hands out the next chunk of the buffer; the detailed position calculation is omitted here.
xref: /frameworks/av/media/libaudioclient/AudioTrackShared.cpp
1156  __attribute__((no_sanitize("integer")))
1157  void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
1158  {
1159      size_t stepCount = buffer->mFrameCount;
1160      LOG_ALWAYS_FATAL_IF(!((int64_t) stepCount <= mFramesReady),
1161              "%s: stepCount out of range, "
1162              "!(stepCount:%zu <= mFramesReady:%lld)",
1163              __func__, stepCount, (long long)mFramesReady);
1164      LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased),
1165              "%s: stepCount out of range, "
1166              "!(stepCount:%zu <= mUnreleased:%zu)",
1167              __func__, stepCount, mUnreleased);
1168      if (stepCount == 0) {
1169          // prevent accidental re-use of buffer
1170          buffer->mRaw = NULL;
1171          buffer->mNonContig = 0;
1172          return;
1173      }
1174      mUnreleased -= stepCount;
1175      audio_track_cblk_t* cblk = mCblk;
1176      size_t position = mState.mPosition;
1177      size_t newPosition = position + stepCount;
1178      int32_t setFlags = 0;
1179      if (!(position <= newPosition && newPosition <= mFrameCount)) {
1180          ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position,
1181                  mFrameCount);
1182          newPosition = mFrameCount;
1183      } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
1184          newPosition = mState.mLoopStart;
1185          if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
1186              setFlags = CBLK_LOOP_CYCLE;
1187          } else {
1188              setFlags = CBLK_LOOP_FINAL;
1189          }
1190      }
1191      if (newPosition == mFrameCount) {
1192          setFlags |= CBLK_BUFFER_END;
1193      }
1194      mState.mPosition = newPosition;
1195      if (mFramesReady != INT64_MAX) {
1196          mFramesReady -= stepCount;
1197      }
1198      mFramesReadySafe = clampToSize(mFramesReady);
1199  
1200      cblk->mServer += stepCount;
1201      mReleased += stepCount;
1202  
1203      // This may overflow, but client is not supposed to rely on it
1204      StaticAudioTrackPosLoop posLoop;
1205      posLoop.mBufferPosition = mState.mPosition;
1206      posLoop.mLoopCount = mState.mLoopCount;
1207      mPosLoopMutator.push(posLoop);
1208      if (setFlags != 0) {
1209          (void) android_atomic_or(setFlags, &cblk->mFlags);
1210          // this would be a good place to wake a futex
1211      }
1212  
1213      buffer->mFrameCount = 0;
1214      buffer->mRaw = NULL;
1215      buffer->mNonContig = 0;
1216  }
6. Releasing the buffer
    Once the mixer has consumed the frames it obtained, StaticAudioTrackServerProxy::releaseBuffer() advances the playback position (mState.mPosition), decrements mFramesReady, handles loop wrap-around, and raises flags such as CBLK_LOOP_CYCLE, CBLK_LOOP_FINAL and CBLK_BUFFER_END in the cblk so that the client side gets notified.
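
The loop state that obtainBuffer()/releaseBuffer() track (mLoopStart, mLoopEnd, mLoopCount) is driven from the Java API by AudioTrack.setLoopPoints(). A hedged sketch for a MODE_STATIC track; the frame values and loop count below are illustrative assumptions.

import android.media.AudioTrack;

final class StaticLoopExample {
    // The track must be in MODE_STATIC and already filled with the clip data
    // (see the MODE_STATIC sketch in section 1) before looping is configured.
    static void playLooped(AudioTrack staticTrack, int clipLengthInFrames) {
        // Loop the whole clip three extra times; on the server side this is
        // reflected in mState.mLoopStart / mLoopEnd / mLoopCount above.
        staticTrack.setLoopPoints(0 /* startInFrames */, clipLengthInFrames /* endInFrames */, 3);
        staticTrack.play();
    }
}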
7. Data synchronization
    In MODE_STREAM, data moving between the AudioTrack client and the PlaybackThread goes through a ring buffer; the audio_track_cblk_t control block in the shared memory keeps the producer (client) and consumer (server) positions in sync.
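
To illustrate the idea only, here is a simplified single-producer/single-consumer ring buffer in Java. It is a conceptual model, not the audio_track_cblk_t implementation: the real control block lives in shared memory, uses futex-based waiting, and is accessed through the client/server proxy classes shown above.

import java.util.concurrent.atomic.AtomicLong;

// Conceptual model of the MODE_STREAM ring buffer: the client advances a write
// counter, the server advances a read counter, and only their difference is used,
// so the counters themselves never need to wrap.
final class MiniRingBuffer {
    private final byte[] data;
    private final AtomicLong writePos = new AtomicLong();  // producer position
    private final AtomicLong readPos = new AtomicLong();   // consumer position

    MiniRingBuffer(int capacity) { data = new byte[capacity]; }

    // Client side: copy as much as currently fits, return bytes accepted.
    int write(byte[] src, int len) {
        long w = writePos.get(), r = readPos.get();
        int free = data.length - (int) (w - r);
        int n = Math.min(len, free);
        for (int i = 0; i < n; i++) {
            data[(int) ((w + i) % data.length)] = src[i];
        }
        writePos.set(w + n);   // publish after the data is in place
        return n;
    }

    // Server side: consume up to len bytes into dst, return bytes read.
    int read(byte[] dst, int len) {
        long w = writePos.get(), r = readPos.get();
        int avail = (int) (w - r);
        int n = Math.min(len, avail);
        for (int i = 0; i < n; i++) {
            dst[i] = data[(int) ((r + i) % data.length)];
        }
        readPos.set(r + n);    // free the space for the producer
        return n;
    }
}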