
Android Camera (3): The Basic Flow of MediaRecorder

Published: 2022-10-30 09:30:00

Android MediaRecorder

1. MediaRecorder overall architecture

1.1 MediaRecorder recording data-flow framework (original diagram not reproduced)

A quick walk-through of the data path:
1. The Camera uses at least two Surfaces: one for preview and one for recording. The record Surface is a PersistentSurface, and its GraphicBufferSource member (mBufferSource) is eventually referenced when the encoder is created.
2. CameraServer holds the producer side of both the record Surface and the preview Surface, so for both preview and recording CameraServer acts as the producer.
3. When a request is sent to CameraProvider, a buffer is first obtained with dequeueBuffer and handed to the HAL to fill; when the HAL returns the result, queueBuffer puts the filled buffer back into the BufferQueue. Following the BufferQueue mechanism, the consumer is then notified through its onFrameAvailable callback, calls acquireBuffer to consume the buffer, and calls releaseBuffer when it is done (an app-level analogue of this consumer contract is sketched after this list).
4. For preview, the consumer is SurfaceFlinger, which composites and displays the frame; for recording, the consumer is the encoder: the OMX encoder acquires the data and encodes it.
5. After encoding, the encoder delivers the encoded data back to MediaRecorder through callbacks in the framework's media server.
6. After start(), MediaRecorder runs one WriterThread and two TrackThreads (one for video, one for audio). When a TrackThread obtains video or audio data, it wraps the data into a Chunk structure and stores it in MPEG4Writer's member mChunks; the WriterThread then notices there is data to write and writes the chunks in mChunks to the file.
The summary above is based on the article "Android MediaRecorder简单梳理框架".
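Roughly speaking, the same producer/consumer contract that BufferQueue enforces inside CameraServer is visible at the app level through ImageReader, which sits on the consumer side of a BufferQueue. The sketch below only illustrates that contract (the resolution, format and buffer count are made-up values); it is not part of the MediaRecorder path itself:

import android.graphics.ImageFormat;
import android.media.Image;
import android.media.ImageReader;
import android.os.Handler;
import android.os.Looper;

class BufferQueueConsumerSketch {
    // The camera HAL acts as the producer (queueBuffer); ImageReader is the consumer:
    // onImageAvailable ~ onFrameAvailable, acquireNextImage ~ acquireBuffer,
    // Image.close() ~ releaseBuffer.
    static ImageReader createConsumer() {
        ImageReader reader = ImageReader.newInstance(
                1920, 1080, ImageFormat.YUV_420_888, /* maxImages= */ 3);
        reader.setOnImageAvailableListener(r -> {
            Image image = r.acquireNextImage();
            if (image != null) {
                // ... consume the frame ...
                image.close();
            }
        }, new Handler(Looper.getMainLooper()));
        return reader;
    }
}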

2. The main MediaRecorder flows

2.1 Creating the MediaRecorder

In section 4 of Android Camera (1), a MediaRecorder object is created.

/frameworks/base/media/java/android/media/MediaRecorder.java
141      @Deprecated
142      public MediaRecorder() {
143          this(ActivityThread.currentApplication());
144      }
145  
146      /**
147       * Default constructor.
148       *
149       * @param context Context the recorder belongs to
150       */
151      public MediaRecorder(@NonNull Context context) {
152          Objects.requireNonNull(context);
153          Looper looper;
154          if ((looper = Looper.myLooper()) != null) {
155              mEventHandler = new EventHandler(this, looper);
156          } else if ((looper = Looper.getMainLooper()) != null) {
157              mEventHandler = new EventHandler(this, looper);
158          } else {
159              mEventHandler = null;
160          }
161  
162          mChannelCount = 1;
163          /* Native setup requires a weak reference to our object.
164           * It's easier to create it here than in C++.
165           */
166          try (ScopedParcelState attributionSourceState = context.getAttributionSource()
167                  .asScopedParcelState()) {
168              native_setup(new WeakReference<>(this), ActivityThread.currentPackageName(),
169                      attributionSourceState.getParcel());
170          }
171      }
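A practical consequence of the Looper check in this constructor: mEventHandler is bound to the Looper of the thread that creates the MediaRecorder (falling back to the main Looper), so info and error callbacks are delivered on that thread. A minimal sketch of the common pattern, with made-up class and thread names, is to create the recorder on a dedicated HandlerThread:

import android.media.MediaRecorder;
import android.os.Handler;
import android.os.HandlerThread;

class RecorderThreadSketch {
    private final HandlerThread thread = new HandlerThread("RecorderThread");
    private Handler handler;
    private MediaRecorder recorder;

    void init() {
        thread.start();
        handler = new Handler(thread.getLooper());
        handler.post(() -> {
            // Created on a thread that owns a Looper, so mEventHandler binds to it
            // and OnInfoListener/OnErrorListener callbacks run on this thread.
            recorder = new MediaRecorder();
            recorder.setOnInfoListener((mr, what, extra) -> {
                // e.g. MEDIA_RECORDER_INFO_MAX_DURATION_REACHED
            });
        });
    }
}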

When the MediaRecorder class is loaded, the media_jni library is loaded and native_init is called to initialize the media JNI layer.

/frameworks/base/media/java/android/media/MediaRecorder.java
106      static {
107          System.loadLibrary("media_jni");
108          native_init();
109      }

The MediaRecorder constructor then calls native_setup.

native_init and native_setup are JNI functions that end up in the following two native functions:

/frameworks/base/media/jni/android_media_MediaRecorder.cpp
580  static void
581  android_media_MediaRecorder_native_init(JNIEnv *env)
582  {
583      jclass clazz;
584      // native_init only caches the Java field and method IDs in the native fields struct
585      clazz = env->FindClass("android/media/MediaRecorder");
586      if (clazz == NULL) {
587          return;
588      }
589  
590      fields.context = env->GetFieldID(clazz, "mNativeContext", "J");
591      if (fields.context == NULL) {
592          return;
593      }
594      
595      fields.surface = env->GetFieldID(clazz, "mSurface", "Landroid/view/Surface;");
596      if (fields.surface == NULL) {
597          return;
598      }
599  
600      jclass surface = env->FindClass("android/view/Surface");
601      if (surface == NULL) {
602          return;
603      }
604  
605      fields.post_event = env->GetStaticMethodID(clazz, "postEventFromNative",
606                                                 "(Ljava/lang/Object;IIILjava/lang/Object;)V");
607      if (fields.post_event == NULL) {
608          return;
609      }
610  
611      clazz = env->FindClass("java/util/ArrayList");
612      if (clazz == NULL) {
613          return;
614      }
615      gArrayListFields.add = env->GetMethodID(clazz, "add", "(Ljava/lang/Object;)Z");
616      gArrayListFields.classId = static_cast<jclass>(env->NewGlobalRef(clazz));
617  }
618  
619  
620  static void
621  android_media_MediaRecorder_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
622                                           jstring packageName, jobject jAttributionSource)
623  {
624      ALOGV("setup");
625  
626      Parcel* parcel = parcelForJavaObject(env, jAttributionSource);
627      android::content::AttributionSourceState attributionSource;
628      attributionSource.readFromParcel(parcel);
         // create the native-layer MediaRecorder
629      sp<MediaRecorder> mr = new MediaRecorder(attributionSource);
630  
631      if (mr == NULL) {
632          jniThrowException(env, "java/lang/RuntimeException", "Out of memory");
633          return;
634      }
635      if (mr->initCheck() != NO_ERROR) {
636          jniThrowException(env, "java/lang/RuntimeException", "Unable to initialize media recorder");
637          return;
638      }
639  
640      // create new listener and give it to MediaRecorder
         // create a callback listener; events are delivered to postEventFromNative in the Java layer
641      sp<JNIMediaRecorderListener> listener = new JNIMediaRecorderListener(env, thiz, weak_this);
642      mr->setListener(listener);
643  
644      // Convert client name jstring to String16
645      const char16_t *rawClientName = reinterpret_cast<const char16_t*>(
646          env->GetStringChars(packageName, NULL));
647      jsize rawClientNameLen = env->GetStringLength(packageName);
648      String16 clientName(rawClientName, rawClientNameLen);
649      env->ReleaseStringChars(packageName,
650                              reinterpret_cast<const jchar*>(rawClientName));
651  
652      // pass client package name for permissions tracking
653      mr->setClientName(clientName);
654  
655      setMediaRecorder(env, thiz, mr);
656  }

2.1.1 The native-layer MediaRecorder

/frameworks/av/media/libmedia/mediarecorder.cpp
763  MediaRecorder::MediaRecorder(const AttributionSourceState &attributionSource)
764          : mSurfaceMediaSource(NULL)
765  {
766      ALOGV("constructor");
767      // connect to MediaPlayerService over binder and ask it to create a MediaRecorder
768      const sp<IMediaPlayerService> service(getMediaPlayerService());
769      if (service != NULL) {
770          mMediaRecorder = service->createMediaRecorder(attributionSource);
771      }
772      if (mMediaRecorder != NULL) {
773          mCurrentState = MEDIA_RECORDER_IDLE;
774      }
775  
776      // reset several member variables to their defaults
777      doCleanUp();
778  }

From the code above, the native MediaRecorder asks MediaPlayerService to create the recorder. This is where we cross process boundaries, going from the app process to the media server process (MediaPlayerService runs in the media server process).

/frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
458  sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(
459          const AttributionSourceState& attributionSource)
460  {
461      // TODO b/182392769: use attribution source util
462      AttributionSourceState verifiedAttributionSource = attributionSource;
463      verifiedAttributionSource.uid = VALUE_OR_FATAL(
464        legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
465      verifiedAttributionSource.pid = VALUE_OR_FATAL(
466          legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
         // create a MediaRecorderClient and return it to the app process
467      sp<MediaRecorderClient> recorder =
468          new MediaRecorderClient(this, verifiedAttributionSource);
469      wp<MediaRecorderClient> w = recorder;
470      Mutex::Autolock lock(mLock);
471      mMediaRecorderClients.add(w);
472      ALOGV("Create new media recorder client from pid %s",
473          verifiedAttributionSource.toString().c_str());
474      return recorder;
475  }
/frameworks/av/media/libmediaplayerservice/MediaRecorderClient.cpp
380  MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service,
381          const AttributionSourceState& attributionSource)
382  {
383      ALOGV("Client constructor");
384      // attribution source already validated in createMediaRecorder
385      mAttributionSource = attributionSource;
         // create a StagefrightRecorder
386      mRecorder = new StagefrightRecorder(attributionSource);
387      mMediaPlayerService = service;
388  }

So the media recorder ultimately creates a StagefrightRecorder to do the actual recording work.

2.2 Configuring the MediaRecorder

The media recorder is configured mainly in section 4.2 of Android Camera (1), where a number of properties are set:

1146          mMediaRecorder.setCamera(camera);      // StagefrightRecorder::setCamera
1147          mMediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER); // StagefrightRecorder::setAudioSource
1148          mMediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);    // StagefrightRecorder::setVideoSource
1149          mMediaRecorder.setProfile(mProfile);   // sets a whole group of default parameters
1150          mMediaRecorder.setVideoSize(mProfile.videoFrameWidth, mProfile.videoFrameHeight); // StagefrightRecorder::setVideoSize
1151          mMediaRecorder.setMaxDuration(mMaxVideoDurationInMs); // StagefrightRecorder::setParameters
              ...
1203          Location loc = mLocationManager.getCurrentLocation();
1204          if (loc != null) {
1205              mMediaRecorder.setLocation((float) loc.getLatitude(),
1206                      (float) loc.getLongitude()); // StagefrightRecorder::setParameters
1207          }
1156          if (mVideoFileDescriptor != null) {
1157              mMediaRecorder.setOutputFile(mVideoFileDescriptor.getFileDescriptor()); // StagefrightRecorder::setOutputFile
1158          } else {
1159              releaseMediaRecorder();
1160              throw new RuntimeException("No valid video file descriptor");
1161          }
1169          try {
1170              mMediaRecorder.setMaxFileSize(maxFileSize); // StagefrightRecorder::setParameters
1171          } catch (RuntimeException exception) {
1172              // We are going to ignore failure of setMaxFileSize here, as
1173              // a) The composer selected may simply not support it, or
1174              // b) The underlying media framework may not handle 64-bit range
1175              // on the size restriction.
1176          }
1184          mMediaRecorder.setOrientationHint(rotation); // StagefrightRecorder::setParameters

From this code we can see that the VideoSource is set to CAMERA and the AudioSource to CAMCORDER. Because the VideoSource is not SURFACE, there is no need to pass a Surface to the recorder.
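For reference, a camcorder-style configuration with the legacy Camera API looks roughly like the sketch below. It is only an illustrative outline (the profile choice, file descriptor and rotation are placeholders), not the exact code of the app quoted above:

import android.hardware.Camera;
import android.media.CamcorderProfile;
import android.media.MediaRecorder;
import java.io.FileDescriptor;

class CamcorderSetupSketch {
    MediaRecorder configure(Camera camera, FileDescriptor fd, int rotation) throws Exception {
        camera.unlock();                                   // required before setCamera()
        MediaRecorder recorder = new MediaRecorder();
        recorder.setCamera(camera);                        // StagefrightRecorder::setCamera
        recorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
        recorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
        CamcorderProfile profile = CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH);
        recorder.setProfile(profile);                      // output format, encoders, bitrates
        recorder.setVideoSize(profile.videoFrameWidth, profile.videoFrameHeight);
        recorder.setOutputFile(fd);                        // StagefrightRecorder::setOutputFile
        recorder.setOrientationHint(rotation);             // via StagefrightRecorder::setParameters
        recorder.prepare();
        return recorder;
    }
}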

2.3 MediaRecorder.prepare

As described above, the MediaRecorder.prepare call splits into two cases:

1. MediaRecorder holds a Surface

2. MediaRecorder has no Surface

The main difference is in the JNI call path.
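Which branch runs is decided by the app: calling MediaRecorder.setPreviewDisplay(Surface) fills the Java mSurface field that the JNI code reads through fields.surface. A hedged illustration of the two cases:

import android.media.MediaRecorder;
import android.view.Surface;

class PrepareBranchSketch {
    // Case 1: MediaRecorder holds a preview surface, so the JNI prepare() first
    // calls StagefrightRecorder::setPreviewSurface() and then prepare().
    void prepareWithSurface(MediaRecorder recorder, Surface previewSurface) throws Exception {
        recorder.setPreviewDisplay(previewSurface);
        recorder.prepare();
    }

    // Case 2: no surface handed to MediaRecorder (preview is driven elsewhere,
    // e.g. directly by the Camera object), so only StagefrightRecorder::prepare() runs.
    void prepareWithoutSurface(MediaRecorder recorder) throws Exception {
        recorder.prepare();
    }
}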

/frameworks/base/media/jni/android_media_MediaRecorder.cpp
438  static void
439  android_media_MediaRecorder_prepare(JNIEnv *env, jobject thiz)
440  {
441      ALOGV("prepare");
442      sp<MediaRecorder> mr = getMediaRecorder(env, thiz);
443      if (mr == NULL) {
444          jniThrowException(env, "java/lang/IllegalStateException", NULL);
445          return;
446      }
447  
448      jobject surface = env->GetObjectField(thiz, fields.surface);
449      if (surface != NULL) {
             // if the Java-layer MediaRecorder holds a surface, prepare ends up calling StagefrightRecorder::setPreviewSurface
450          const sp<Surface> native_surface = get_surface(env, surface);
451  
452          // The application may misbehave and
453          // the preview surface becomes unavailable
454          if (native_surface.get() == 0) {
455              ALOGE("Application lost the surface");
456              jniThrowException(env, "java/io/IOException", "invalid preview surface");
457              return;
458          }
459         
460          ALOGI("prepare: surface=%p", native_surface.get());
461          if (process_media_recorder_call(env, mr->setPreviewSurface(native_surface->getIGraphicBufferProducer()), "java/lang/RuntimeException", "setPreviewSurface failed.")) {
462              return;
463          }
464      }
         // finally call StagefrightRecorder::prepare (reached whether or not a surface was set)
465      process_media_recorder_call(env, mr->prepare(), "java/io/IOException", "prepare failed.");
466  }

In the JNI function android_media_MediaRecorder_prepare, if the Java-layer MediaRecorder holds a surface, prepare first calls StagefrightRecorder::setPreviewSurface, which stores the surface in the member variable mPreviewSurface, and then calls StagefrightRecorder::prepare; if there is no surface, only StagefrightRecorder::prepare is called.

/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp
1204  status_t StagefrightRecorder::prepare() {
1205      ALOGV("prepare");
1206      Mutex::Autolock autolock(mLock);
1207      if (mVideoSource == VIDEO_SOURCE_SURFACE) {
1208          return prepareInternal();
1209      }
1210      return OK;
1211  }
1212  

Since in our case mVideoSource == VIDEO_SOURCE_CAMERA (because we set the VideoSource to MediaRecorder.VideoSource.CAMERA), this prepare call effectively does nothing.

2.4 MediaRecorder.start

Likewise, MediaRecorder.start eventually reaches StagefrightRecorder::start:
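Seen from the application, the sequence around start() is short; a hedged sketch of the usual lifecycle (error handling omitted):

import android.media.MediaRecorder;

class RecordLifecycleSketch {
    void record(MediaRecorder recorder) throws Exception {
        recorder.prepare();   // nearly a no-op natively for VIDEO_SOURCE_CAMERA
        recorder.start();     // StagefrightRecorder::start -> prepareInternal + MPEG4Writer::start
        // ... recording ...
        recorder.stop();      // finalizes the file
        recorder.reset();     // back to the idle state; can be configured again
        recorder.release();   // frees the native recorder
    }
}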

/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp
1213  status_t StagefrightRecorder::start() {
1214      ALOGV("start");
1215      Mutex::Autolock autolock(mLock);
1216      if (mOutputFd < 0) {
1217          ALOGE("Output file descriptor is invalid");
1218          return INVALID_OPERATION;
1219      }
1220  
1221      status_t status = OK;
1222      // our mVideoSource is VIDEO_SOURCE_CAMERA, so prepareInternal is called here to set up mWriter according to the output file format
1223      if (mVideoSource != VIDEO_SOURCE_SURFACE) {
1224          status = prepareInternal();
1225          if (status != OK) {
1226              return status;
1227          }
1228      }
1229  
1230      if (mWriter == NULL) {
1231          ALOGE("File writer is not avaialble");
1232          return UNKNOWN_ERROR;
1233      }
1234      // likewise, build the MetaData for the chosen output format, then call the corresponding writer's start to begin recording
1235      switch (mOutputFormat) {
              // here we take MPEG4 as the example: the file is packaged as mp4
1236          case OUTPUT_FORMAT_DEFAULT:
1237          case OUTPUT_FORMAT_THREE_GPP:
1238          case OUTPUT_FORMAT_MPEG_4:
1239          case OUTPUT_FORMAT_WEBM:
1240          {
1241              bool isMPEG4 = true;
1242              if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
1243                  isMPEG4 = false;
1244              }
1245              sp<MetaData> meta = new MetaData;
1246              setupMPEG4orWEBMMetaData(&meta);
1247              status = mWriter->start(meta.get());
1248              break;
1249          }
1250  
              ...
1276  
1277          default:
1278          {
1279              ALOGE("Unsupported output file format: %d", mOutputFormat);
1280              status = UNKNOWN_ERROR;
1281              break;
1282          }
1283      }
1284  
1285      if (status != OK) {
1286          mWriter.clear();
1287          mWriter = NULL;
1288      }
1289  
1290      if ((status == OK) && (!mStarted)) {
1291          mAnalyticsDirty = true;
1292          mStarted = true;
1293  
1294          mStartedRecordingUs = systemTime() / 1000;
1295  
1296          uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted;
1297          if (mAudioSource != AUDIO_SOURCE_CNT) {
1298              params |= IMediaPlayerService::kBatteryDataTrackAudio;
1299          }
1300          if (mVideoSource != VIDEO_SOURCE_LIST_END) {
1301              params |= IMediaPlayerService::kBatteryDataTrackVideo;
1302          }
1303  
1304          addBatteryData(params);
1305      }
1306  
1307      return status;
1308  }

1153  status_t StagefrightRecorder::prepareInternal() {
1154      ALOGV("prepare");
1155      if (mOutputFd < 0) {
1156          ALOGE("Output file descriptor is invalid");
1157          return INVALID_OPERATION;
1158      }
1159  
1160      status_t status = OK;
1161  
1162      switch (mOutputFormat) {
1163          case OUTPUT_FORMAT_DEFAULT:
1164          case OUTPUT_FORMAT_THREE_GPP:
1165          case OUTPUT_FORMAT_MPEG_4:
1166          case OUTPUT_FORMAT_WEBM:
                  // call setupMPEG4orWEBMRecording to create the MediaWriter
1167              status = setupMPEG4orWEBMRecording();
1168              break;
1169          ...
1191  
1192          default:
1193              ALOGE("Unsupported output file format: %d", mOutputFormat);
1194              status = UNKNOWN_ERROR;
1195              break;
1196      }
1197  
1198      ALOGV("Recording frameRate: %d captureFps: %f",
1199              mFrameRate, mCaptureFps);
1200  
1201      return status;
1202  }

2146  status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
2147      mWriter.clear();
2148      mTotalBitRate = 0;
2149  
2150      status_t err = OK;
2151      sp<MediaWriter> writer;
2152      sp<MPEG4Writer> mp4writer;
2153      if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
2154          writer = new WebmWriter(mOutputFd);
2155      } else {
2156          writer = mp4writer = new MPEG4Writer(mOutputFd);
2157      }
2158      //mVideoSource == VIDEO_SOURCE_CAMERA < VIDEO_SOURCE_LIST_END
2159      if (mVideoSource < VIDEO_SOURCE_LIST_END) {
2160          setDefaultVideoEncoderIfNecessary();
2161  
2162          sp<MediaSource> mediaSource;
              // since we are recording from the camera, the mediaSource returned here delivers the Camera data
2163          err = setupMediaSource(&mediaSource);
2164          if (err != OK) {
2165              return err;
2166          }
2167  
2168          sp<MediaCodecSource> encoder;
              // set up the H264 (mpeg_4-avc) encoder
2169          err = setupVideoEncoder(mediaSource, &encoder);
2170          if (err != OK) {
2171              return err;
2172          }
2173          // wrap the video encoder into a Track and add it to mTracks in MPEG4Writer
2174          writer->addSource(encoder);
2175          mVideoEncoderSource = encoder;
2176          mTotalBitRate += mVideoBitRate;
2177      }
2178  
2179      // Audio source is added at the end if it exists.
2180      // This help make sure that the "recoding" sound is suppressed for
2181      // camcorder applications in the recorded files.
2182      // disable audio for time lapse recording
2183      const bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
2184      if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
              // set up the audio encoder and wrap it into a Track added to mTracks in MPEG4Writer
2185          err = setupAudioEncoder(writer);
2186          if (err != OK) return err;
2187          mTotalBitRate += mAudioBitRate;
2188      }
2189  
2190      if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
2191          if (mCaptureFpsEnable) {
2192              mp4writer->setCaptureRate(mCaptureFps);
2193          }
2194  
2195          if (mInterleaveDurationUs > 0) {
2196              mp4writer->setInterleaveDuration(mInterleaveDurationUs);
2197          }
2198          if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
2199              mp4writer->setGeoData(mLatitudex10000, mLongitudex10000);
2200          }
2201      }
2202      if (mMaxFileDurationUs != 0) {
2203          writer->setMaxFileDuration(mMaxFileDurationUs);
2204      }
2205      if (mMaxFileSizeBytes != 0) {
2206          writer->setMaxFileSize(mMaxFileSizeBytes);
2207      }
2208      if (mVideoSource == VIDEO_SOURCE_DEFAULT
2209              || mVideoSource == VIDEO_SOURCE_CAMERA) {
2210          mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
2211      } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
2212          // surface source doesn't need large initial delay
2213          mStartTimeOffsetMs = 100;
2214      }
2215      if (mStartTimeOffsetMs > 0) {
2216          writer->setStartTimeOffsetMs(mStartTimeOffsetMs);
2217      }
2218  
2219      writer->setListener(mListener);
2220      mWriter = writer;
2221      return OK;
2222  }

In StagefrightRecorder::start, because our video source type is Camera, the writer is set up by calling prepareInternal from start; if you had passed in a surface and set the video source type to Surface, the writer would already have been set up when MediaRecorder.prepare was called. In addition, since we normally record MP4, mOutputFormat is OUTPUT_FORMAT_MPEG_4, so the writer we create is an MPEG4Writer. Next the video and audio encoders have to be attached to the MPEG4Writer.
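The surface-source contrast mentioned above also shows up in the app API: with VideoSource.SURFACE the writer and encoder are built during prepare(), after which the application pulls the encoder's input surface out of the recorder and renders into it. A hedged sketch (the output path is a placeholder):

import android.media.MediaRecorder;
import android.view.Surface;

class SurfaceSourceSketch {
    Surface startSurfaceRecording(MediaRecorder recorder) throws Exception {
        recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
        recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);   // -> MPEG4Writer
        recorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        recorder.setOutputFile("/sdcard/out.mp4");                     // placeholder path
        recorder.prepare();                        // writer and encoder are set up here
        Surface input = recorder.getSurface();     // may only be called after prepare()
        recorder.start();
        return input;                              // render frames into this surface
    }
}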

2.4.1 Setting up the video encoder

The relevant code:

2146  status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
    ...
2159      if (mVideoSource < VIDEO_SOURCE_LIST_END) {
2160          setDefaultVideoEncoderIfNecessary();
2161  
2162          sp<MediaSource> mediaSource;
2163          err = setupMediaSource(&mediaSource);
2164          if (err != OK) {
2165              return err;
2166          }
2167  
2168          sp<MediaCodecSource> encoder;
2169          err = setupVideoEncoder(mediaSource, &encoder);
2170          if (err != OK) {
2171              return err;
2172          }
2173  
2174          writer->addSource(encoder);
2175          mVideoEncoderSource = encoder;
2176          mTotalBitRate += mVideoBitRate;
2177      }
...
	}

As the code shows, the flow has three main steps:

1. Set up the media source (either data coming from the camera or data from a surface)

2. Set up the video encoder

3. Add the encoder to the MPEG4Writer

Let's look at the first step, setting up the media source.

/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp
1857  status_t StagefrightRecorder::setupMediaSource(
1858                        sp<MediaSource> *mediaSource) {
          // since our source is the Camera, we take the if branch
1859      if (mVideoSource == VIDEO_SOURCE_DEFAULT
1860              || mVideoSource == VIDEO_SOURCE_CAMERA) {
1861          sp<CameraSource> cameraSource;
1862          status_t err = setupCameraSource(&cameraSource);
1863          if (err != OK) {
1864              return err;
1865          }
1866          *mediaSource = cameraSource;
1867      } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
1868          *mediaSource = NULL;
1869      } else {
1870          return INVALID_OPERATION;
1871      }
1872      return OK;
1873  }

1875  status_t StagefrightRecorder::setupCameraSource(
1876          sp<CameraSource> *cameraSource) {
1877      status_t err = OK;
1878      if ((err = checkVideoEncoderCapabilities()) != OK) {
1879          return err;
1880      }
1881      Size videoSize;
1882      videoSize.width = mVideoWidth;
1883      videoSize.height = mVideoHeight;
1884      uid_t uid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
1885      pid_t pid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(mAttributionSource.pid));
1886      String16 clientName = VALUE_OR_RETURN_STATUS(
1887          aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
          // usually the else branch is taken; this depends on whether MediaRecorder.java's setCaptureRate has been called
1888      if (mCaptureFpsEnable) {
1889          if (!(mCaptureFps > 0.)) {
1890              ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
1891              return BAD_VALUE;
1892          }
1893  
1894          mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
1895                  mCamera, mCameraProxy, mCameraId, clientName, uid, pid,
1896                  videoSize, mFrameRate, mPreviewSurface,
1897                  std::llround(1e6 / mCaptureFps));
1898          *cameraSource = mCameraSourceTimeLapse;
1899      } else {
              // create the CameraSource
1900          *cameraSource = CameraSource::CreateFromCamera(
1901                  mCamera, mCameraProxy, mCameraId, clientName, uid, pid,
1902                  videoSize, mFrameRate,
1903                  mPreviewSurface);
1904      }
1905      mCamera.clear();
1906      mCameraProxy.clear();
1907      if (*cameraSource == NULL) {
1908          return UNKNOWN_ERROR;
1909      }
1910  
1911      if ((*cameraSource)->initCheck() != OK) {
1912          (*cameraSource).clear();
1913          *cameraSource = NULL;
1914          return NO_INIT;
1915      }
1916  
1917      // When frame rate is not set, the actual frame rate will be set to
1918      // the current frame rate being used.
1919      if (mFrameRate == -1) {
1920          int32_t frameRate = 0;
1921          CHECK ((*cameraSource)->getFormat()->findInt32(
1922                      kKeyFrameRate, &frameRate));
1923          ALOGI("Frame rate is not explicitly set. Use the current frame "
1924               "rate (%d fps)", frameRate);
1925          mFrameRate = frameRate;
1926      }
1927  
1928      CHECK(mFrameRate != -1);
1929  
1930      mMetaDataStoredInVideoBuffers =
1931          (*cameraSource)->metaDataStoredInVideoBuffers();
1932  
1933      return OK;
1934  }

setupCameraSource mainly creates the CameraSource object; next, the CameraSource has to be handed to the video encoder.

1936  status_t StagefrightRecorder::setupVideoEncoder(
1937          const sp<MediaSource> &cameraSource,
1938          sp<MediaCodecSource> *source) {
1939      source->clear();
1940  
1941      sp<AMessage> format = new AMessage();
1942      
          // fill format with all of the encoder parameters
          ...
          // pass the CameraSource in when creating the encoder
2094      sp<MediaCodecSource> encoder = MediaCodecSource::Create(
2095              mLooper, format, cameraSource, mPersistentSurface, flags);
2096      if (encoder == NULL) {
2097          ALOGE("Failed to create video encoder");
2098          // When the encoder fails to be created, we need
2099          // release the camera source due to the camera's lock
2100          // and unlock mechanism.
2101          if (cameraSource != NULL) {
2102              cameraSource->stop();
2103          }
2104          return UNKNOWN_ERROR;
2105      }
2106  
2107      if (cameraSource == NULL) {
2108          mGraphicBufferProducer = encoder->getGraphicBufferProducer();
2109      }
2110  
2111      *source = encoder;
2112  
2113      return OK;
2114  }
/frameworks/av/media/libstagefright/MediaCodecSource.cpp
453  MediaCodecSource::MediaCodecSource(
454          const sp<ALooper> &looper,
455          const sp<AMessage> &outputFormat,
456          const sp<MediaSource> &source,
457          const sp<PersistentSurface> &persistentSurface,
458          uint32_t flags)
459      ... {
476      CHECK(mLooper != NULL);
477  
478      if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
479          mPuller = new Puller(source);
480      }
481  }
109  MediaCodecSource::Puller::Puller(const sp<MediaSource> &source)
110      : mSource(source),
111        mLooper(new ALooper()),
112        mIsAudio(false)
113  {
114      sp<MetaData> meta = source->getFormat();
115      const char *mime;
116      CHECK(meta->findCString(kKeyMIMEType, &mime));
117  
118      mIsAudio = !strncasecmp(mime, "audio/", 6);
119  
120      mLooper->setName("pull_looper");
121  }

From the code above, the video encoder object is a MediaCodecSource, and the CameraSource object is handed to the Puller's member variable mSource.
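The two input modes of MediaCodecSource (a Puller pulling buffers out of a MediaSource versus FLAG_USE_SURFACE_INPUT) mirror the two ways an application can feed MediaCodec directly: queueing input buffers, or rendering into the surface returned by createInputSurface(). The sketch below is an app-level analogy only, not the framework code:

import android.media.MediaCodec;
import android.media.MediaFormat;
import android.view.Surface;

class EncoderInputSketch {
    // "Puller" style: the caller actively pushes buffers into the codec,
    // the way the Puller feeds CameraSource frames to the encoder.
    void queueOneBuffer(MediaCodec encoder, byte[] frame, long ptsUs) {
        int index = encoder.dequeueInputBuffer(10_000 /* us */);
        if (index >= 0) {
            encoder.getInputBuffer(index).put(frame);
            encoder.queueInputBuffer(index, 0, frame.length, ptsUs, 0);
        }
    }

    // Surface-input style: the producer draws into the codec's input surface,
    // the way a surface video source hands buffers straight to the encoder.
    Surface configureSurfaceInput(MediaCodec encoder, MediaFormat format) {
        encoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        return encoder.createInputSurface();   // must be called after configure(), before start()
    }
}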

Next, the newly created video encoder (the MediaCodecSource) is added to the MPEG4Writer.

/frameworks/av/media/libstagefright/MPEG4Writer.cpp
653  status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
         ...
678  
679      // This is a metadata track or the first track of either audio or video
680      // Go ahead to add the track.
         // wrap the encoder into a Track and store it in mTracks
681      Track *track = new Track(this, source, 1 + mTracks.size());
682      mTracks.push_back(track);
683  
684      mHasMoovBox |= !track->isHeic();
685      mHasFileLevelMeta |= track->isHeic();
686  
687      return OK;
688  }

2128  MPEG4Writer::Track::Track(
2129          MPEG4Writer *owner, const sp<MediaSource> &source, uint32_t aTrackId)
2130      : mOwner(owner),
2131        mMeta(source->getFormat()),
2132        mSource(source),
            ...
}                

So inside MPEG4Writer the video encoder is wrapped into a Track and stored in the Track's mSource member.

2.4.2 Setting up the audio source

Setting up the audio source is very similar to the video path; the core code is:

/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp
2146  status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
           ...
2179      // Audio source is added at the end if it exists.
2180      // This help make sure that the "recoding" sound is suppressed for
2181      // camcorder applications in the recorded files.
2182      // disable audio for time lapse recording
2183      const bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
2184      if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
              // set up the audio encoder
2185          err = setupAudioEncoder(writer);
2186          if (err != OK) return err;
2187          mTotalBitRate += mAudioBitRate;
2188      }
    ...
}
2116  status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
2117      status_t status = BAD_VALUE;
          
2135      // create the audio encoder
2136      sp<MediaCodecSource> audioEncoder = createAudioSource();
2137      if (audioEncoder == NULL) {
2138          return UNKNOWN_ERROR;
2139      }
2140      // add the encoder to the writer
2141      writer->addSource(audioEncoder);
2142      mAudioEncoderSource = audioEncoder;
2143      return OK;
2144  }
1310  sp<MediaCodecSource> StagefrightRecorder::createAudioSource() {
1311      int32_t sourceSampleRate = mSampleRate;
1312  
          ...
1355      // create the audio source
1356      sp<AudioSource> audioSource =
1357          new AudioSource(
1358                  &attr,
1359                  mAttributionSource,
1360                  sourceSampleRate,
1361                  mAudioChannels,
1362                  mSampleRate,
1363                  mSelectedDeviceId,
1364                  mSelectedMicDirection,
1365                  mSelectedMicFieldDimension);
1366  
1367      status_t err = audioSource->initCheck();
1368  
1369      if (err != OK) {
1370          ALOGE("audio source is not initialized");
1371          return NULL;
1372      }
1373      // set up the encoding format
         ...
1424      // create the encoder with the audio source as its input
1425      sp<MediaCodecSource> audioEncoder =
1426              MediaCodecSource::Create(mLooper, format, audioSource);
1427      sp<AudioSystem::AudioDeviceCallback> callback = mAudioDeviceCallback.promote();
1428      if (mDeviceCallbackEnabled && callback != 0) {
1429          audioSource->addAudioDeviceCallback(callback);
1430      }
1431      mAudioSourceNode = audioSource;
1432  
1433      if (audioEncoder == NULL) {
1434          ALOGE("Failed to create audio encoder");
1435      }
1436  
1437      return audioEncoder;
1438  }

As you can see, the flow is the same as for the video encoder.
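On the native side, AudioSource captures PCM through an AudioRecord before handing it to MediaCodecSource. As a rough app-level analogue (the sample rate, channel mask and loop bound are assumed values, and RECORD_AUDIO permission is required):

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

class AudioCaptureSketch {
    // Conceptually what the native AudioSource does: capture PCM from the
    // CAMCORDER source and pass the buffers on to the encoder.
    void captureLoop() {
        int sampleRate = 48000;                       // assumed value
        int channelMask = AudioFormat.CHANNEL_IN_MONO;
        int encoding = AudioFormat.ENCODING_PCM_16BIT;
        int minBuf = AudioRecord.getMinBufferSize(sampleRate, channelMask, encoding);
        AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.CAMCORDER,
                sampleRate, channelMask, encoding, minBuf * 2);
        record.startRecording();
        byte[] buffer = new byte[minBuf];
        for (int i = 0; i < 100; i++) {               // bounded loop for the sketch
            int read = record.read(buffer, 0, buffer.length);
            // ... pass `read` bytes of PCM on to the encoder ...
        }
        record.stop();
        record.release();
    }
}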

2.4.3 MPEG4Writer.start

Next, MPEG4Writer::start is called to kick off the recording flow.

/frameworks/av/media/libstagefright/MPEG4Writer.cpp
849  status_t MPEG4Writer::start(MetaData *param) {
850      if (mInitCheck != OK) {
851          return UNKNOWN_ERROR;
852      }
853      mStartMeta = param;
854  
		 ...
971      // start the writer thread
972      err = startWriterThread();
973      if (err != OK) {
974          return err;
975      }
976      // set up and start the looper
977      err = setupAndStartLooper();
978      if (err != OK) {
979          return err;
980      }
981      // write the file-level header (ftyp box) of the recording
982      writeFtypBox(param);
983  
         ...
1021  
1022      mOffset = mMdatOffset;
1023      seekOrPostError(mFd, mMdatOffset, SEEK_SET);
1024      write("\x00\x00\x00\x01mdat????????", 16);
1025  
1026      /* Confirm whether the writing of the initial file atoms, ftyp and free,
1027       * are written to the file properly by posting kWhatNoIOErrorSoFar to the
1028       * MP4WtrCtrlHlpLooper that's handling write and seek errors also. If there
1029       * was kWhatIOError, the following two scenarios should be handled.
1030       * 1) If kWhatIOError was delivered and processed, MP4WtrCtrlHlpLooper
1031       * would have stopped all threads gracefully already and posting
1032       * kWhatNoIOErrorSoFar would fail.
1033       * 2) If kWhatIOError wasn't delivered or getting processed,
1034       * kWhatNoIOErrorSoFar should get posted successfully.  Wait for
1035       * response from MP4WtrCtrlHlpLooper.
1036       */
1037      sp<AMessage> msg = new AMessage(kWhatNoIOErrorSoFar, mReflector);
1038      sp<AMessage> response;
1039      err = msg->postAndAwaitResponse(&response);
1040      if (err != OK || !response->findInt32("err", &err) || err != OK) {
1041          return ERROR_IO;
1042      }
1043      // start the audio and video tracks
1044      err = startTracks(param);
1045      if (err != OK) {
1046          return err;
1047      }
1048  
1049      mStarted = true;
1050      return OK;
1051  }

2.4.4 startWriterThread

/frameworks/av/media/libstagefright/MPEG4Writer.cpp
2829  status_t MPEG4Writer::startWriterThread() {
2830      ALOGV("startWriterThread");
2831  
2832      mDone = false;
2833      mIsFirstChunk = true;
2834      mDriftTimeUs = 0;
         // the audio and video encoders added earlier were wrapped into Tracks and stored in mTracks
2835      for (List<Track *>::iterator it = mTracks.begin();
2836           it != mTracks.end(); ++it) {
2837          ChunkInfo info;
2838          info.mTrack = *it;
2839          info.mPrevChunkTimestampUs = 0;
2840          info.mMaxInterChunkDurUs = 0;
              // each Track is wrapped into a ChunkInfo and stored in mChunkInfos
2841          mChunkInfos.push_back(info);
2842      }
2843  
2844      pthread_attr_t attr;
2845      pthread_attr_init(&attr);
2846      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
          // create the child thread mThread, which runs MPEG4Writer::ThreadWrapper
2847      pthread_create(&mThread, &attr, ThreadWrapper, this);
2848      pthread_attr_destroy(&attr);
2849      mWriterThreadStarted = true;
2850      return OK;
2851  }

2671  // static
2672  void *MPEG4Writer::ThreadWrapper(void *me) {
2673      ALOGV("ThreadWrapper: %p", me);
2674      MPEG4Writer *writer = static_cast<MPEG4Writer *>(me);
2675      writer->threadFunc();
2676      return NULL;
2677  }
2789  void MPEG4Writer::threadFunc() {
2790      ALOGV("threadFunc");
2791  
2792      prctl(PR_SET_NAME, (unsigned long)"MPEG4Writer", 0, 0, 0);
2793  
2794      if (mIsBackgroundMode) {
2795          // Background priority for media transcoding.
2796          androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
2797      }
2798  
2799      Mutex::Autolock autoLock(mLock);
2800      while (!mDone) {
2801          Chunk chunk;
2802          bool chunkFound = false;
2803          // findChunkToWrite looks for a chunk that needs to be written
2804          while (!mDone && !(chunkFound = findChunkToWrite(&chunk))) {
                  // wait for the mChunkReadyCondition signal
2805              mChunkReadyCondition.wait(mLock);
2806          }
2807  
2808          // In real time recording mode, write without holding the lock in order
2809          // to reduce the blocking time for media track threads.
2810          // Otherwise, hold the lock until the existing chunks get written to the
2811          // file.
2812          if (chunkFound) {
2813              if (mIsRealTimeRecording) {
2814                  mLock.unlock();
2815              }
                  // write the chunk into the file
2816              writeChunkToFile(&chunk);
2817              if (mIsRealTimeRecording) {
2818                  mLock.lock();
2819              }
2820          }
2821      }
2822  
2823      writeAllChunks();
2824      ALOGV("threadFunc mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset,
2825            (long long)mMaxOffsetAppend);
2826      mOffset = std::max(mOffset, mMaxOffsetAppend);
2827  }
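threadFunc is a classic condition-variable consumer: the track threads append Chunks to mChunks and signal mChunkReadyCondition, and the writer thread drains them to the file. The same pattern in plain Java, as a hedged analogy with made-up names:

import java.util.ArrayDeque;
import java.util.Queue;

class ChunkWriterSketch {
    private final Queue<byte[]> chunks = new ArrayDeque<>();   // stands in for mChunks
    private boolean done = false;

    // Called by the "track" side once a chunk of encoded samples is ready.
    synchronized void enqueueChunk(byte[] chunk) {
        chunks.add(chunk);
        notifyAll();                    // ~ mChunkReadyCondition.signal()
    }

    synchronized void finish() {
        done = true;
        notifyAll();
    }

    // Body of the writer thread, mirroring MPEG4Writer::threadFunc().
    void writerLoop() throws InterruptedException {
        while (true) {
            byte[] chunk;
            synchronized (this) {
                while (!done && chunks.isEmpty()) {
                    wait();             // ~ mChunkReadyCondition.wait(mLock)
                }
                if (chunks.isEmpty()) {
                    return;             // done, and nothing left to write
                }
                chunk = chunks.poll();
            }
            writeChunkToFile(chunk);    // written outside the lock, like real-time recording mode
        }
    }

    private void writeChunkToFile(byte[] chunk) { /* ... */ }
}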

2.4.5 startTracks

/frameworks/av/media/libstagefright/MPEG4Writer.cpp
690  status_t MPEG4Writer::startTracks(MetaData *params) {
691      if (mTracks.empty()) {
692          ALOGE("No source added");
693          return INVALID_OPERATION;
694      }
695  
696      for (List<Track *>::iterator it = mTracks.begin();
697           it != mTracks.end(); ++it) {
698          status_t err = (*it)->start(params);
699  
700          if (err != OK) {
701              for (List<Track *>::iterator it2 = mTracks.begin();
702                   it2 != it; ++it2) {
703                  (*it2)->stop();
704              }
705  
706              return err;
707          }
708      }
709      return OK;
710  }

2854  status_t MPEG4Writer::Track::start(MetaData *params) {
2855      if (!mDone && mPaused) {
2856          mPaused = false;
2857          mResumed = true;
2858          return OK;
2859      }
2860  
2861      int64_t startTimeUs;
2862      if (params == NULL || !params->findInt64(kKeyTime, &startTimeUs)) {
2863          startTimeUs = 0;
2864      }
2865      mStartTimeRealUs = startTimeUs;
2866  
2867      int32_t rotationDegrees;
2868      if ((mIsVideo || mIsHeic) && params &&
2869              params->findInt32(kKeyRotation, &rotationDegrees)) {
2870          mRotation = rotationDegrees;
2871      }
2872      if (mIsHeic) {
2873          // Reserve the item ids, so that the item ids are ordered in the same
2874          // order that the image tracks are added.
2875          // If we leave the item ids to be assigned when the sample is written out,
2876          // the original track order may not be preserved, if two image tracks
2877          // have data around the same time. (This could happen especially when
2878          // we're encoding with single tile.) The reordering may be undesirable,
2879          // even if the file is well-formed and the primary picture is correct.
2880  
2881          // Reserve item ids for samples + grid
2882          size_t numItemsToReserve = mNumTiles + (mNumTiles > 0);
2883          status_t err = mOwner->reserveItemId_l(numItemsToReserve, &mItemIdBase);
2884          if (err != OK) {
2885              return err;
2886          }
