Last time we walked through the startPreview flow, mainly to sketch a rough top-to-bottom line through the code. Today we look at the Camera framework's sendCommand and dataCallback. These are the glue between the layers, and they show how the upper and lower halves talk to each other.
First, sendCommand.
There is no sendCommand() method in Camera.java, but Camera.cpp does have a sendCommand() function; the calls originate in the JNI layer:
android_hardware_Camera.cpp (frameworks\base\core\jni)
startSmoothZoom -> android_hardware_Camera_startSmoothZoom -> CAMERA_CMD_START_SMOOTH_ZOOM
stopSmoothZoom -> android_hardware_Camera_stopSmoothZoom -> CAMERA_CMD_STOP_SMOOTH_ZOOM
setDisplayOrientation -> android_hardware_Camera_setDisplayOrientation -> CAMERA_CMD_SET_DISPLAY_ORIENTATION
_enableShutterSound -> android_hardware_Camera_enableShutterSound -> CAMERA_CMD_ENABLE_SHUTTER_SOUND
_startFaceDetection -> android_hardware_Camera_startFaceDetection -> CAMERA_CMD_START_FACE_DETECTION
_stopFaceDetection -> android_hardware_Camera_stopFaceDetection -> CAMERA_CMD_STOP_FACE_DETECTION
enableFocusMoveCallback -> android_hardware_Camera_enableFocusMoveCallback -> CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG
Command codes of this kind are defined in Camera.h (system\core\include\system):
enum {
    CAMERA_CMD_START_SMOOTH_ZOOM = 1,
    CAMERA_CMD_STOP_SMOOTH_ZOOM = 2,
    CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3,
    CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4,
    CAMERA_CMD_PLAY_RECORDING_SOUND = 5,
    CAMERA_CMD_START_FACE_DETECTION = 6,
    CAMERA_CMD_STOP_FACE_DETECTION = 7,
    CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG = 8,
    CAMERA_CMD_PING = 9,
    CAMERA_CMD_SET_VIDEO_BUFFER_COUNT = 10,
};
All of the operations above are implemented through the sendCommand() function, and the corresponding command codes are enumerated above; the HAL layer inspects the command code and carries out the matching operation.
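Before diving into the implementation, here is a minimal app-side sketch (an illustration only, not code from the article's source tree) of how the public Camera APIs end up issuing these commands; the demo class name, the SurfaceHolder, camera id 0, and smooth-zoom/face-detection support are all assumptions.

import java.io.IOException;
import android.hardware.Camera;
import android.view.SurfaceHolder;

public class CommandDemo {
    // "holder" is an assumed, already-created SurfaceHolder for the preview.
    public static Camera openAndConfigure(SurfaceHolder holder) throws IOException {
        Camera camera = Camera.open(0);       // camera id 0 assumed to exist
        camera.setPreviewDisplay(holder);
        camera.setDisplayOrientation(90);     // -> CAMERA_CMD_SET_DISPLAY_ORIENTATION
        camera.enableShutterSound(false);     // -> CAMERA_CMD_ENABLE_SHUTTER_SOUND
        camera.startPreview();
        camera.startFaceDetection();          // -> CAMERA_CMD_START_FACE_DETECTION
        camera.startSmoothZoom(2);            // -> CAMERA_CMD_START_SMOOTH_ZOOM (if supported)
        return camera;
    }
}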
Now let's look at the implementation of sendCommand:
Camera.cpp (frameworks\av\camera)
// send command to camera driver
status_t Camera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
    ALOGV("sendCommand");
    sp<ICamera> c = mCamera;
    if (c == 0) return NO_INIT;
    // Three parameters: the first is the command code; the other two are used by
    // some commands and ignored by others, which leaves room for extension -- if
    // more parameters are ever needed, both ends of the interface can be updated together.
    return c->sendCommand(cmd, arg1, arg2);
}
The call then goes across the Binder mechanism. On the proxy side the arguments are packed into a Parcel:
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
    ALOGV("sendCommand");
    Parcel data, reply;
    data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
    data.writeInt32(cmd);
    data.writeInt32(arg1);
    data.writeInt32(arg2);
    remote()->transact(SEND_COMMAND, data, &reply);
    return reply.readInt32();
}
status_t BnCamera::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    ......
    case SEND_COMMAND: {
        ALOGV("SEND_COMMAND");
        CHECK_INTERFACE(ICamera, data, reply);
        int command = data.readInt32();
        int arg1 = data.readInt32();
        int arg2 = data.readInt32();
        reply->writeInt32(sendCommand(command, arg1, arg2));
        return NO_ERROR;
    } break;
}
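The proxy/stub split above is the standard Binder pattern. Purely as a Java-side analogy (hypothetical code, not part of the camera framework; the class name, descriptor, and method name are made up), the same packing and unpacking of cmd/arg1/arg2 would look roughly like this:

import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

// The proxy writes cmd/arg1/arg2 into a Parcel and calls transact();
// the stub unpacks them in onTransact() and runs the real implementation,
// writing the status code into the reply Parcel.
class CommandStub extends Binder {
    static final int SEND_COMMAND = IBinder.FIRST_CALL_TRANSACTION;
    static final String DESCRIPTOR = "demo.ICommand";   // made-up descriptor

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == SEND_COMMAND) {
            data.enforceInterface(DESCRIPTOR);           // mirrors CHECK_INTERFACE
            int cmd  = data.readInt();
            int arg1 = data.readInt();
            int arg2 = data.readInt();
            reply.writeInt(sendCommand(cmd, arg1, arg2));
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }

    int sendCommand(int cmd, int arg1, int arg2) {
        return 0;   // 0 is NO_ERROR in the native code
    }
}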
On the service side this lands in CameraClient::sendCommand():
status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
    ......
    if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
        // Mirror the preview if the camera is front-facing.
        orientation = getOrientation(arg1, mCameraFacing == CAMERA_FACING_FRONT);
        if (orientation == -1) return BAD_VALUE;
        if (mOrientation != orientation) {
            mOrientation = orientation;
            if (mPreviewWindow != 0) {
                native_window_set_buffers_transform(mPreviewWindow.get(),
                        mOrientation);
            }
        }
        return OK;
    } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
        // Here arg1 actually gets used: it switches the shutter sound on or off.
        switch (arg1) {
            case 0:
                return enableShutterSound(false);
            case 1:
                return enableShutterSound(true);
            default:
                return BAD_VALUE;
        }
        return OK;
    } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) { // recording sound
        mCameraService->playSound(CameraService::SOUND_RECORDING);
    } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
        // Silently ignore this command
        return INVALID_OPERATION;
    } else if (cmd == CAMERA_CMD_PING) {
        // If mHardware is 0, checkPidAndHardware will return error.
        return OK;
    }
    // Everything above is handled in the framework; anything else is forwarded to the HAL.
    return mHardware->sendCommand(cmd, arg1, arg2);
}
In the HAL, the command types handled here are mainly starting and stopping face detection.
QCamera2HWI.cpp (device\asus\flo\camera\qcamera2\hal)
int QCamera2HardwareInterface::sendCommand(int32_t command, int32_t /*arg1*/, int32_t /*arg2*/)
{
    int rc = NO_ERROR;
    switch (command) {
    case CAMERA_CMD_START_FACE_DETECTION:
    case CAMERA_CMD_STOP_FACE_DETECTION:
        // switch face detection on or off
        rc = setFaceDetection(command == CAMERA_CMD_START_FACE_DETECTION? true : false);
        break;
    default:
        rc = NO_ERROR;
        break;
    }
    return rc;
}
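On the application side this HAL path is driven through the public face-detection API. A hedged sketch only (the demo class is made up; it assumes an opened Camera whose preview is already running and whose Parameters report face-detection support):

import android.hardware.Camera;

public class FaceDetectionDemo {
    // "camera" is assumed to be an opened Camera with preview running and
    // getParameters().getMaxNumDetectedFaces() > 0.
    public static void enableFaceDetection(Camera camera) {
        camera.setFaceDetectionListener((faces, cam) -> {
            // faces[] arrives via the CAMERA_MSG_PREVIEW_METADATA path shown later in this article.
        });
        camera.startFaceDetection();   // reaches the HAL as CAMERA_CMD_START_FACE_DETECTION
    }
}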
That, in summary, is the sendCommand path.
Next, the callbacks.
From the earlier articles we saw that there are three main kinds of callbacks:
notifyCallback
dataCallback
dataCallbackTimestamp
For this part we cannot trace top-down as before; we have to go in the opposite direction, starting from the HAL, which calls back into
CameraHardwareInterface.h (frameworks\av\services\camera\libcameraservice\device1)
static void __notify_cb(int32_t msg_type, int32_t ext1,
        int32_t ext2, void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    __this->mNotifyCb(msg_type, ext1, ext2, __this->mCbUser);
}

static void __data_cb(int32_t msg_type,
        const camera_memory_t *data, unsigned int index,
        camera_frame_metadata_t *metadata,
        void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    ......
    __this->mDataCb(msg_type, mem->mBuffers[index], metadata, __this->mCbUser);
}

static void __data_cb_timestamp(nsecs_t timestamp, int32_t msg_type,
        const camera_memory_t *data, unsigned index,
        void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    ......
    __this->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], __this->mCbUser);
}
The mNotifyCb, mDataCb, and mDataCbTimestamp members are set from CameraClient::initialize(), which calls this setCallbacks():
void setCallbacks(notify_callback notify_cb,
        data_callback data_cb,
        data_callback_timestamp data_cb_timestamp,
        void* user)
{
    mNotifyCb = notify_cb;
    mDataCb = data_cb;
    mDataCbTimestamp = data_cb_timestamp;
    mCbUser = user;
    ALOGV("%s(%s)", __FUNCTION__, mName.string());
    if (mDevice->ops->set_callbacks) {
        mDevice->ops->set_callbacks(mDevice,
                __notify_cb,
                __data_cb,
                __data_cb_timestamp,
                __get_memory,
                this);
    }
}
So naturally we go back to CameraClient to find the callbacks:
void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
        int32_t ext2, void* user) {
    ......
}

void CameraClient::dataCallback(int32_t msgType,
        const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
    LOG2("dataCallback(%d)", msgType);

    Mutex* lock = getClientLockFromCookie(user);
    if (lock == NULL) return;
    Mutex::Autolock alock(*lock);

    CameraClient* client =
            static_cast<CameraClient*>(getClientFromCookie(user));
    if (client == NULL) return;
    if (!client->lockIfMessageWanted(msgType)) return;

    if (dataPtr == 0 && metadata == NULL) {
        ALOGE("Null data returned in data callback");
        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
        return;
    }

    switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
        case CAMERA_MSG_PREVIEW_FRAME:
            client->handlePreviewData(msgType, dataPtr, metadata);
            break;
        case CAMERA_MSG_POSTVIEW_FRAME:
            client->handlePostview(dataPtr);
            break;
        case CAMERA_MSG_RAW_IMAGE:
            client->handleRawPicture(dataPtr);
            break;
        case CAMERA_MSG_COMPRESSED_IMAGE:
            client->handleCompressedPicture(dataPtr);
            break;
        default:
            client->handleGenericData(msgType, dataPtr, metadata);
            break;
    }
}

void CameraClient::dataCallbackTimestamp(nsecs_t timestamp,
        int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
    ......
}
Here let's focus mainly on the dataCallback path:
switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
    case CAMERA_MSG_PREVIEW_FRAME:      // preview frame data
        client->handlePreviewData(msgType, dataPtr, metadata);
        break;
    case CAMERA_MSG_POSTVIEW_FRAME:     // postview image
        client->handlePostview(dataPtr);
        break;
    case CAMERA_MSG_RAW_IMAGE:          // raw image data
        client->handleRawPicture(dataPtr);
        break;
    case CAMERA_MSG_COMPRESSED_IMAGE:   // the final, compressed (JPEG) picture
        client->handleCompressedPicture(dataPtr);
        break;
    default:
        client->handleGenericData(msgType, dataPtr, metadata);
        break;
}
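For orientation, here is a hedged app-side sketch (not from the original post; the demo class name is made up) of where these message types finally surface: CAMERA_MSG_PREVIEW_FRAME reaches the PreviewCallback, while the RAW/POSTVIEW/COMPRESSED types map onto the raw, postview, and jpeg callbacks passed to takePicture(). An opened Camera with a running preview is assumed.

import android.hardware.Camera;

public class CaptureDemo {
    public static void capture(Camera camera) {
        // CAMERA_MSG_PREVIEW_FRAME -> onPreviewFrame()
        camera.setPreviewCallback((data, cam) -> { /* one preview frame per call */ });

        camera.takePicture(
                () -> { /* shutter:  CAMERA_MSG_SHUTTER, delivered via notifyCallback */ },
                (data, cam) -> { /* raw:      CAMERA_MSG_RAW_IMAGE */ },
                (data, cam) -> { /* postview: CAMERA_MSG_POSTVIEW_FRAME */ },
                (data, cam) -> { /* jpeg:     CAMERA_MSG_COMPRESSED_IMAGE */ });
    }
}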
Each of these handlers eventually ends up calling c->dataCallback() (c being the ICameraClient proxy for the app-side client), handling the message according to its type, and the data then travels back across the Binder mechanism:
// generic data callback from camera service to app with image data
void dataCallback(int32_t msgType, const sp<IMemory>& imageData,
        camera_frame_metadata_t *metadata)
{
    ALOGV("dataCallback");
    Parcel data, reply;
    data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
    data.writeInt32(msgType);
    data.writeStrongBinder(imageData->asBinder());
    if (metadata) {
        data.writeInt32(metadata->number_of_faces);
        data.write(metadata->faces, sizeof(camera_face_t) * metadata->number_of_faces);
    }
    remote()->transact(DATA_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}
status_t BnCameraClient::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        ......
        case DATA_CALLBACK: {
            ALOGV("DATA_CALLBACK");
            CHECK_INTERFACE(ICameraClient, data, reply);
            int32_t msgType = data.readInt32();
            sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
            camera_frame_metadata_t *metadata = NULL;
            if (data.dataAvail() > 0) {
                metadata = new camera_frame_metadata_t;
                metadata->number_of_faces = data.readInt32();
                metadata->faces = (camera_face_t *) data.readInplace(
                        sizeof(camera_face_t) * metadata->number_of_faces);
            }
            dataCallback(msgType, imageData, metadata);
            if (metadata) delete metadata;
            return NO_ERROR;
        } break;
        ......
}
This lands back in Camera.cpp:
// callback from camera service when frame or image is ready
void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
        camera_frame_metadata_t *metadata)
{
    sp<CameraListener> listener;
    {
        Mutex::Autolock _l(mLock);
        listener = mListener;
    }
    if (listener != NULL) {
        listener->postData(msgType, dataPtr, metadata);
    }
}
The data is thrown up to the higher layer through a listener, which raises the question: when was this listener set?
Think back to the native_setup flow in the first framework-analysis post, which contains this snippet:
sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
context->incStrong((void*)android_hardware_Camera_native_setup);
camera->setListener(context);
That is where the listener is set: JNICameraContext inherits from CameraListener and overrides the parent class's methods.
class JNICameraContext: public CameraListener {
    ......
};

class CameraListener: virtual public RefBase
{
public:
    virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
    virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr,
            camera_frame_metadata_t *metadata) = 0;
    virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
};
Continuing from the snippet above, here we only look at postData().
android_hardware_Camera.cpp (frameworks\base\core\jni)
void JNICameraContext::postData(int32_t msgType, const sp<IMemory>& dataPtr,
        camera_frame_metadata_t *metadata)
{
    ......
    int32_t dataMsgType = msgType & ~CAMERA_MSG_PREVIEW_METADATA;

    // return data based on callback type
    switch (dataMsgType) {
        case CAMERA_MSG_VIDEO_FRAME:
            // should never happen
            break;

        // For backward-compatibility purpose, if there is no callback
        // buffer for raw image, the callback returns null.
        case CAMERA_MSG_RAW_IMAGE:
            ALOGV("rawCallback");
            if (mRawImageCallbackBuffers.isEmpty()) {
                env->CallStaticVoidMethod(mCameraJClass, fields.post_event,
                        mCameraJObjectWeak, dataMsgType, 0, 0, NULL);
            } else {
                copyAndPost(env, dataPtr, dataMsgType);
            }
            break;

        // There is no data.
        case 0:
            break;

        default:
            ALOGV("dataCallback(%d, %p)", dataMsgType, dataPtr.get());
            copyAndPost(env, dataPtr, dataMsgType);
            break;
    }

    // post frame metadata to Java
    if (metadata && (msgType & CAMERA_MSG_PREVIEW_METADATA)) {
        // this is where the face-detection data goes up
        postMetadata(env, CAMERA_MSG_PREVIEW_METADATA, metadata);
    }
}
The calls involved here:
env->CallStaticVoidMethod(mCameraJClass, fields.post_event,
        mCameraJObjectWeak, dataMsgType, 0, 0, NULL);
copyAndPost(env, dataPtr, dataMsgType);
postMetadata(env, CAMERA_MSG_PREVIEW_METADATA, metadata);
all use fields.post_event directly or indirectly. It is a JNI method ID looked up as follows:
fields.post_event = env->GetStaticMethodID(clazz, "postEventFromNative",
        "(Ljava/lang/Object;IIILjava/lang/Object;)V");
This lookup is done in register_android_hardware_Camera(); we won't dwell on it here. What actually ends up being invoked is:
Camera.java (frameworks\base\core\java\android\hardware)
private static void postEventFromNative(Object camera_ref,
        int what, int arg1, int arg2, Object obj)
{
    Camera c = (Camera)((WeakReference)camera_ref).get();
    if (c == null)
        return;

    if (c.mEventHandler != null) {
        Message m = c.mEventHandler.obtainMessage(what, arg1, arg2, obj);
        c.mEventHandler.sendMessage(m);
    }
}
Here, too, the very common Java Handler/Message pattern is used:
private class EventHandler extends Handler
{
    private final Camera mCamera;

    public EventHandler(Camera c, Looper looper) {
        super(looper);
        mCamera = c;
    }

    @Override
    public void handleMessage(Message msg) {
        switch(msg.what) {
        case CAMERA_MSG_SHUTTER:
            if (mShutterCallback != null) {
                mShutterCallback.onShutter();
            }
            return;

        case CAMERA_MSG_RAW_IMAGE:
            if (mRawImageCallback != null) {
                mRawImageCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_COMPRESSED_IMAGE:
            if (mJpegCallback != null) {
                mJpegCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_PREVIEW_FRAME:
            PreviewCallback pCb = mPreviewCallback;
            if (pCb != null) {
                if (mOneShot) {
                    // Clear the callback variable before the callback
                    // in case the app calls setPreviewCallback from
                    // the callback function
                    mPreviewCallback = null;
                } else if (!mWithBuffer) {
                    // We're faking the camera preview mode to prevent
                    // the app from being flooded with preview frames.
                    // Set to oneshot mode again.
                    setHasPreviewCallback(true, false);
                }
                pCb.onPreviewFrame((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_POSTVIEW_FRAME:
            if (mPostviewCallback != null) {
                mPostviewCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_FOCUS:
            AutoFocusCallback cb = null;
            synchronized (mAutoFocusCallbackLock) {
                cb = mAutoFocusCallback;
            }
            if (cb != null) {
                boolean success = msg.arg1 == 0 ? false : true;
                cb.onAutoFocus(success, mCamera);
            }
            return;

        case CAMERA_MSG_ZOOM:
            if (mZoomListener != null) {
                mZoomListener.onZoomChange(msg.arg1, msg.arg2 != 0, mCamera);
            }
            return;

        case CAMERA_MSG_PREVIEW_METADATA:
            if (mFaceListener != null) {
                mFaceListener.onFaceDetection((Face[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_ERROR:
            Log.e(TAG, "Error " + msg.arg1);
            if (mErrorCallback != null) {
                mErrorCallback.onError(msg.arg1, mCamera);
            }
            return;

        case CAMERA_MSG_FOCUS_MOVE:
            if (mAutoFocusMoveCallback != null) {
                mAutoFocusMoveCallback.onAutoFocusMoving(msg.arg1 == 0 ? false : true, mCamera);
            }
            return;

        default:
            Log.e(TAG, "Unknown message type " + msg.what);
            return;
        }
    }
}
At this point we are essentially in upper-layer territory: the callbacks are all registered by the camera application, and that is where each kind of data gets its corresponding handling (for example, the preview-callback variants sketched below). How each individual piece of data is processed is not analyzed here; we can dig into that later if needed. The goal is simply to make clear how the code flows.
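As one example, the three preview-callback variants that the mOneShot/mWithBuffer logic in EventHandler distinguishes are registered by the app roughly like this (an illustrative sketch only; the demo class is made up, an opened Camera with a running preview is assumed, and bufferSize must match the preview size/format, e.g. width*height*3/2 bytes for NV21):

import android.hardware.Camera;

public class PreviewCallbackDemo {
    // Only one of these variants is active at a time; each call replaces the previous one.
    public static void register(Camera camera, int bufferSize) {
        // 1) repeating callback: dispatched with mOneShot == false, mWithBuffer == false
        camera.setPreviewCallback((data, cam) -> { /* called for every frame */ });

        // 2) one-shot callback: dispatched with mOneShot == true
        camera.setOneShotPreviewCallback((data, cam) -> { /* next frame only */ });

        // 3) callback with app-supplied buffers: dispatched with mWithBuffer == true
        camera.addCallbackBuffer(new byte[bufferSize]);
        camera.setPreviewCallbackWithBuffer((data, cam) -> {
            cam.addCallbackBuffer(data);   // recycle the buffer for the next frame
        });
    }
}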
Note
The code in this article is from the Android 5.0 sources. Apart from moving CameraService out on its own, the newest Android N release is largely the same in these areas.
Copyright notice: this is an original article by the author; please do not repost without permission.