通过前面一章我们知道了在jni层我们通过sp<Camera> camera = Camera::connect(cameraId);获取到了一个Camera实例,Camera包含两个变量:
sp<ICameraService> mCameraService (实际类型为sp<BpCameraService>,最终实现类为CameraService)
sp<ICamera> mCamera (实际类型为sp<BpCamera>,最终实现类为Client)
于是我们通过类Camera::xx的函数就可以直接操作Client类的接口。
在CameraService::connect()函数中我们首先根据cameraId创建了一个CameraHardwareInterface的对象,然后将这个对象作为参数创建了一个Client对象,并返回sp<ICamera>给客户端。
下面我们开始分析CameraService::Client这个类:下面是Client类的构造函数和析构函数。
// Client constructor: wires a newly connected camera client to the HAL.
// Caches the service/client/hardware references, registers the three
// static callbacks with the HAL (passing cameraId as the opaque cookie),
// and enables the default message types.
CameraService::Client::Client(const sp<CameraService>& cameraService,
        const sp<ICameraClient>& cameraClient,
        const sp<CameraHardwareInterface>& hardware,
        int cameraId, int cameraFacing, int clientPid) {
    int callingPid = getCallingPid();
    LOG1("Client::Client E (pid %d)", callingPid);

    // Cache construction parameters in member state.
    mCameraService = cameraService;
    mCameraClient = cameraClient;
    mHardware = hardware;
    mCameraId = cameraId;
    mCameraFacing = cameraFacing;
    mClientPid = clientPid;
    mMsgEnabled = 0;
    mSurface = 0;
    mPreviewWindow = 0;

    // Register the static callbacks; cameraId is the cookie the callbacks
    // later use to recover this Client (see getClientFromCookie).
    mHardware->setCallbacks(notifyCallback,
            dataCallback,
            dataCallbackTimestamp,
            (void *)cameraId);

    // Enable zoom, error, focus, and metadata messages by default
    enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
                  CAMERA_MSG_PREVIEW_METADATA);

    // Callback is disabled by default
    mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;

    mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
    mPlayShutterSound = true;

    // Mark this camera busy and pre-load the shutter/record sounds.
    cameraService->setCameraBusy(cameraId);
    cameraService->loadSound();

    LOG1("Client::Client X (pid %d)", callingPid);
}

// tear down the client
CameraService::Client::~Client() {
    int callingPid = getCallingPid();
    LOG1("Client::~Client E (pid %d, this %p)", callingPid, this);

    // set mClientPid to let disconnect() tear down the hardware
    mClientPid = callingPid;
    disconnect();
    mCameraService->releaseSound();

    LOG1("Client::~Client X (pid %d, this %p)", callingPid, this);
}
在构造函数中:
1)首先调用CameraHardwareInterface::setCallbacks(notifyCallback, dataCallback, dataCallbackTimestamp, (void*)cameraId);
其中dataCallback函数主要用于从底层采集数据的缓冲区拷贝数据到显示的preview缓冲区。
// HAL data callback: dispatches one data message (preview frame, postview,
// raw or compressed picture, ...) to the per-type handler of the Client
// identified by the cookie.
// NOTE(review): lockIfMessageWanted() appears to acquire the client lock on
// success; the handle* methods are then responsible for releasing it — confirm
// against the handler implementations.
void CameraService::Client::dataCallback(int32_t msgType,
        const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
    LOG2("dataCallback(%d)", msgType);

    // Recover the Client from the cookie registered in setCallbacks().
    sp<Client> client = getClientFromCookie(user);
    if (client == 0) return;
    // Drop the message early if this type is not currently enabled.
    if (!client->lockIfMessageWanted(msgType)) return;

    // Neither a data buffer nor metadata: report an error to the app.
    if (dataPtr == 0 && metadata == NULL) {
        LOGE("Null data returned in data callback");
        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
        return;
    }

    // The metadata bit can ride along with any frame type; mask it off
    // before dispatching on the base message type.
    switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
        case CAMERA_MSG_PREVIEW_FRAME:
            client->handlePreviewData(msgType, dataPtr, metadata);
            break;
        case CAMERA_MSG_POSTVIEW_FRAME:
            client->handlePostview(dataPtr);
            break;
        case CAMERA_MSG_RAW_IMAGE:
            client->handleRawPicture(dataPtr);
            break;
        case CAMERA_MSG_COMPRESSED_IMAGE:
            client->handleCompressedPicture(dataPtr);
            break;
        default:
            client->handleGenericData(msgType, dataPtr, metadata);
            break;
    }
}
// preview callback - frame buffer update void CameraService::Client::handlePreviewData(int32_t msgType, const sp<IMemory>& mem, camera_frame_metadata_t *metadata) { ssize_t offset; size_t size; sp<IMemoryHeap> heap = mem->getMemory(&offset, &size); // local copy of the callback flags int flags = mPreviewCallbackFlag; // is callback enabled? if (!(flags & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK)) { // If the enable bit is off, the copy-out and one-shot bits are ignored LOG2("frame callback is disabled"); mLock.unlock(); return; } // hold a strong pointer to the client sp<ICameraClient> c = mCameraClient; // clear callback flags if no client or one-shot mode if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) { LOG2("Disable preview callback"); mPreviewCallbackFlag &= ~(CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK | CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK | CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK); disableMsgType(CAMERA_MSG_PREVIEW_FRAME); } if (c != 0) { // Is the received frame copied out or not? if (flags & CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK) { LOG2("frame is copied"); copyFrameAndPostCopiedFrame(msgType, c, heap, offset, size, metadata); } else { LOG2("frame is forwarded"); mLock.unlock(); c->dataCallback(msgType, mem, metadata); } } else { mLock.unlock(); } }
最后调用下面函数将数据拷贝到preview的缓冲区用于显示:
// Copies one preview frame out of the source heap into the reusable
// mPreviewBuffer scratch heap, wraps the copy in a MemoryBase, and posts
// it to the app-side client.
// NOTE(review): mLock is expected to be held on entry; every exit path
// unlocks it (before the binder callback on the success path).
void CameraService::Client::copyFrameAndPostCopiedFrame(
        int32_t msgType, const sp<ICameraClient>& client,
        const sp<IMemoryHeap>& heap, size_t offset, size_t size,
        camera_frame_metadata_t *metadata) {
    LOG2("copyFrameAndPostCopiedFrame");
    // It is necessary to copy out of pmem before sending this to
    // the callback. For efficiency, reuse the same MemoryHeapBase
    // provided it's big enough. Don't allocate the memory or
    // perform the copy if there's no callback.
    // hold the preview lock while we grab a reference to the preview buffer
    sp<MemoryHeapBase> previewBuffer;

    // (Re)allocate the scratch heap if it is absent or too small.
    if (mPreviewBuffer == 0) {
        mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
    } else if (size > mPreviewBuffer->virtualSize()) {
        mPreviewBuffer.clear();
        mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
    }
    if (mPreviewBuffer == 0) {
        LOGE("failed to allocate space for preview buffer");
        mLock.unlock();
        return;
    }
    previewBuffer = mPreviewBuffer;

    // Copy the frame bytes out of the source heap at the given offset.
    memcpy(previewBuffer->base(), (uint8_t *)heap->base() + offset, size);

    sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
    if (frame == 0) {
        LOGE("failed to allocate space for frame callback");
        mLock.unlock();
        return;
    }

    // Release the lock before crossing the binder boundary.
    mLock.unlock();
    client->dataCallback(msgType, frame, metadata);
}
这里将数据拷贝到sp<MemoryHeapBase> previewBuffer中,根据previewBuffer创建一个sp<MemoryBase> 对象frame,然后调用client->dataCallback()处理这个frame数据。
// callback from camera service when frame or image is ready void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) { sp<CameraListener> listener; { Mutex::Autolock _l(mLock); listener = mListener; } if (listener != NULL) { listener->postData(msgType, dataPtr, metadata); } }
这里调用了listener->postData(msgType, dataPtr, metadata)函数,根据名字我们能看出来这个listener是监听底层数据的。于是找到前面初始化的时候设置的监听类:
在jni函数里面,android_hardware_camera_native_setup()
sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
context->incStrong(thiz);
camera->setListener(context); 将Camera内部的 sp<CameraListener> mListener = context;
这里我们的JNICameraContext是继承于CameraListener类。
所以最后又回到了JNICameraContext::postData函数中,
postData函数调用的是:
void copyAndPost(JNIEnv* env, const sp<IMemory>& dataPtr, int msgType);
{
jbyteArray obj = env->NewByteArray(size); 建立一个byte数组Obj
env->SetByteArrayRegion(obj, 0, size, data); 将dataptr数据拷贝到obj数组
env->CallStaticVoidMethod(mCameraJClass, fields.post_event, mCameraJObjectWeak, msgType, 0, 0, obj); 调用java函数postEventFromNative(),这样就将数据从底层抛到java应用层进行显示了
}
第二个notifyCallback函数:
// HAL notification callback: routes a payload-less notify message to the
// Client identified by the cookie.
// Fix: the original transcription had stray prose ("回调消息") fused after
// the default-case `break;`, which is not a comment and does not compile;
// it is rendered here as a proper comment.
void CameraService::Client::notifyCallback(int32_t msgType, int32_t ext1,
        int32_t ext2, void* user) {
    LOG2("notifyCallback(%d)", msgType);

    // Recover the Client from the cookie registered in setCallbacks().
    sp<Client> client = getClientFromCookie(user);
    if (client == 0) return;
    // Drop the message if this type is not currently enabled.
    if (!client->lockIfMessageWanted(msgType)) return;

    switch (msgType) {
        case CAMERA_MSG_SHUTTER:
            // ext1 is the dimension of the yuv picture.
            client->handleShutter();
            break;
        default:
            // Generic callback message forwarded to the app.
            client->handleGenericNotify(msgType, ext1, ext2);
            break;
    }
}
调用的是CameraService::Client::handleGenericNotify(int32_t msgType, int32_t ext1, int32_t ext2);
最后调用的是Camera::notifyCallback(msgType, ext1, ext2);
调用listener->notify(msgType, ext1, ext2);
回调到java代码:
env->CallStaticVoidMethod(mCameraJClass, fields.post_event, mCameraJObjectWeak, msgType, ext1, ext2, NULL);
第三个dataCallbackTimestamp函数:
// HAL timestamped data callback (e.g. recording frames): validates the
// buffer and forwards it, together with its capture timestamp, to the
// generic timestamp handler of the Client identified by the cookie.
void CameraService::Client::dataCallbackTimestamp(nsecs_t timestamp,
        int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
    LOG2("dataCallbackTimestamp(%d)", msgType);

    // Recover the Client from the cookie registered in setCallbacks().
    sp<Client> client = getClientFromCookie(user);
    if (client == 0) return;
    // Drop the message if this type is not currently enabled.
    if (!client->lockIfMessageWanted(msgType)) return;

    // A timestamped callback must carry data; report an error otherwise.
    if (dataPtr == 0) {
        LOGE("Null data returned in data with timestamp callback");
        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
        return;
    }

    client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
}
调用的是CameraService::Client::handleGenericDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
最后调用的是Camera::dataCallbackTimestamp(timestamp, msgType, dataPtr)
调用listener->postDataTimestamp(timestamp, msgType, dataPtr);
调用的是postData(msgType, dataPtr, NULL);
2)然后调用mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
3)接着调用CameraService::setCameraBusy(cameraId);
4)最后调用CameraService::loadSound();
这几个函数比较简单,没什么可分析的。CameraService::loadSound()里面会new 两个MediaPlayer()对象,分别用来播放拍照快门音和录制的声音文件。具体请参考Android开发之MediaPlayer分析。
这三个回调函数都是CameraService::Client类的内部static函数:
void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2, void* user);
void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user);
void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
———————————————————————————————————————————————————————————————————————————————————
CameraHardwareInterface
frameworks/base/services/camera/libcameraservice/CameraHardwareInterface.h
CameraHardwareInterface是连接应用层和底层驱动的类,我们通过前面一章知道在sp<ICameraService>强引用时候我们通过hw_get_module获取到了Hal层的代理,然后在CameraService::connect()里面我们构造了一个CameraHardwareInterface对象hardware,接着调用hardware->initialize()函数,通过Hal层的代理modules打开了Camera驱动。现在我们就可以使用hardware来进行Camera操作了。
// Generic HAL module descriptor: every HAL .so exports one of these (the
// HMI symbol); devices are created from it via methods->open().
typedef struct hw_module_t {
    uint32_t tag;
    uint16_t version_major;
    uint16_t version_minor;
    const char* id;                       // module identifier, e.g. "camera"
    const char* name;
    const char* author;
    struct hw_module_methods_t* methods;  // open() entry point
    void* dso;                            // handle of the loaded shared object
    uint32_t reserved[32-7];              // padding for future fields
} hw_module_t;

// Function table attached to a module.
typedef struct hw_module_methods_t {
    // Opens the device named `id` belonging to `module`; the resulting
    // device is returned through `device`.
    int (*open)(const struct hw_module_t *module, const char* id,
                struct hw_device_t **device);
} hw_module_methods_t;
// Camera-specific HAL module: embeds the generic hw_module_t header and
// adds the camera enumeration entry points.
typedef struct camera_module {
    hw_module_t common;  // embedded generic module header
    int (*get_number_of_cameras)(void);
    int (*get_camera_info)(int camera_id, struct camera_info* info);
} camera_module_t;
camera_module_t *mModule;
1)hw_get_module("camera", (const hw_module_t **)&mModule) 获得 mModule
int hw_get_module(const char *id, const struct hw_module_t **module)
{
    return hw_get_module_by_class(id, NULL, module);
}
它内部调用 static int load(const char *id, const char *path, const struct hw_module_t **pHmi);
这个函数作用是从/system/lib/hw/camera.xx.so加载动态库,查找HMI符号的地址,保存到hw_module_t结构体中。
2)调用mModule->get_camera_info(cameraId, &info);
3)hardware = new CameraHardwareInterface(),然后调用hardware->initialize(&mModule->common)
camera_device_t *mDevice;
// Opens the camera device through the HAL module and sets up the
// HAL-facing preview-window glue.
// Fixes vs. the original transcription: `modules` was an undeclared name
// (should be the `module` parameter), `initHalPrevicewWindow` was
// misspelled, and the status_t function returned nothing (undefined
// behavior). Returns the HAL open() status.
status_t initialize(hw_module_t *module) {
    // Open the device named mName; the HAL stores the resulting
    // camera_device_t through the out-parameter into mDevice.
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        LOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}
typedef struct camera_device { hw_device_t common; camera_device_opts_t *ops; void* priv; } camera_device_t; typedef struct hw_device_t { uint32_t tag; uint32_t version; struct hw_module_t* module; uint32_t reserved[12]; int (*close)(struct hw_device_t* device); } hw_device_t;
4)new Client(this, cameraClient, hardware, cameraId, info.facing, callingPid)
mHardware = hardware;
调用mHardware->setCallbacks()
所以最后我们厂商需要做的就是在HAL层实现:
camera_module_t 结构体,包含:hw_module_t common, get_number_of_cameras, get_camera_info
camera_device_t 结构体,包含:hw_device_t common, camera_device_ops_t *ops, void* priv