rockchip: aiq: fix potential crash on mGroupMsgMap.erase()

Signed-off-by: hmz007 <hmz007@gmail.com>
Branch: master
Author: hmz007 (7 months ago)
Parent: e26d821f07
Commit: 0dc2c163a3

@@ -134,6 +134,8 @@ bool RkAiqAnalyzerGroup::msgHandle(RkAiqCoreVdBufMsg* msg) {
     if (msg->frame_id == mAwakenId)
         delayCnt = 0;
     uint32_t userId = msg->frame_id + delayCnt;
+    std::unique_lock<std::mutex> lock(mGroupMsgMapLock);
     GroupMessage& msgWrapper = mGroupMsgMap[userId];
     msgWrapper.msg_flags |= 1ULL << msg->msg_id;
@@ -161,26 +163,32 @@ bool RkAiqAnalyzerGroup::msgHandle(RkAiqCoreVdBufMsg* msg) {
                   MessageType2Str[msg->msg_id], delayCnt, mGroupMsgMap.size());
     uint64_t msg_flags = msgWrapper.msg_flags;
-    if (!(msg_flags ^ mDepsFlag)) {
-        std::array<RkAiqCoreVdBufMsg, MAX_MESSAGES>& msgList = msgWrapper.msgList;
-        mHandler(msgList, msg_cnts, userId, getType());
+    if (msg_flags ^ mDepsFlag) {
+        msgReduction(mGroupMsgMap);
+        return true;
+    }
+    // copy msgList and msg_cnts for mHandler()
+    std::array<RkAiqCoreVdBufMsg, MAX_MESSAGES> msgList = msgWrapper.msgList;
+    int msgCount = msgWrapper.msg_cnts;
 #if 0
     for (auto it = mGroupMsgMap.begin(); it != mGroupMsgMap.end();) {
         if ((*it).first <= userId) {
             it = mGroupMsgMap.erase(it);
         } else {
             break;//it++;
         }
     }
 #else
     std::map<uint32_t, GroupMessage>::iterator itup = mGroupMsgMap.upper_bound(userId);
     mGroupMsgMap.erase(mGroupMsgMap.begin(), itup);
 #endif
-        LOGD_ANALYZER("%s, group %s erase frame(%d) msg map\n", __FUNCTION__, AnalyzerGroupType2Str[mGroupType], userId);
-    } else {
-        msgReduction(mGroupMsgMap);
-        return true;
-    }
+    lock.unlock();
+    LOGD_ANALYZER("%s, group %s erase frame(%d) msg map\n", __FUNCTION__, AnalyzerGroupType2Str[mGroupType], userId);
+    mHandler(msgList, msgCount, userId, getType());
     return true;
 }
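The msgHandle() hunks above boil down to one pattern: mutate and prune mGroupMsgMap only while mGroupMsgMapLock is held, snapshot the finished frame's payload, and release the lock before the handler callback runs. Below is a minimal sketch of that pattern, not the repository's actual code; Group, Payload, Handler, onMessage and clearAll are illustrative stand-ins for the rkaiq types.

#include <cstdint>
#include <functional>
#include <map>
#include <mutex>

struct Payload {
    uint64_t flags = 0;  // bitmask of received message kinds
    int count = 0;       // number of messages collected for this frame
};

using Handler = std::function<void(const Payload&, uint32_t /*frameId*/)>;

class Group {
public:
    Group(Handler h, uint64_t deps) : mHandler(std::move(h)), mDeps(deps) {}

    bool onMessage(uint32_t frameId, uint64_t msgBit) {
        std::unique_lock<std::mutex> lock(mMapLock);  // guard every touch of mMsgMap
        Payload& entry = mMsgMap[frameId];
        entry.flags |= msgBit;
        entry.count += 1;

        if (entry.flags != mDeps)          // not all dependencies collected yet
            return true;

        Payload snapshot = entry;          // copy out while the lock is still held
        mMsgMap.erase(mMsgMap.begin(),     // drop this frame and everything older
                      mMsgMap.upper_bound(frameId));
        lock.unlock();                     // release before calling out

        mHandler(snapshot, frameId);       // callback sees a copy, never map storage
        return true;
    }

    void clearAll() {
        std::lock_guard<std::mutex> lock(mMapLock);  // same lock as onMessage()
        mMsgMap.clear();
    }

private:
    std::map<uint32_t, Payload> mMsgMap;
    mutable std::mutex mMapLock;
    Handler mHandler;
    uint64_t mDeps;
};

std::unique_lock is used rather than std::lock_guard because the lock has to be released explicitly ahead of the handler call; copying the per-frame payload is the price paid for not holding the lock across user code or across a later erase from another thread.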
@@ -190,7 +198,11 @@ XCamReturn RkAiqAnalyzerGroup::stop() {
         mRkAiqGroupMsgHdlTh->triger_stop();
         mRkAiqGroupMsgHdlTh->stop();
     }
-    mGroupMsgMap.clear();
+    {
+        std::lock_guard<std::mutex> lock(mGroupMsgMapLock);
+        mGroupMsgMap.clear();
+    }
     return XCAM_RETURN_NO_ERROR;
 }
@@ -198,6 +210,7 @@ XCamReturn RkAiqAnalyzerGroup::stop() {
 void RkAiqAnalyzerGroup::setDepsFlagAndClearMap(uint64_t new_deps)
 {
     mDepsFlag = new_deps;
+    std::lock_guard<std::mutex> lock(mGroupMsgMapLock);
     if (mGroupMsgMap.size()) {
         mGroupMsgMap.clear();
     }

@@ -87,6 +87,7 @@ class RkAiqAnalyzerGroup {
     RkAiqGrpConditions_t mGrpConds;
     SmartPtr<RkAiqAnalyzeGroupMsgHdlThread> mRkAiqGroupMsgHdlTh;
     std::map<uint32_t, GroupMessage> mGroupMsgMap;
+    mutable std::mutex mGroupMsgMapLock;
     MessageHandleWrapper mHandler;
     int8_t mUserSetDelayCnts;
     bool mVicapScaleStart{false};
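The header hunk declares the new mutex mutable. As a brief illustration of why that qualifier is useful (the class and method names below are hypothetical, not from rkaiq): a const member function can still lock a mutable mutex, since taking the lock changes physical but not logical state.

#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>

class MsgMapOwner {
public:
    std::size_t pendingFrames() const {                      // logically read-only query
        std::lock_guard<std::mutex> lock(mGroupMsgMapLock);  // still allowed to lock
        return mGroupMsgMap.size();
    }

private:
    std::map<uint32_t, int> mGroupMsgMap;
    mutable std::mutex mGroupMsgMapLock;  // mutable: lockable from const members
};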
