RDK-60291 [telemetry] RDK Coverity Defect Resolution for Device Management #235
base: develop
source/bulkdata/datamodel.c
(CI reported a check failure on line 3 of this file.)

@@ -1,6 +1,6 @@
/*
* If not stated otherwise in this file or this component's LICENSE file the
* following copyright and licenses apply:
*
* Copyright 2019 RDK Management
*
@@ -50,17 +50,28 @@
{
    (void) data;//To fix compiler warning
    cJSON *reportProfiles = NULL;
+   bool shouldContinue = true;

    T2Debug("%s ++in\n", __FUNCTION__);

-   while(!stopProcessing)
+   while(shouldContinue)
    {
        pthread_mutex_lock(&rpMutex);
+       shouldContinue = !stopProcessing;
+       if(!shouldContinue)
+       {
+           pthread_mutex_unlock(&rpMutex);
+           break;
+       }
        T2Info("%s: Waiting for event from tr-181 \n", __FUNCTION__);
-       pthread_cond_wait(&rpCond, &rpMutex);
+       while(t2_queue_count(rpQueue) == 0 && shouldContinue)
+       {
+           pthread_cond_wait(&rpCond, &rpMutex);
+           shouldContinue = !stopProcessing;
+       }

        T2Debug("%s: Received wake up signal \n", __FUNCTION__);
-       if(t2_queue_count(rpQueue) > 0)
+       if(t2_queue_count(rpQueue) > 0 && shouldContinue)
        {
            reportProfiles = (cJSON *)t2_queue_pop(rpQueue);
            if (reportProfiles)
@@ -80,17 +91,32 @@
{
    (void) data;//To fix compiler warning
    cJSON *tmpReportProfiles = NULL;
+   bool shouldContinue = true;

    T2Debug("%s ++in\n", __FUNCTION__);

-   while(!stopProcessing)
+   while(shouldContinue)
    {
        pthread_mutex_lock(&tmpRpMutex);
+       pthread_mutex_lock(&rpMutex);
+       shouldContinue = !stopProcessing;
+       pthread_mutex_unlock(&rpMutex);
+       if(!shouldContinue)
+       {
+           pthread_mutex_unlock(&tmpRpMutex);
+           break;
+       }
        T2Info("%s: Waiting for event from tr-181 \n", __FUNCTION__);
-       pthread_cond_wait(&tmpRpCond, &tmpRpMutex);
+       while(t2_queue_count(tmpRpQueue) == 0 && shouldContinue)
+       {
+           pthread_cond_wait(&tmpRpCond, &tmpRpMutex);
+           pthread_mutex_lock(&rpMutex);
+           shouldContinue = !stopProcessing;
+           pthread_mutex_unlock(&rpMutex);
+       }

        T2Debug("%s: Received wake up signal \n", __FUNCTION__);
-       if(t2_queue_count(tmpRpQueue) > 0)
+       if(t2_queue_count(tmpRpQueue) > 0 && shouldContinue)
        {
            tmpReportProfiles = (cJSON *)t2_queue_pop(tmpRpQueue);
            if (tmpReportProfiles)
@@ -110,11 +136,31 @@
{
    (void) data;//To fix compiler warning
    struct __msgpack__ *msgpack;
-   while(!stopProcessing)
+   bool shouldContinue = true;
+
+   while(shouldContinue)
    {
+       // Check stopProcessing first without holding rpMsgMutex to avoid
+       // nested mutex acquisition which could lead to deadlock if another
+       // thread acquires these mutexes in opposite order (e.g., rpMutex then rpMsgMutex)
+       pthread_mutex_lock(&rpMutex);
+       shouldContinue = !stopProcessing;
+       pthread_mutex_unlock(&rpMutex);
+
+       if(!shouldContinue)
+       {
+           break;
+       }
+
        pthread_mutex_lock(&rpMsgMutex);
-       pthread_cond_wait(&msg_Cond, &rpMsgMutex);
-       if(t2_queue_count(rpMsgPkgQueue) > 0)
+       while(t2_queue_count(rpMsgPkgQueue) == 0 && shouldContinue)
+       {
+           pthread_cond_wait(&msg_Cond, &rpMsgMutex);
+           pthread_mutex_lock(&rpMutex);
+           shouldContinue = !stopProcessing;
+           pthread_mutex_unlock(&rpMutex);
+       }
+       if(t2_queue_count(rpMsgPkgQueue) > 0 && shouldContinue)
        {
            msgpack = (struct __msgpack__ *)t2_queue_pop(rpMsgPkgQueue);
            if (msgpack)
@@ -274,18 +320,24 @@
    msgpack->msgpack_blob = str;
    msgpack->msgpack_blob_size = strSize;
-   pthread_mutex_lock(&rpMsgMutex);
-   if (!stopProcessing)
-   {
-       t2_queue_push(rpMsgPkgQueue, (void *)msgpack);
-       pthread_cond_signal(&msg_Cond);
-   }
-   else
+
+   // Check stopProcessing without holding rpMsgMutex to avoid nested mutex
+   // acquisition which could lead to deadlock
+   pthread_mutex_lock(&rpMutex);
+   bool isProcessing = !stopProcessing;
+   pthread_mutex_unlock(&rpMutex);
+
+   if (!isProcessing)
    {
        free(msgpack->msgpack_blob);
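The consumer-thread changes above all apply the standard condition-variable idiom: a wake-up is treated as a hint, and the real predicate (queue non-empty, not stopping) is re-checked in a loop around pthread_cond_wait, which handles spurious wake-ups and shutdown requests alike. A minimal, self-contained sketch of that idiom follows; the names (workMutex, workCond, pendingItems, stopFlag, produceOne, consumer) are hypothetical and are not taken from this PR.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical state for illustration only. */
    static pthread_mutex_t workMutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  workCond  = PTHREAD_COND_INITIALIZER;
    static bool stopFlag = false;
    static int  pendingItems = 0;            /* stands in for t2_queue_count() */

    static void produceOne(void)
    {
        pthread_mutex_lock(&workMutex);
        pendingItems++;                      /* stands in for t2_queue_push() */
        pthread_cond_signal(&workCond);
        pthread_mutex_unlock(&workMutex);
    }

    static void *consumer(void *arg)
    {
        (void) arg;
        pthread_mutex_lock(&workMutex);
        while(!stopFlag)
        {
            /* Re-check the predicate after every wake-up: spurious wake-ups
             * and shutdown requests are both handled by the loop condition. */
            while(pendingItems == 0 && !stopFlag)
            {
                pthread_cond_wait(&workCond, &workMutex);
            }
            if(pendingItems > 0)
            {
                pendingItems--;              /* stands in for t2_queue_pop() + processing */
            }
        }
        pthread_mutex_unlock(&workMutex);
        return NULL;
    }

Unlike the PR, this sketch keeps the stop flag and the queue state under a single mutex, which is also what avoids the cross-mutex concerns raised in the review comments below.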
Review comment (suggested change):

-            free(msgpack->msgpack_blob);
+            /* Do not free msgpack_blob here; ownership of 'str' remains with caller
+             * since the message is not queued.
+             */
Copilot AI, Jan 19, 2026:
The error handling at line 343 returns T2ERROR_SUCCESS even though an error condition was detected (datamodel not initialized). This should return T2ERROR_FAILURE or an appropriate error code to indicate the operation failed.
Suggested change:

-    return T2ERROR_SUCCESS;
+    return T2ERROR_FAILURE;
Copilot AI, Jan 20, 2026:
There is a race condition here. Between unlocking rpMutex at line 336 and locking rpMsgMutex at line 346, another thread could set stopProcessing to true. This means the msgpack data could be pushed to the queue even though the processing threads might be shutting down. Consider holding rpMutex until after pushing to the queue, or use a local copy of rpMsgPkgQueue under rpMutex to ensure atomic checking and queueing.
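A minimal sketch of the first option the comment suggests (keep the check and the enqueue in one critical section). It assumes the file's existing declarations (rpMutex, rpMsgMutex, rpMsgPkgQueue, msg_Cond, stopProcessing, struct __msgpack__) are in scope and that every code path takes rpMutex before rpMsgMutex so the nested locking cannot deadlock; the helper name enqueueMsgPack is hypothetical.

    /* Sketch only, not the PR's code: the stopProcessing check and the push
     * happen in one critical section, so shutdown cannot slip in between. */
    static bool enqueueMsgPack(struct __msgpack__ *msgpack)
    {
        bool queued = false;
        pthread_mutex_lock(&rpMutex);
        if (!stopProcessing)
        {
            pthread_mutex_lock(&rpMsgMutex);      /* lock order: rpMutex -> rpMsgMutex */
            t2_queue_push(rpMsgPkgQueue, (void *)msgpack);
            pthread_cond_signal(&msg_Cond);
            pthread_mutex_unlock(&rpMsgMutex);
            queued = true;
        }
        pthread_mutex_unlock(&rpMutex);
        return queued;   /* when false, ownership of the blob stays with the caller */
    }

The caller would then free (or keep) the blob only when enqueueMsgPack returns false, which also lines up with the ownership note in the earlier suggestion.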
Review comment:

tmpRpQueue operations are protected by tmpRpMutex in this thread, but producers push to tmpRpQueue under rpMutex (see datamodel_processProfile using pthread_mutex_lock(&rpMutex) before t2_queue_push(tmpRpQueue, ...)). This means tmpRpQueue is accessed under different mutexes, which is a data race and can corrupt the queue. Use a single mutex consistently for tmpRpQueue (and pair tmpRpCond with that same mutex), or remove the extra mutex.
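A sketch of the single-mutex option, assuming tmpRpMutex becomes the only lock guarding tmpRpQueue and tmpRpCond and that the producer in datamodel_processProfile is changed to match; profileJson and tmpStopRequested are hypothetical placeholder names, not identifiers from this PR.

    /* Producer side (sketch): enqueue and signal under tmpRpMutex only. */
    pthread_mutex_lock(&tmpRpMutex);
    t2_queue_push(tmpRpQueue, (void *)profileJson);
    pthread_cond_signal(&tmpRpCond);
    pthread_mutex_unlock(&tmpRpMutex);

    /* Consumer side (sketch): wait and pop under the same tmpRpMutex. */
    pthread_mutex_lock(&tmpRpMutex);
    while(t2_queue_count(tmpRpQueue) == 0 && !tmpStopRequested)
    {
        pthread_cond_wait(&tmpRpCond, &tmpRpMutex);
    }
    if(t2_queue_count(tmpRpQueue) > 0)
    {
        cJSON *profile = (cJSON *)t2_queue_pop(tmpRpQueue);
        pthread_mutex_unlock(&tmpRpMutex);
        /* process 'profile' outside the lock */
    }
    else
    {
        pthread_mutex_unlock(&tmpRpMutex);
    }

Because both sides use the same mutex, the queue and its condition variable are always observed consistently, and the cond_wait/signal pairing no longer spans two locks.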